blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5dbe640a7962de2d943dfb53aed0930dc9248b69
|
1054d2c6ad221bd5e2ec1cbe496679d6eebd1264
|
/app.py
|
73a8e2a8d7dd19e7b933de6f8d4d90bc295a01c8
|
[] |
no_license
|
feng147258/reconment
|
d2e96253716678b994eebd11b103b7ca89db5ea6
|
a1c8119235c6cc329e76f53f1bb0d4ec8e51f292
|
refs/heads/master
| 2023-07-14T12:00:18.878090
| 2021-08-13T08:58:57
| 2021-08-13T08:58:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,245
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/3 6:49 PM
# @Author : Yingjun Zhu
# @File : app.py.py
# Flask web service exposing recommendation endpoints: a paged article list
# plus like/read/collect event logging backed by MySQL, with Kafka
# notification of "likes" events.
from flask import Flask, jsonify, request
from web.service.pageData import PageData
from dataclean.dao.mysql_db import Mysql
from web.entity.user import UserId
from web.service.logData import LogData
from web.kafka_service import kafka_producter

app = Flask(__name__)
# Shared service singletons reused by every request handler below.
log_data = LogData()
page_data = PageData()
@app.route('/reconmend/get_rec_list', methods=['POST', 'GET'])
def bertsimer():
    """Return one page of recommended articles (POST only).

    Expects a JSON body with pageSize, pageNum, userId and types; only
    pageSize/pageNum are currently used. Non-POST requests fall through
    and return None, matching the original behavior.
    """
    if request.method != 'POST':
        return
    payload = request.get_json()
    size = payload.get("pageSize")
    num = payload.get("pageNum")
    uid = payload.get("userId")
    kinds = payload.get("types")
    try:
        rows = page_data.get_page_data(page_size=size, page_num=num)
        return jsonify({"code": 0, "msg": "success", "data": rows})
    except Exception as err:
        print(str(err))
        return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/likes', methods=['POST', 'GET'])
def likes():
    # Record a "likes" event for an article, then notify the recommendation
    # pipeline over Kafka. Only POST is handled; a GET request falls through
    # and returns None. NOTE(review): confirm GET is intentionally a no-op.
    if request.method == 'POST':
        title = request.get_json().get("title")
        content_id = request.get_json().get("contentId")
        user_id = request.get_json().get("userId")
        try:
            mysql = Mysql()
            # NOTE(review): the session is never closed, which presumably
            # leaks one DB connection per request — confirm the lifecycle of
            # Mysql()._DBSession and add a close()/context manager if so.
            session = mysql._DBSession()
            # Only log events for known users.
            if session.query(UserId.id).filter(UserId.id == user_id).count() > 0:
                # NOTE(review): modify_articles_details is called with the
                # literal "key" — looks like it should be content_id; verify.
                if log_data.insert_log(user_id, content_id, title, "likes") and log_data.modify_articles_details("key", "likes"):
                    # Message format is "<content_id>:likes" on topic "recommend".
                    kafka_producter.main("recommend", str.encode(str(content_id) + ":likes"))
                    return jsonify({"code": 0, "msg": "success", "data": "喜欢成功"})
                else:
                    return jsonify({"code": 1001, "msg": "success", "data": "喜欢失败"})
            else:
                return jsonify({"code": 1000, "msg": "success", "data": "用户不存在"})
        except Exception as e:
            print(str(e))
            return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/read', methods=['POST', 'GET'])
def read():
    # Record a "read" event for an article. Mirrors likes() but without the
    # Kafka notification. Only POST is handled; GET falls through to None.
    if request.method == 'POST':
        title = request.get_json().get("title")
        content_id = request.get_json().get("contentId")
        user_id = request.get_json().get("userId")
        try:
            mysql = Mysql()
            # NOTE(review): session never closed — see likes() for the same issue.
            session = mysql._DBSession()
            if session.query(UserId.id).filter(UserId.id == user_id).count() > 0:
                if log_data.insert_log(user_id, content_id, title, "read") and log_data.modify_articles_details("key", "read"):
                    # NOTE(review): "阅读陈宫" looks like a typo of "阅读成功"
                    # ("read succeeded") — runtime string, left unchanged here.
                    return jsonify({"code": 0, "msg": "success", "data": "阅读陈宫"})
                else:
                    return jsonify({"code": 1001, "msg": "success", "data": "阅读失败"})
            else:
                return jsonify({"code": 1000, "msg": "success", "data": "用户不存在"})
        except Exception as e:
            print(str(e))
            return jsonify({"code": 1000, "msg": "fail"})
@app.route('/reconmend/collections', methods=['POST', 'GET'])
def collections():
    # Record a "collections" (bookmark) event for an article. Same shape as
    # read(); only POST is handled, GET falls through to None.
    if request.method == 'POST':
        title = request.get_json().get("title")
        content_id = request.get_json().get("contentId")
        user_id = request.get_json().get("userId")
        try:
            mysql = Mysql()
            # NOTE(review): session never closed — see likes() for the same issue.
            session = mysql._DBSession()
            if session.query(UserId.id).filter(UserId.id == user_id).count() > 0:
                if log_data.insert_log(user_id, content_id, title, "collections") and log_data.modify_articles_details("key", "collections"):
                    return jsonify({"code": 0, "msg": "success", "data": "收藏成功"})
                else:
                    return jsonify({"code": 1001, "msg": "success", "data": "收藏失败"})
            else:
                # NOTE(review): sibling handlers return "用户不存在" here; this
                # one says "接口操作出现问题" — inconsistent, confirm intent.
                return jsonify({"code": 1000, "msg": "success", "data": "接口操作出现问题"})
        except Exception as e:
            print(str(e))
            return jsonify({"code": 1000, "msg": "fail"})
def register():
    """Placeholder for a future user-registration endpoint (not implemented)."""
    pass


def login():
    """Placeholder for a future login endpoint (not implemented)."""
    pass


if __name__ == '__main__':
    # Development server: listen on all interfaces, port 8080, single-threaded.
    app.run(host='0.0.0.0', threaded=False, port=8080)
|
[
"yingjun.zhu@esoon.com"
] |
yingjun.zhu@esoon.com
|
057960e15bf592de3eaac0311f6e861f90dda900
|
2562c465fbf059b8846acbcb13442347c5fd058d
|
/src/pms7003.py
|
56b466c6621cbbd6345763080b9c6dc1bceeaac2
|
[] |
no_license
|
dawncold/raspberry_pms7003
|
3d343836cb49851483eec6babe4bec62cd3260a4
|
f45571f2a7ad5c38fad86c873434188e1f582a5e
|
refs/heads/master
| 2021-04-27T00:12:18.906348
| 2019-08-04T13:37:18
| 2019-08-04T13:37:18
| 123,770,113
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
#! coding: utf-8
# PMS7003 particulate-matter sensor reader: frame sync, checksum validation
# and field decoding over a serial line (written for Python 2 — see the
# __future__ imports and str-based byte handling below).
from __future__ import unicode_literals, print_function, division
import serial

SERIAL_DEVICE = '/dev/ttyAMA0'
# Fixed two-byte frame header ('B', 'M') emitted by the sensor.
HEAD_FIRST = 0x42
HEAD_SECOND = 0x4d
DATA_LENGTH = 32
# Body length excludes the two header bytes.
BODY_LENGTH = DATA_LENGTH - 1 - 1
# Byte offsets of each big-endian 16-bit field within the frame body:
# CF=1 (standard particle) readings, atmospheric readings, then particle
# counts per 0.1L of air bucketed by diameter.
P_CF_PM10 = 2
P_CF_PM25 = 4
P_CF_PM100 = 6
P_C_PM10 = 8
P_C_PM25 = 10
P_C_PM100 = 12
P_C_03 = 14
P_C_05 = 16
P_C_10 = 18
P_C_25 = 20
P_C_50 = 22
P_C_100 = 24
# (offset, human-readable label, unit) for every decoded field.
DATA_DESC = [
    (P_CF_PM10, 'CF=1, PM1.0', 'μg/m3'),
    (P_CF_PM25, 'CF=1, PM2.5', 'μg/m3'),
    (P_CF_PM100, 'CF=1, PM10', 'μg/m3'),
    (P_C_PM10, 'PM1.0', 'μg/m3'),
    (P_C_PM25, 'PM2.5', 'μg/m3'),
    (P_C_PM100, 'PM10', 'μg/m3'),
    (P_C_03, '0.1L, d>0.3μm', ''),
    (P_C_05, '0.1L, d>0.5μm', ''),
    (P_C_10, '0.1L, d>1μm', ''),
    (P_C_25, '0.1L, d>2.5μm', ''),
    (P_C_50, '0.1L, d>5.0μm', ''),
    (P_C_100, '0.1L, d>10μm', ''),
]
def get_frame(_serial):
    """Scan the serial stream until a frame header is found; return the body.

    Blocks until a complete BODY_LENGTH-byte body (length + data + checksum,
    header excluded) follows the 0x42 0x4d header bytes.
    """
    while True:
        b = _serial.read()
        # Resynchronize: discard bytes until the first header byte appears.
        if b != chr(HEAD_FIRST):
            continue
        b = _serial.read()
        if b != chr(HEAD_SECOND):
            continue
        # NOTE(review): comparing read() output with chr(...) assumes
        # Python 2 str bytes; on Python 3 serial returns bytes and this
        # comparison would never match — confirm the target interpreter.
        body = _serial.read(BODY_LENGTH)
        # Short read (timeout): restart the header search.
        if len(body) != BODY_LENGTH:
            continue
        return body
def get_frame_length(_frame):
    """Return the 16-bit big-endian length stored in the first two body bytes."""
    high, low = ord(_frame[0]), ord(_frame[1])
    return (high << 8) | low
def get_version_and_error_code(_frame):
    """Return the (version, error_code) fields stored just before the checksum."""
    version_field = _frame[-4]
    error_field = _frame[-3]
    return version_field, error_field
def valid_frame_checksum(_frame):
    """Validate the trailing big-endian checksum of a frame body.

    The expected value is the byte-sum of both header bytes plus every body
    byte except the two checksum bytes themselves.
    """
    stored = (ord(_frame[-2]) << 8) | ord(_frame[-1])
    computed = HEAD_FIRST + HEAD_SECOND + sum(ord(ch) for ch in _frame[:-2])
    return stored == computed
def decode_frame(_frame):
    """Decode every field listed in DATA_DESC from a validated frame body.

    Returns a dict keyed by the field's byte offset (as a string), each value
    a (description, reading, unit) tuple.
    """
    decoded = {}
    for offset, desc, unit in DATA_DESC:
        reading = (ord(_frame[offset]) << 8) | ord(_frame[offset + 1])
        decoded[str(offset)] = (desc, reading, unit)
    return decoded
def read_data():
    """Read and decode one measurement frame from the PMS7003 sensor.

    Opens SERIAL_DEVICE at 9600 8N1, reads a single frame, and returns a
    dict {'data': {...}, 'version': ..., 'errcode': ...}. Returns None when
    the frame cannot be read or its checksum does not match. The serial
    port is always closed on exit.
    """
    ser = serial.Serial(port=SERIAL_DEVICE, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
                        stopbits=serial.STOPBITS_ONE)
    try:
        frame = get_frame(ser)
    except Exception as e:
        # BUG FIX: was `e.message`, which does not exist on Python 3 (and was
        # deprecated since Python 2.6) — format the exception object instead.
        print('get frame got exception: {}'.format(e))
    else:
        if not valid_frame_checksum(frame):
            print('frame checksum mismatch')
            return
        data = {'data': decode_frame(frame)}
        version, error_code = get_version_and_error_code(frame)
        data['version'] = version
        data['errcode'] = error_code
        return data
    finally:
        ser.close()
if __name__ == '__main__':
    data = read_data()
    if not data:
        print('no data')
        exit(0)
    # '\0' is the single NUL character: a non-NUL error code means the sensor
    # reported a fault. NOTE(review): on Python 3 errcode would be an int
    # (bytes indexing), so this comparison assumes Python 2 strings.
    if data['errcode'] != '\0':
        print('got error: {}'.format(data['errcode']))
        exit(-1)
    # Print measurements ordered by their numeric frame offset.
    for k in sorted(data['data'], key=lambda x: int(x)):
        v = data['data'][k]
        print('{}: {} {}'.format(v[0], v[1], v[2]))
|
[
"loooseleaves@gmail.com"
] |
loooseleaves@gmail.com
|
0fdb7a7c501f03fb7f776e4965cd4da3243f4ed9
|
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
|
/AutonomousSourceCode/data/raw/squareroot/7ab7bec6-576b-4910-98d1-ec30c84244ab__calculate_square.py
|
0bf1d0137076df117eaec3d77052d26dce255f54
|
[] |
no_license
|
erickmiller/AutomatousSourceCode
|
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
|
44ee2fb9ac970acf7389e5da35b930d076f2c530
|
refs/heads/master
| 2021-05-24T01:12:53.154621
| 2020-11-20T23:50:11
| 2020-11-20T23:50:11
| 60,889,742
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,150
|
py
|
# calculate_square.py
# Minimal Tkinter GUI (Python 2: the `Tkinter` module) that squares a number.
from Tkinter import *
import ttk


def calculate_square(*args):
    # Read the entry text, square it, and push the result into the output
    # StringVar. *args absorbs the Tk event object passed by <Return>
    # bindings; a non-numeric entry raises ValueError from float().
    value_in = float(number_in.get())
    number_out.set(value_in * value_in)
# --- UI construction (module-level script) ---
root = Tk()
root.title('Calculate square')

mainframe = ttk.Frame(root)
mainframe.grid(column=1, row=1, sticky=(N, E, S, W))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)

# Shared Tk variables linking the entry widget to the result label.
number_in = StringVar()
number_out = StringVar()

square_of_string_label = ttk.Label(mainframe, text='The square of')
square_of_string_label.grid(column=1, row=1, sticky=E)
number_in_entry = ttk.Entry(mainframe, width=5, textvariable=number_in)
number_in_entry.grid(column=2, row=1, sticky=(E, W))
is_string_label = ttk.Label(mainframe, text='is')
is_string_label.grid(column=1, row=2, sticky=E)
number_out_label = ttk.Label(mainframe, textvariable=number_out)
number_out_label.grid(column=2, row=2, sticky=W)
go_button = ttk.Button(mainframe, text='Go!', command=calculate_square)
go_button.grid(column=2, row=3, sticky=W)

# Uniform padding around every child widget.
for child in mainframe.winfo_children():
    child.grid_configure(padx=2, pady=2)

number_in_entry.focus()
# Pressing Enter anywhere triggers the same calculation as the button.
root.bind('<Return>', calculate_square)
root.mainloop()
|
[
"erickmiller@gmail.com"
] |
erickmiller@gmail.com
|
112567ac517d406a0446057912ddeb86c659bb3f
|
87292fde6a47fcb326c25c2f99546653ef8345ed
|
/Frame/Model_Train.py
|
6a9fcc3fdf75f4b7c03e9070d13e2237092fbfd4
|
[] |
no_license
|
amuge1997/NetManageAndTrainFrame
|
b07f4263ff3c769244921701f9ce862ff21ceddb
|
4ef5186f638381d69127308efa554db8cb7869fc
|
refs/heads/master
| 2022-09-18T02:42:42.226814
| 2020-05-25T04:04:28
| 2020-05-25T04:04:28
| 257,864,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
import torch.nn as nn
import torch.optim as optim
import time
class Train:
    """Drives a standard supervised training loop over a torch model."""

    def __init__(self):
        pass

    def train(self, model, loader, dc_train_params,):
        """Train ``model`` on batches drawn from ``loader``.

        Args:
            model: a ``torch.nn.Module``.
            loader: iterable of ``(batch_x, batch_y)`` pairs.
            dc_train_params: dict with keys ``lr``, ``epochs``,
                ``lossf`` ('mse' | 'smo' | 'bce'), ``optim`` ('adam' | 'sgd'),
                ``momentum`` (used only by 'sgd') and ``is_show_detail``
                (print per-step losses when truthy).

        Returns:
            dict ``{'model': model, 'train_log': {...}}`` where ``train_log``
            records the hyper-parameters, the per-epoch mean losses and the
            wall-clock training time.

        Raises:
            Exception: if ``lossf`` or ``optim`` names an unknown choice.
        """
        lr = dc_train_params['lr']
        epochs = dc_train_params['epochs']
        lossf_sel = dc_train_params['lossf']
        opt_sel = dc_train_params['optim']
        momentum = dc_train_params['momentum']
        is_show_detail = dc_train_params['is_show_detail']
        if lossf_sel == 'mse':
            loss_func = nn.MSELoss()
        elif lossf_sel == 'smo':
            loss_func = nn.SmoothL1Loss()
        elif lossf_sel == 'bce':
            # BUG FIX: was `nn.BCELoss` (the class object, not an instance),
            # so `loss_func(Y, batch_y)` constructed a module instead of
            # computing a loss and `.backward()` then failed.
            loss_func = nn.BCELoss()
        else:
            raise Exception('loss function')
        if opt_sel == 'adam':
            optimizer = optim.Adam(model.parameters(), lr=lr)
        elif opt_sel == 'sgd':
            optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
        else:
            raise Exception('optimizer function')
        dc_model = {
            'model': model,
            'train_log': None,
        }
        train_log = {
            'lr': lr,
            'epochs': epochs,
            'lossf': lossf_sel,
            'optim': opt_sel,
            'momentum': momentum,
            'loss': None
        }
        # Model training.
        ls_loss = []
        ls_rate = []
        start = time.time()
        for epoch in range(epochs):
            print()
            train_lossSum = 0
            for step, (batch_x, batch_y) in enumerate(loader):
                # Forward pass.
                Y = model(batch_x)
                # Loss against the batch targets.
                loss = loss_func(Y, batch_y)
                # Zero accumulated gradients before backprop.
                optimizer.zero_grad()
                # Backward pass: compute fresh gradients.
                loss.backward()
                # Apply the parameter update.
                optimizer.step()
                train_loss = loss.item()
                train_lossSum += train_loss
                if is_show_detail:
                    print('{}-{}: {}'.format(epoch, step, train_loss))
            train_lossSum = train_lossSum / len(loader)
            print()
            print('{}-mean: {}'.format(epoch, train_lossSum))
            ls_loss.append(train_lossSum)
            if len(ls_loss) > 1:
                # Loss ratios vs. the first and the previous epoch, printed as
                # a cheap convergence indicator.
                fl_rateFirst = ls_loss[-1] / ls_loss[0]
                fl_rateLast = ls_loss[-1] / ls_loss[-2]
                ls_rate.append(fl_rateFirst)
                print('{}-rate-compare with first: {}'.format(epoch, fl_rateFirst))
                print('{}-rate-compare with last: {}'.format(epoch, fl_rateLast))
            print()
        end = time.time()
        use_time = end - start
        train_log['time'] = use_time
        train_log['loss'] = ls_loss
        dc_model['train_log'] = train_log
        return dc_model
|
[
"amuge1997z@qq.com"
] |
amuge1997z@qq.com
|
b796b20a4d9e957f27a98c703b071bbc111e9bde
|
b144c5142226de4e6254e0044a1ca0fcd4c8bbc6
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocolstack/ancpvlanrange_58418cab117460d5be96e7c24e4e1bfb.py
|
00c591b0ead7d391ee148ba1bd8b5a0ea079d425
|
[
"MIT"
] |
permissive
|
iwanb/ixnetwork_restpy
|
fa8b885ea7a4179048ef2636c37ef7d3f6692e31
|
c2cb68fee9f2cc2f86660760e9e07bd06c0013c2
|
refs/heads/master
| 2021-01-02T17:27:37.096268
| 2020-02-11T09:28:15
| 2020-02-11T09:28:15
| 239,721,780
| 0
| 0
|
NOASSERTION
| 2020-02-11T09:20:22
| 2020-02-11T09:20:21
| null |
UTF-8
|
Python
| false
| false
| 12,400
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class AncpVlanRange(Base):
    """
    The AncpVlanRange class encapsulates a required ancpVlanRange resource which will be retrieved from the server every time the property is accessed.
    """
    # No per-instance __dict__: attribute access goes through Base's
    # _get_attribute/_set_attribute REST plumbing.
    __slots__ = ()
    # Server-side data-model name of this resource.
    _SDM_NAME = 'ancpVlanRange'

    def __init__(self, parent):
        super(AncpVlanRange, self).__init__(parent)

    @property
    def VlanIdInfo(self):
        """An instance of the VlanIdInfo class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e.VlanIdInfo)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Local import keeps the generated package free of circular imports.
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_afba627c0a86f7bdccdbbac157859f9e import VlanIdInfo
        return VlanIdInfo(self)

    @property
    def Enabled(self):
        """Disabled ranges won't be configured nor validated.
        Returns:
            bool
        """
        return self._get_attribute('enabled')
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute('enabled', value)

    @property
    def FirstId(self):
        """DEPRECATED The first ID to be used for the first VLAN tag.
        Returns:
            number
        """
        return self._get_attribute('firstId')
    @FirstId.setter
    def FirstId(self, value):
        self._set_attribute('firstId', value)

    @property
    def IdIncrMode(self):
        """Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
        Returns:
            number
        """
        return self._get_attribute('idIncrMode')
    @IdIncrMode.setter
    def IdIncrMode(self, value):
        self._set_attribute('idIncrMode', value)

    @property
    def Increment(self):
        """DEPRECATED Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
        Returns:
            number
        """
        return self._get_attribute('increment')
    @Increment.setter
    def Increment(self, value):
        self._set_attribute('increment', value)

    @property
    def IncrementStep(self):
        """DEPRECATED Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
        Returns:
            number
        """
        return self._get_attribute('incrementStep')
    @IncrementStep.setter
    def IncrementStep(self, value):
        self._set_attribute('incrementStep', value)

    @property
    def InnerEnable(self):
        """DEPRECATED Enable the inner VLAN.
        Returns:
            bool
        """
        return self._get_attribute('innerEnable')
    @InnerEnable.setter
    def InnerEnable(self, value):
        self._set_attribute('innerEnable', value)

    @property
    def InnerFirstId(self):
        """DEPRECATED The first ID to be used for the inner VLAN tag.
        Returns:
            number
        """
        return self._get_attribute('innerFirstId')
    @InnerFirstId.setter
    def InnerFirstId(self, value):
        self._set_attribute('innerFirstId', value)

    @property
    def InnerIncrement(self):
        """DEPRECATED Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
        Returns:
            number
        """
        return self._get_attribute('innerIncrement')
    @InnerIncrement.setter
    def InnerIncrement(self, value):
        self._set_attribute('innerIncrement', value)

    @property
    def InnerIncrementStep(self):
        """DEPRECATED Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
        Returns:
            number
        """
        return self._get_attribute('innerIncrementStep')
    @InnerIncrementStep.setter
    def InnerIncrementStep(self, value):
        self._set_attribute('innerIncrementStep', value)

    @property
    def InnerPriority(self):
        """DEPRECATED The 802.1Q priority to be used for the inner VLAN tag.
        Returns:
            number
        """
        return self._get_attribute('innerPriority')
    @InnerPriority.setter
    def InnerPriority(self, value):
        self._set_attribute('innerPriority', value)

    @property
    def InnerTpid(self):
        """DEPRECATED The TPID value in the inner VLAN Tag.
        Returns:
            str
        """
        return self._get_attribute('innerTpid')
    @InnerTpid.setter
    def InnerTpid(self, value):
        self._set_attribute('innerTpid', value)

    @property
    def InnerUniqueCount(self):
        """DEPRECATED Number of unique inner VLAN IDs to use.
        Returns:
            number
        """
        return self._get_attribute('innerUniqueCount')
    @InnerUniqueCount.setter
    def InnerUniqueCount(self, value):
        self._set_attribute('innerUniqueCount', value)

    @property
    def Name(self):
        """Name of range
        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)

    @property
    def ObjectId(self):
        """Unique identifier for this object
        Returns:
            str
        """
        # Read-only: no setter is generated for objectId.
        return self._get_attribute('objectId')

    @property
    def Priority(self):
        """DEPRECATED The 802.1Q priority to be used for the outer VLAN tag.
        Returns:
            number
        """
        return self._get_attribute('priority')
    @Priority.setter
    def Priority(self, value):
        self._set_attribute('priority', value)

    @property
    def Tpid(self):
        """DEPRECATED The TPID value in the outer VLAN Tag.
        Returns:
            str
        """
        return self._get_attribute('tpid')
    @Tpid.setter
    def Tpid(self, value):
        self._set_attribute('tpid', value)

    @property
    def UniqueCount(self):
        """DEPRECATED Number of unique first VLAN IDs to use.
        Returns:
            number
        """
        return self._get_attribute('uniqueCount')
    @UniqueCount.setter
    def UniqueCount(self, value):
        self._set_attribute('uniqueCount', value)

    def update(self, Enabled=None, FirstId=None, IdIncrMode=None, Increment=None, IncrementStep=None, InnerEnable=None, InnerFirstId=None, InnerIncrement=None, InnerIncrementStep=None, InnerPriority=None, InnerTpid=None, InnerUniqueCount=None, Name=None, Priority=None, Tpid=None, UniqueCount=None):
        """Updates a child instance of ancpVlanRange on the server.
        Args:
            Enabled (bool): Disabled ranges won't be configured nor validated.
            FirstId (number): The first ID to be used for the first VLAN tag.
            IdIncrMode (number): Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
            Increment (number): Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
            IncrementStep (number): Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
            InnerEnable (bool): Enable the inner VLAN.
            InnerFirstId (number): The first ID to be used for the inner VLAN tag.
            InnerIncrement (number): Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
            InnerIncrementStep (number): Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
            InnerPriority (number): The 802.1Q priority to be used for the inner VLAN tag.
            InnerTpid (str): The TPID value in the inner VLAN Tag.
            InnerUniqueCount (number): Number of unique inner VLAN IDs to use.
            Name (str): Name of range
            Priority (number): The 802.1Q priority to be used for the outer VLAN tag.
            Tpid (str): The TPID value in the outer VLAN Tag.
            UniqueCount (number): Number of unique first VLAN IDs to use.
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() maps each keyword argument to the attribute to update;
        # Base._update skips the Nones.
        self._update(locals())

    def CustomProtocolStack(self, *args, **kwargs):
        """Executes the customProtocolStack operation on the server.
        Create custom protocol stack under /vport/protocolStack
        customProtocolStack(Arg2:list, Arg3:enum)
        Args:
            args[0] is Arg2 (list(str)): List of plugin types to be added in the new custom stack
            args[1] is Arg3 (str(kAppend|kMerge|kOverwrite)): Append, merge or overwrite existing protocol stack
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # NOTE(review): Arg1 here is `self`, whereas the two execs below pass
        # `self.href` — presumably intentional in the generated API; confirm.
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('customProtocolStack', payload=payload, response_object=None)

    def DisableProtocolStack(self, *args, **kwargs):
        """Executes the disableProtocolStack operation on the server.
        Disable a protocol under protocolStack using the class name
        disableProtocolStack(Arg2:string)string
        Args:
            args[0] is Arg2 (str): Protocol class name to disable
        Returns:
            str: Status of the exec
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('disableProtocolStack', payload=payload, response_object=None)

    def EnableProtocolStack(self, *args, **kwargs):
        """Executes the enableProtocolStack operation on the server.
        Enable a protocol under protocolStack using the class name
        enableProtocolStack(Arg2:string)string
        Args:
            args[0] is Arg2 (str): Protocol class name to enable
        Returns:
            str: Status of the exec
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self.href }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('enableProtocolStack', payload=payload, response_object=None)
|
[
"srvc_cm_packages@keysight.com"
] |
srvc_cm_packages@keysight.com
|
3781aec365f284490c5bfb10dcd9c409d8e70233
|
69f5b9375c4ae3908cfbd29c0b6d10117de31e5d
|
/predict_frete.py
|
0f394a310a71fbbfcc1e22f6c47df92383025ca9
|
[] |
no_license
|
deepsideoflearning/freight_predictive_model
|
f3a898eb87bf8f03ab15bce065b7945c060b0655
|
11965ce729d406a2748d81759e2dc686769d130f
|
refs/heads/master
| 2021-09-14T20:34:46.784241
| 2018-05-18T19:45:03
| 2018-05-18T19:45:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
from util import *
import pickle
import numpy as np

# Batch inference script: load new freight routes from Excel, apply the same
# preprocessing and normalisation used at training time, and write the
# predicted freight-per-kg back out as CSV.
df = pd.read_excel('novas_rotas.xlsx')  # `pd` comes in via the util star import
data = select_cols(df)
data = make_dummies(data)
data = complete_cols(data)
norm_data = np.asarray(data)
# Drop the first column to form the feature matrix — presumably an id/target
# column; TODO confirm against util.select_cols.
X = norm_data[:, 1:]
# SECURITY NOTE: pickle.load executes arbitrary code on load; only unpickle
# scaler files from trusted sources.
y_norm = pickle.load(open('y_norm.pkl', 'rb'))
X_norm = pickle.load(open('X_norm.pkl', 'rb'))
X = X_norm.transform(X)
model = rnn_model(X)
model.load_weights('model_frete_weights.hdf5')
result = model.predict(X)
# Map normalised predictions back to real freight values.
result = y_norm.inverse_transform(result)
df['Frete por kg'] = result
df.to_csv('predict.csv', encoding='latin1')
|
[
"br_aquino@yahoo.com.br"
] |
br_aquino@yahoo.com.br
|
201ec0e778d39c619ca7d2db0f6caee17ddd1f95
|
d7363da78e6f1e8ae2c6abca3f845853756165d4
|
/src/adafruit_blinka/board/dragonboard_410c.py
|
a627309d6c32ff8ab6a13dc5b5cc9a989804b538
|
[
"MIT"
] |
permissive
|
adafruit/Adafruit_Blinka
|
7a9ed88f39ff12082d1b46647fa8869b541fba49
|
009b352a3234339000c32d2e61e830455cf389fa
|
refs/heads/main
| 2023-08-09T06:25:02.178935
| 2023-07-28T16:45:40
| 2023-07-28T16:45:40
| 120,540,744
| 398
| 331
|
MIT
| 2023-09-14T20:32:23
| 2018-02-07T00:25:03
|
Python
|
UTF-8
|
Python
| false
| false
| 972
|
py
|
# SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Pin definitions for the Dragonboard 410c."""
from adafruit_blinka.microcontroller.snapdragon.apq8016 import pin

# Letter-named aliases for the low-speed expansion header GPIOs.
GPIO_A = pin.GPIO_36
GPIO_B = pin.GPIO_12
GPIO_C = pin.GPIO_13
GPIO_D = pin.GPIO_69
GPIO_E = pin.GPIO_115
GPIO_F = pin.PM_MPP_4
GPIO_G = pin.GPIO_24
GPIO_H = pin.GPIO_25
GPIO_I = pin.GPIO_35
GPIO_J = pin.GPIO_34
GPIO_K = pin.GPIO_28
GPIO_L = pin.GPIO_33
# Number-named aliases mirroring the SoC GPIO numbers above.
GPIO_36 = pin.GPIO_36
GPIO_12 = pin.GPIO_12
GPIO_13 = pin.GPIO_13
GPIO_69 = pin.GPIO_69
GPIO_115 = pin.GPIO_115
GPIO_4 = pin.PM_MPP_4
GPIO_24 = pin.GPIO_24
GPIO_25 = pin.GPIO_25
GPIO_35 = pin.GPIO_35
GPIO_34 = pin.GPIO_34
GPIO_28 = pin.GPIO_28
GPIO_33 = pin.GPIO_33
# Default I2C bus (bus 0) plus explicit bus aliases.
SDA = pin.I2C0_SDA
SCL = pin.I2C0_SCL
I2C0_SDA = pin.I2C0_SDA
I2C0_SCL = pin.I2C0_SCL
I2C1_SDA = pin.I2C1_SDA
I2C1_SCL = pin.I2C1_SCL
# SPI bus 0.
SCLK = pin.SPI0_SCLK
MOSI = pin.SPI0_MOSI
MISO = pin.SPI0_MISO
SPI_CS = pin.SPI0_CS
|
[
"melissa@adafruit.com"
] |
melissa@adafruit.com
|
e4275df4e69cf6565d2afddbef18539b2d4d99f3
|
4f875744ccae8fa9225318ce16fc483b7bf2735e
|
/google/findDuplicate.py
|
44e01dd1b67af92eaf0af5a61e728e840331fdcb
|
[] |
no_license
|
nguyenngochuy91/companyQuestions
|
62c0821174bb3cb33c7af2c5a1e83a60e4a29977
|
c937fe19be665ba7ac345e1729ff531f370f30e8
|
refs/heads/master
| 2020-07-27T05:58:36.794033
| 2020-04-10T20:57:15
| 2020-04-10T20:57:15
| 208,893,527
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 20:34:41 2020
@author: huyn
"""
#609. Find Duplicate File in System
from typing import List
class Solution:
    def findDuplicate(self, paths: List[str]) -> List[List[str]]:
        """Group files that share identical content (LeetCode 609).

        Args:
            paths: strings of the form
                "directory file1.txt(content1) file2.txt(content2) ...".

        Returns:
            One list of "directory/filename" paths per content value that
            occurs in at least two files, in first-seen order.
        """
        groups = {}
        for path in paths:
            tokens = path.split()
            directory = tokens[0]
            for entry in tokens[1:]:
                pieces = entry.split("(")
                file_name = pieces[0]
                # The content is the text between '(' and ')'.
                content = pieces[1].split(")")[0]
                # setdefault replaces the original membership-check-then-append,
                # avoiding a double dict lookup per file.
                groups.setdefault(content, []).append(directory + "/" + file_name)
        return [group for group in groups.values() if len(group) >= 2]
|
[
"huyn@cvm6h4zv52.cvm.iastate.edu"
] |
huyn@cvm6h4zv52.cvm.iastate.edu
|
5eb5529af8f60cd68b06b563bc7e299035d7dbe5
|
c4c20c4c7653da52249dac4d9ced3ffcbcb2c299
|
/aidooit_people/migrations/0003_personhistory.py
|
86541a6fb8ec385c16959d24326c34acf4ad2a3e
|
[] |
no_license
|
rejamen/aidooit
|
4ebccdba65b07da29f13273c474dd45ddd78968d
|
31361307b70175d4e00ef4f7bbbb320ab7779551
|
refs/heads/master
| 2022-05-02T09:25:14.812540
| 2019-09-09T10:34:30
| 2019-09-09T10:34:30
| 191,284,990
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
# Generated by Django 2.2.3 on 2019-07-11 20:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the PersonHistory model: a dated record linked to a Person."""

    dependencies = [
        ('aidooit_people', '0002_person_email'),
    ]

    operations = [
        migrations.CreateModel(
            name='PersonHistory',
            fields=[
                # Standard Django auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                # Deleting a Person cascades to its history rows.
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aidooit_people.Person')),
            ],
        ),
    ]
|
[
"rejamen@gmail.com"
] |
rejamen@gmail.com
|
c3c919f2d48788afdacf95e58dcf569a9f7e7671
|
15611d5e76b1f25e04755bc9370390cd11ee004c
|
/revision/data/awardfix.py
|
e2b6ce6b7c2c474cda9ce32b2e4c39df885e0d86
|
[] |
no_license
|
paultopia/paulgowdercom
|
dd3d09fb027142188bf49ff3d65bbac600593ff1
|
b9583c4bae6b2db7274ebdaee799d7c675866160
|
refs/heads/master
| 2020-04-15T14:30:01.907498
| 2017-07-25T19:25:39
| 2017-07-25T19:25:39
| 51,056,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
import json

# One-shot data fix: rewrite awards.json entries from {"name", "year"} to
# {"award", "year"} and save the result as awards2.json.
with open('awards.json') as src:
    entries = json.load(src)

converted = [{"award": entry["name"], "year": entry["year"]} for entry in entries]

with open('awards2.json', 'w') as dst:
    json.dump(converted, dst)
|
[
"paul.gowder@gmail.com"
] |
paul.gowder@gmail.com
|
fc11ed2f1b37ee77de7206c64314dedf713c76d6
|
ac5d3907a4e1333dc89c8d037b555113fc950db3
|
/MainPage/migrations/0002_remove_post_preview_image.py
|
28ae2721e90c28c5d54196102ce4a980806528c4
|
[] |
no_license
|
Shkuin/CyberWorld
|
29e12cf56c7aa58079d44a7d008efeed05fee872
|
153d834cc7e33b75e46c534bebfd187ee00a4852
|
refs/heads/master
| 2022-11-10T09:12:34.529883
| 2020-06-28T17:02:10
| 2020-06-28T17:02:10
| 273,578,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
# Generated by Django 3.0.5 on 2020-04-17 11:58
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the unused preview_image field from the post model."""

    dependencies = [
        ('MainPage', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='post',
            name='preview_image',
        ),
    ]
|
[
"bioniklsn123@mail.ru"
] |
bioniklsn123@mail.ru
|
6f7d487f3b03c0c24d82708a38a6e037e81955a4
|
a8dfac05ab2726f00db4630a47741768174e3e96
|
/Information-Extraction/QANet/config.py
|
400ebd2a5f4b68c3390c842208e7b0e8622f0754
|
[] |
no_license
|
mohitsshah/documents-caf
|
0ba33a565ce272c3fb19a5cd388e8508825d15ba
|
7e457a2fc1f942de7ef2606feb860e6a952b41ef
|
refs/heads/master
| 2020-03-11T01:28:15.811371
| 2018-05-31T09:15:04
| 2018-05-31T09:15:04
| 129,692,354
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,809
|
py
|
import os
import tensorflow as tf
import json
# from prepro import prepro
# from main import train, test, demo

# QANet configuration: declares dataset/model paths, creates the output
# directory tree, and registers everything as tf flags.
flags = tf.flags
home = os.path.expanduser("~")

# SQuAD v1.1 inputs and GloVe embeddings, expected under ~/data.
# NOTE(review): dev and test point at the same file — presumably intentional
# (no hidden test set); confirm.
train_file = os.path.join(home, "data", "squad", "train-v1.1.json")
dev_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
test_file = os.path.join(home, "data", "squad", "dev-v1.1.json")
glove_word_file = os.path.join(home, "data", "glove", "glove.840B.300d.txt")

# Model output tree: models/FRC/{event,model,answer}.
train_dir = "models"
model_name = "FRC"
dir_name = os.path.join(train_dir, model_name)
if not os.path.exists(train_dir):
    os.mkdir(train_dir)
if not os.path.exists(os.path.join(os.getcwd(), dir_name)):
    os.mkdir(os.path.join(os.getcwd(), dir_name))
target_dir = "data"
log_dir = os.path.join(dir_name, "event")
save_dir = os.path.join(dir_name, "model")
answer_dir = os.path.join(dir_name, "answer")

# Preprocessed artifacts written by the prepro step.
train_record_file = os.path.join(target_dir, "train.tfrecords")
dev_record_file = os.path.join(target_dir, "dev.tfrecords")
test_record_file = os.path.join(target_dir, "test.tfrecords")
word_emb_file = os.path.join(target_dir, "word_emb.json")
char_emb_file = os.path.join(target_dir, "char_emb.json")
train_eval = os.path.join(target_dir, "train_eval.json")
dev_eval = os.path.join(target_dir, "dev_eval.json")
test_eval = os.path.join(target_dir, "test_eval.json")
dev_meta = os.path.join(target_dir, "dev_meta.json")
test_meta = os.path.join(target_dir, "test_meta.json")
word_dictionary = os.path.join(target_dir, "word_dictionary.json")
char_dictionary = os.path.join(target_dir, "char_dictionary.json")
answer_file = os.path.join(answer_dir, "answer.json")

# Create any missing directories (makedirs handles nested paths).
if not os.path.exists(target_dir):
    os.makedirs(target_dir)
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
if not os.path.exists(answer_dir):
    os.makedirs(answer_dir)

# Register run-time options as tf flags (listing continues past this chunk).
flags.DEFINE_string("mode", "train", "Running mode train/debug/test")
flags.DEFINE_string("target_dir", target_dir, "Target directory for out data")
flags.DEFINE_string("log_dir", log_dir, "Directory for tf event")
flags.DEFINE_string("save_dir", save_dir, "Directory for saving model")
flags.DEFINE_string("train_file", train_file, "Train source file")
flags.DEFINE_string("dev_file", dev_file, "Dev source file")
flags.DEFINE_string("test_file", test_file, "Test source file")
flags.DEFINE_string("glove_word_file", glove_word_file, "Glove word embedding source file")
flags.DEFINE_string("train_record_file", train_record_file, "Out file for train data")
flags.DEFINE_string("dev_record_file", dev_record_file, "Out file for dev data")
flags.DEFINE_string("test_record_file", test_record_file, "Out file for test data")
flags.DEFINE_string("word_emb_file", word_emb_file, "Out file for word embedding")
flags.DEFINE_string("char_emb_file", char_emb_file, "Out file for char embedding")
flags.DEFINE_string("train_eval_file", train_eval, "Out file for train eval")
flags.DEFINE_string("dev_eval_file", dev_eval, "Out file for dev eval")
flags.DEFINE_string("test_eval_file", test_eval, "Out file for test eval")
flags.DEFINE_string("dev_meta", dev_meta, "Out file for dev meta")
flags.DEFINE_string("test_meta", test_meta, "Out file for test meta")
flags.DEFINE_string("answer_file", answer_file, "Out file for answer")
flags.DEFINE_string("word_dictionary", word_dictionary, "Word dictionary")
flags.DEFINE_string("char_dictionary", char_dictionary, "Character dictionary")
flags.DEFINE_integer("glove_char_size", 94, "Corpus size for Glove")
flags.DEFINE_integer("glove_word_size", int(2.2e6), "Corpus size for Glove")
flags.DEFINE_integer("glove_dim", 300, "Embedding dimension for Glove")
flags.DEFINE_integer("char_dim", 64, "Embedding dimension for char")
flags.DEFINE_integer("para_limit", 400, "Limit length for paragraph")
flags.DEFINE_integer("ques_limit", 50, "Limit length for question")
flags.DEFINE_integer("ans_limit", 30, "Limit length for answers")
flags.DEFINE_integer("test_para_limit", 1000, "Limit length for paragraph in test file")
flags.DEFINE_integer("test_ques_limit", 100, "Limit length for question in test file")
flags.DEFINE_integer("char_limit", 16, "Limit length for character")
flags.DEFINE_integer("word_count_limit", -1, "Min count for word")
flags.DEFINE_integer("char_count_limit", -1, "Min count for char")
flags.DEFINE_integer("capacity", 15000, "Batch size of dataset shuffle")
flags.DEFINE_integer("num_threads", 4, "Number of threads in input pipeline")
flags.DEFINE_boolean("is_bucket", False, "build bucket batch iterator or not")
flags.DEFINE_list("bucket_range", [40, 401, 40], "the range of bucket")
flags.DEFINE_integer("batch_size", 32, "Batch size")
flags.DEFINE_integer("num_steps", 60000, "Number of steps")
flags.DEFINE_integer("checkpoint", 1000, "checkpoint to save and evaluate the model")
flags.DEFINE_integer("period", 100, "period to save batch loss")
flags.DEFINE_integer("val_num_batches", 150, "Number of batches to evaluate the model")
flags.DEFINE_float("dropout", 0.1, "Dropout prob across the layers")
flags.DEFINE_float("grad_clip", 5.0, "Global Norm gradient clipping rate")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_float("decay", 0.9999, "Exponential moving average decay")
flags.DEFINE_float("l2_norm", 3e-7, "L2 norm scale")
flags.DEFINE_integer("hidden", 96, "Hidden size")
flags.DEFINE_integer("num_heads", 1, "Number of heads in self attention")
flags.DEFINE_boolean("q2c", True, "Whether to use query to context attention or not")
# Extensions (Uncomment corresponding code in download.sh to download the required data)
glove_char_file = os.path.join(home, "data", "glove", "glove.840B.300d-char.txt")
flags.DEFINE_string("glove_char_file", glove_char_file, "Glove character embedding source file")
flags.DEFINE_boolean("pretrained_char", False, "Whether to use pretrained character embedding")
fasttext_file = os.path.join(home, "data", "fasttext", "wiki-news-300d-1M.vec")
flags.DEFINE_string("fasttext_file", fasttext_file, "Fasttext word embedding source file")
flags.DEFINE_boolean("fasttext", False, "Whether to use fasttext")
def main(_):
config = flags.FLAGS
params = config.flag_values_dict()
del params["mode"]
with open("config.json", "w") as fi:
fi.write(json.dumps(params))
# if config.mode == "train":
# train(config)
# elif config.mode == "prepro":
# prepro(config)
# elif config.mode == "debug":
# config.num_steps = 2
# config.val_num_batches = 1
# config.checkpoint = 1
# config.period = 1
# train(config)
# elif config.mode == "test":
# test(config)
# elif config.mode == "demo":
# demo(config)
# else:
# print("Unknown mode")
# exit(0)
if __name__ == "__main__":
tf.app.run()
|
[
"mohit@Sandeeps-MacBook-Air.local"
] |
mohit@Sandeeps-MacBook-Air.local
|
d65a31c823fa8efead544ec0a4f9c5345bc0530f
|
e6f0ebccf689ca4a3eb1b2349f0d9a8aa1af081e
|
/iblog/blog/models.py
|
0ca0d664c5dbb86c34d112017f146c6a8b048cb5
|
[] |
no_license
|
pragy540/IBlog
|
d5175b485903b8c454b9ba6ea29b34f9af97ade3
|
1615972d2ccdaf2cb586486456e210f1c86fa355
|
refs/heads/master
| 2022-07-09T00:30:35.227168
| 2020-05-17T14:14:44
| 2020-05-17T14:14:44
| 264,673,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from django.db import models
from django.utils.timezone import now
# Create your models here.
class Post(models.Model):
post_id=models.AutoField(primary_key= True)
author=models.CharField(max_length=100)
title=models.CharField(max_length=150)
content=models.TextField()
timeStamp=models.DateTimeField(default=now)
def __str__(self):
return self.title+" by "+ self.author
|
[
"pragyaptl131996@gmail.com"
] |
pragyaptl131996@gmail.com
|
3dcca22538909e4ca7c9e1f85a4a19c897d9ccc0
|
bf4178e73f0f83781be6784d7587cb34a38d6edd
|
/platform/radio/efr32_multiphy_configurator/pro2_chip_configurator/src/si4010_cfg_calc/si4010cfgcalcsecurity.py
|
3da55602e5855910430be093d1a8e3ae2b503b84
|
[] |
no_license
|
kolbertv/ZigbeeSiliconV3
|
80d70515e93be1413c24cdcb3485f50c65a1564b
|
ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9
|
refs/heads/master
| 2023-01-02T07:18:01.393003
| 2020-10-25T15:33:08
| 2020-10-25T15:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,459
|
py
|
'''
Created on August 25, 2013
@author: shyang
'''
__all__ = ["Si4010CfgCalcSecurity"]
class Si4010CfgCalcSecurity(object):
'''
classdocs
'''
OEM_Key16_Table = [
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0x4B, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x29, 0xCD, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xDC, 0x42, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x24, 0x20, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x02, 0x03, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x30, 0xBC, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
[0x63, 0xB4, 0x92, 0xCD, 0x42, 0x20, 0x03, 0xCB, 0x73, 0x29, 0x09, 0xBB, 0xFF, 0x6A, 0xDC, 0x6D],
]
OEM_ID_KEY_Table = [ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000 ]
def __init__(self, inputs):
self.cfg = {}
self.cfg_PQ_file = {}
# TODO check
if inputs.security.OEM_Key == []:
self.Key16 = self.OEM_Key16_Table[inputs.topLevelSetup.OTPcfgNum]
else:
self.Key16 = inputs.security.OEM_Key
self.cfg['bOEM_Key[16]'] = self.Key16
self.cfg_PQ_file['bOEM_Key[16]'] = self.Key16
if inputs.security.OEM_ID_Key == 0:
self.ID_Key = self.OEM_ID_KEY_Table[inputs.topLevelSetup.OTPcfgNum]
else:
self.ID_Key = inputs.security.OEM_ID_Key
self.cfg['lOEM_ID_Key'] = self.ID_Key
def get_ID_Key(self, index):
return self.OEM_ID_KEY_Table[index]
def get_Key16(self, index):
return self.OEM_Key16_Table[index]
def get_cfg_data(self):
return self.cfg
def dump(self):
print(' ------------- configuration data -------------')
for m in self.cfg:
print(' {} = {}'.format(m, self.cfg[m]))
|
[
"1048267279@qq.com"
] |
1048267279@qq.com
|
d30eee20a1d1d88fce083765411f82cec5f9d46b
|
cec0eab05940e68e110c9afc0ae953e8d21c2a79
|
/app/main/views.py
|
3b1658c374e21d0777f387570a6809032c50220f
|
[
"Apache-2.0"
] |
permissive
|
sunyyangy/AutoLine
|
6e9c8da62e8201c091a146546226e8923c63716d
|
4977ff70c1fc056fca022505d07bc7d6e06de995
|
refs/heads/master
| 2020-03-16T22:04:52.493028
| 2018-05-11T08:59:31
| 2018-05-11T08:59:31
| 133,025,915
| 1
| 0
| null | 2018-05-11T10:28:44
| 2018-05-11T10:28:44
| null |
UTF-8
|
Python
| false
| false
| 2,305
|
py
|
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
公众号: 开源优测
Email: lymking@foxmail.com
"""
import os
from flask import render_template, send_file
from flask_login import login_required, current_user, logout_user
from . import main
from ..utils.runner import run_process
from ..utils.report import Report
@main.route('/', methods=['GET'])
def index():
return render_template('index.html')
@login_required
@main.route('/dashboard', methods=['GET'])
def dashboard():
return render_template('dashboard.html', user=current_user)
@login_required
@main.route('/logout', methods=['GET'])
def logout():
logout_user()
return render_template('index.html')
@login_required
@main.route('/user', methods=['GET'])
def user():
return render_template('user.html', user=current_user)
@login_required
@main.route('/product', methods=['GET'])
def product():
return render_template('product.html', user=current_user)
@login_required
@main.route('/project', methods=['GET'])
def project():
return render_template('project.html', user=current_user)
@login_required
@main.route('/task/<id>', methods=['GET'])
def task(id):
return render_template('task.html', id=id)
@login_required
@main.route('/task_list', methods=['GET'])
def task_list():
return render_template('task_list.html')
@login_required
@main.route('/manage/<category>/<id>', methods=['GET'])
def manage(category, id):
return render_template('%s.html' % category, id=id)
#@login_required
@main.route('/test_run/<category>/<id>', methods=['GET'])
def test_run(category, id):
status = run_process(id)
return status
@login_required
@main.route('/report/<project_id>/<build_no>', methods=['GET'])
def report(project_id, build_no):
r = Report(project_id, build_no)
return r.build_report()
@login_required
@main.route('/detail/<project_id>/<build_no>', methods=['POST'])
def detail(project_id, build_no):
r = Report(project_id, build_no)
import json
return json.dumps(r.parser_detail_info())
@login_required
@main.route('/view_image/<project_id>/<build_no>/<filename>', methods=['GET'])
def view_image(project_id, build_no, filename):
img_path = os.getcwd() + "/logs/%s/%s/images/%s" % (project_id, build_no, filename)
return send_file(img_path)
|
[
"lyy@LYM.local"
] |
lyy@LYM.local
|
1673bc08529da23e92015f740be5d341a4f5d8c4
|
1fcb09533ae683e905e528412efbbe5dc5923a7e
|
/Programming101/Week15/cms/website/education/urls.py
|
3e18dbada3c9beb85133a76b2fb1bbdbef37d4fc
|
[] |
no_license
|
VikiDinkova/HackBulgaria
|
ac7ca668489986ca2262ced587801d8abd2de4f1
|
e2f226bf29b87f6d3b33bdc2b981e1224a8237aa
|
refs/heads/master
| 2021-01-10T14:34:03.201644
| 2016-03-24T17:03:45
| 2016-03-24T17:03:45
| 46,211,432
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^course/new/$', views.add_course)
]
|
[
"vikidinkova94@gmail.com"
] |
vikidinkova94@gmail.com
|
4b8356e015a92eae5abbff0173af027270ea6179
|
04bee90cccab4e628722413c687bd5c99e756074
|
/tron/cli/venv/Scripts/rst2s5.py
|
129e8b7e5543fe96021258c18f6d8d785c8d002d
|
[] |
no_license
|
XUMO-97/backup
|
3275eff2edddbf4d0985bdd98e743c6c1ecab20f
|
757ca980df6d58cfd78f7f27731692ef1f8540b6
|
refs/heads/master
| 2020-04-25T15:42:30.259247
| 2019-02-25T10:42:27
| 2019-02-25T10:42:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
#!d:\tron\cli\venv\scripts\python.exe
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
|
[
"xumo@onchain.com"
] |
xumo@onchain.com
|
e2239a0650d215016079290ab35e3ad0a2dd922f
|
bf36e89983f964b6f86eb015c5b25d07cb66dd5e
|
/orgs_and_ngos/views.py
|
90610dc734abcbedd7465883d20fcabf1333c4fa
|
[] |
no_license
|
Real-DeviLs/NASA-Hackathon
|
6e01e395d27f84fa340854c1fb1105ef7436ec09
|
c2e9f4b3bf7307de9e91f52b86ed4de7ca800b32
|
refs/heads/master
| 2022-12-28T12:44:56.445085
| 2020-10-03T14:12:45
| 2020-10-03T14:12:45
| 299,040,361
| 0
| 9
| null | 2020-10-03T14:12:46
| 2020-09-27T13:40:01
|
CSS
|
UTF-8
|
Python
| false
| false
| 327
|
py
|
from django.shortcuts import render
from .models import orgs,ngos
# Create your views here.
def orgs_view(request):
contents=orgs.objects.all()
return render(request,'orgs.html',{'contents':contents})
def ngos_view(request):
contents=ngos.objects.all()
return render(request,'ngos.html',{'contents':contents})
|
[
"matharooamrit098@gmail.com"
] |
matharooamrit098@gmail.com
|
9ee4147b880a0b424def9590a021fd355e44ae93
|
e5c39d112c3240e747f6dfd3260673a02bd384e9
|
/manuscript_figures/script_per_figure/gmsl.py
|
73c93be55ece0b77a0911fc034aa19c29afc59ee
|
[] |
no_license
|
carocamargo/regionalSLB
|
8057174d823d5f2cd7015fe1ea7a72921d2c780d
|
5b9e906b2e95928c9487750334b0b48ef95555cc
|
refs/heads/main
| 2023-04-09T11:21:50.423516
| 2023-01-09T12:28:36
| 2023-01-09T12:28:36
| 586,869,251
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,097
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 3 16:09:27 2022
@author: ccamargo
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 3 15:10:42 2022
@author: ccamargo
"""
# Import libraries
import xarray as xr
import numpy as np
# import os
import pandas as pd
import sys
# sys.path.append("/Users/ccamargo/Documents/github/SLB/")
# from utils_SLB import cluster_mean, plot_map_subplots, sum_linear, sum_square, get_dectime
# from utils_SLB import plot_map2 as plot_map
sys.path.append("/Users/ccamargo/Documents/py_scripts/")
import utils_SL as sl
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cmocean as cm
import matplotlib.pyplot as plt
# from matplotlib.gridspec import GridSpec
# from matplotlib.cm import ScalarMappable
cmap_trend = cm.cm.balance
cmap_unc = cm.tools.crop(cmap_trend,0,3,0)
# from matplotlib import cm as cmat
# import matplotlib.colors as col
# import seaborn as sns
# import scipy.stats as st
# from scipy import stats
# import sklearn.metrics as metrics
# import random
import warnings
warnings.filterwarnings("ignore","Mean of empty slice", RuntimeWarning)
import string
#%%
path_figures = '/Users/ccamargo/Desktop/manuscript_SLB/overleaf/figures/'
def make_figure(save=True,
path_to_figures = path_figures,
figname = 'gmsl',
figfmt='png'
):
fontsize=25
datasets = ['alt','sum', 'steric','barystatic', 'dynamic']
global settings
settings = set_settings()
#% % make list with datasets
titles = [settings['titles_dic'][dataset] for dataset in datasets]
das_unc,das_trend,das_ts = das(datasets)
dic = load_data(fmt = 'pkl')
landmask = dic['landmask']
tdec = dic['dims']['time']['tdec']
fig = plt.figure(figsize=(15,5),dpi=300)
ax2 = plt.subplot(111)
for idata,data in enumerate(das_ts):
data = data*landmask
mu = np.nanmean(data,axis=(1,2))
out = sl.get_ts_trend(tdec,mu,plot=False)
tr = np.round(out[0],2)
if tr==0:
tr=0.00
ax2.plot(tdec, mu - np.nanmean(mu[144:276]),
color=settings['colors_dic'][settings['acronym_dic'][datasets[idata]]],
label='{}: {:.2f} mm/yr'.format(titles[idata],tr),
linewidth=3)
plt.title('Global Mean Sea Level',fontsize=fontsize)
plt.ylabel('mm',fontsize=fontsize-5)
plt.xlabel('time',fontsize=fontsize-5)
#. plt.legend(fontsize=fontsize-5)
ax2.legend(loc='lower center', bbox_to_anchor=(0.5,-0.4),
ncol=3,
fancybox=True,
shadow=True,
fontsize=fontsize-8)
plt.show()
if save:
kurs=path_to_figures+figname+'.'+figfmt
fig.savefig(kurs,format=figfmt,dpi=300,bbox_inches='tight')
return
def load_data(path = '/Volumes/LaCie_NIOZ/data/budget/',
file = 'budget_v2',
fmt = 'pkl'
):
if fmt =='pkl':
return pd.read_pickle(path+file+'.'+fmt)
elif fmt =='nc':
return xr.open_dataset(path+file+'.'+fmt)
else:
raise 'format not recognized'
return
def das(datasets):
dic = load_data(fmt = 'pkl')
das_unc = []
das_trend = []
das_ts = []
for key in datasets:
if key =='sterodynamic':
das_unc.append(dic['steric']['unc'] + dic['dynamic']['unc'])
das_trend.append(dic['steric']['trend'] + dic['dynamic']['trend'])
das_ts.append(dic['steric']['ts'] + dic['dynamic']['ts'])
else:
das_unc.append(dic[key]['unc']*dic['landmask'])
das_trend.append(dic[key]['trend']*dic['landmask'])
das_ts.append(dic[key]['ts'])
return das_unc,das_trend,das_ts
def set_settings():
global settings
settings = {}
# Global plotting settings
settings['fontsize']=25
settings['lon0']=201;
settings['fsize']=(15,10)
settings['proj']='robin'
settings['land']=True
settings['grid']=False
settings['landcolor']='papayawhip'
settings['extent'] = False
settings['plot_type'] = 'contour'
settings['colors_dic'] = {
'Altimetry':'mediumseagreen',
'Sum':'mediumpurple',
'Steric':'goldenrod',
'Dynamic':'indianred',
'Barystatic':'steelblue',
'Sterodynamic':'palevioletred',
'Residual':'gray'
}
settings['acronym_dic'] = {
'alt':'Altimetry',
'sum':'Sum',
'steric':'Steric',
'res':'Residual',
'dynamic':'Dynamic',
'barystatic':'Barystatic'
}
settings['titles_dic'] = {
'alt':r"$\eta_{obs}$",
'steric': r"$\eta_{SSL}$",
'sum': r"$\sum(\eta_{SSL}+\eta_{GRD}+\eta_{DSL})$",
'barystatic':r"$\eta_{GRD}$",
'res': r"$\eta_{obs} - \eta_{\sum(drivers)}$",
'dynamic':r"$\eta_{DSL}$",
}
settings['letters'] = list(string.ascii_lowercase)
return settings
|
[
"carolina.camargo@nioz.nl"
] |
carolina.camargo@nioz.nl
|
090dd601c11a36a06bf98fdc0f553def999da558
|
c2f58e28937d88c76182f4950cb8662c4ed39d52
|
/celery_worker.py
|
4f0e5ea87c4dda9e6238755191c13b07244bfb52
|
[] |
no_license
|
cacods/flask-chat
|
537ea855fa8fc11f5142576b4debd12668c35c6b
|
ad80967223543016ff7d2b3ef1d77173d3c89c96
|
refs/heads/master
| 2023-06-16T12:52:25.072104
| 2021-07-15T23:41:51
| 2021-07-15T23:41:51
| 386,438,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
from chat_app import celery, create_app
app = create_app()
app.app_context().push()
|
[
"cacods21@gmail.com"
] |
cacods21@gmail.com
|
0eb5470f765d0b30122b58e73c2d8332fddc228e
|
ad9f741f31d07d448c0d963f4a2428f3c6ed7f4f
|
/Projetos/Projeto 2/knowledge_base.py
|
db7ffd003d72ef614e321a8e0782598cb2ebea57
|
[] |
no_license
|
MekaMdan/IIA
|
3e8a912065f768789c912ef56c30681f1a23b740
|
2930d26408ad789495d49effe3c61534a5f667a6
|
refs/heads/master
| 2023-01-22T19:39:51.356246
| 2020-11-30T02:19:50
| 2020-11-30T02:19:50
| 292,424,237
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,335
|
py
|
# Dor nos músculos / Frequênca dor na articulação / Dor de cabeça
ALTA = 3
MEDIA = 2
BAIXA = 1
# Intensidade dor articular / coceira / discrasia / hipertrofia ganglionar
INTENSA = 3
MODERADA = 2
LEVE = 1
AUSENTE = 0
# Edema na articulação
EDEMA_RARO = 1
EDEMA_LEVE = 2
EDEMA_MODERADO = 3
EDEMA_INTENSO = 4
# Frequência do edema
EDEMA_FREQUENTE = 5
EDEMA_N_FREQUENTE = 6
# Acometimento Neurológico
ACOMETIMENTO_RARO = 0
ACOMETIMENTO_FREQ = 1
# Presença de conjuntivite
CONJUNTIVITE_AUSENTE = 0
CONJUNTIVITE = 1
def knowledge_base():
return {
'dengue': [
{'grauDeFebre': 39, 'tempoDeFebre': 4, 'surgimentoManchas': 4, 'dorNosMusculos': ALTA, 'freqDorArticular': BAIXA,
'IntensidadeDorArticular': LEVE, 'edemaArticulacao': EDEMA_RARO, 'freqEdemaArticulacao': EDEMA_N_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': ALTA, 'coceira': LEVE, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': MODERADA, 'acometimentoNeurologico': ACOMETIMENTO_RARO},
{'grauDeFebre': 40, 'tempoDeFebre': 5, 'surgimentoManchas': 4, 'dorNosMusculos': ALTA, 'freqDorArticular': BAIXA,
'IntensidadeDorArticular': LEVE, 'edemaArticulacao': EDEMA_RARO, 'freqEdemaArticulacao': EDEMA_N_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': ALTA, 'coceira': LEVE, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': MODERADA, 'acometimentoNeurologico': ACOMETIMENTO_RARO},
{'grauDeFebre': 41, 'tempoDeFebre': 6, 'surgimentoManchas': 4, 'dorNosMusculos': ALTA, 'freqDorArticular': BAIXA,
'IntensidadeDorArticular': LEVE, 'edemaArticulacao': EDEMA_RARO, 'freqEdemaArticulacao': EDEMA_N_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': ALTA, 'coceira': LEVE, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': MODERADA, 'acometimentoNeurologico': ACOMETIMENTO_RARO},
{'grauDeFebre': 42, 'tempoDeFebre': 7, 'surgimentoManchas': 4, 'dorNosMusculos': ALTA, 'freqDorArticular': BAIXA,
'IntensidadeDorArticular': LEVE, 'edemaArticulacao': EDEMA_RARO, 'freqEdemaArticulacao': EDEMA_N_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': ALTA, 'coceira': LEVE, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': MODERADA, 'acometimentoNeurologico': ACOMETIMENTO_RARO}
],
'zika': [
{'grauDeFebre': 37, 'tempoDeFebre': 0, 'surgimentoManchas': 1, 'dorNosMusculos': MEDIA, 'freqDorArticular': MEDIA,
'IntensidadeDorArticular': LEVE, 'edemaArticulacao': EDEMA_LEVE, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE, 'dorDeCabeca': MEDIA, 'coceira': MODERADA, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': AUSENTE, 'acometimentoNeurologico': ACOMETIMENTO_FREQ},
{'grauDeFebre': 38, 'tempoDeFebre': 1, 'surgimentoManchas': 2, 'dorNosMusculos': MEDIA, 'freqDorArticular': MEDIA,
'IntensidadeDorArticular': MODERADA, 'edemaArticulacao': EDEMA_LEVE, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': MEDIA, 'coceira': INTENSA, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': AUSENTE, 'acometimentoNeurologico': ACOMETIMENTO_FREQ},
{'grauDeFebre': 38, 'tempoDeFebre': 2, 'surgimentoManchas': 2, 'dorNosMusculos': MEDIA, 'freqDorArticular': MEDIA,
'IntensidadeDorArticular': MODERADA, 'edemaArticulacao': EDEMA_LEVE, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE, 'dorDeCabeca': MEDIA, 'coceira': INTENSA, 'hipertrofiaGanglionar': LEVE,
'discrasiaHemorragica': AUSENTE, 'acometimentoNeurologico': ACOMETIMENTO_FREQ},
],
'chikungunya': [
{'grauDeFebre': 39, 'tempoDeFebre': 2, 'surgimentoManchas': 2, 'dorNosMusculos': BAIXA, 'freqDorArticular': ALTA,
'IntensidadeDorArticular': MODERADA, 'edemaArticulacao': EDEMA_MODERADO, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': MEDIA, 'coceira': LEVE, 'hipertrofiaGanglionar': MODERADA,
'discrasiaHemorragica': LEVE, 'acometimentoNeurologico': ACOMETIMENTO_RARO},
{'grauDeFebre': 41, 'tempoDeFebre': 3, 'surgimentoManchas': 5, 'dorNosMusculos': BAIXA, 'freqDorArticular': ALTA,
'IntensidadeDorArticular': INTENSA, 'edemaArticulacao': EDEMA_INTENSO, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE, 'dorDeCabeca': MEDIA, 'coceira': LEVE, 'hipertrofiaGanglionar': MODERADA,
'discrasiaHemorragica': LEVE, 'acometimentoNeurologico': ACOMETIMENTO_RARO
},
{'grauDeFebre': 42, 'tempoDeFebre': 3, 'surgimentoManchas': 4, 'dorNosMusculos': BAIXA, 'freqDorArticular': ALTA,
'IntensidadeDorArticular': INTENSA, 'edemaArticulacao': EDEMA_INTENSO, 'freqEdemaArticulacao': EDEMA_FREQUENTE,
'conjuntivite': CONJUNTIVITE_AUSENTE, 'dorDeCabeca': MEDIA, 'coceira': LEVE, 'hipertrofiaGanglionar': MODERADA,
'discrasiaHemorragica': LEVE, 'acometimentoNeurologico': ACOMETIMENTO_RARO
},
]
}
|
[
"nanda.sousa.m@gmail.com"
] |
nanda.sousa.m@gmail.com
|
4a4eadcb8c18b97d45799ba9e8da0619aa28ea5f
|
298e47d0ffad9ac374f7e6fecf1eb20f340f680b
|
/django/realest_estate/backend/realtors/views.py
|
ced5a78d726a7f34d70164a2b4a15436d74a678e
|
[] |
no_license
|
nahidsaikat/Learning
|
7be26eba70a9e8a132424cfab828477792a243b7
|
75c7a45d8f7c61a28c8ed29438b1765f2761610a
|
refs/heads/master
| 2023-01-29T14:22:09.508428
| 2022-01-16T11:32:31
| 2022-01-16T11:32:31
| 155,268,879
| 1
| 1
| null | 2023-01-19T16:33:08
| 2018-10-29T19:27:54
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 703
|
py
|
from rest_framework.generics import ListAPIView, RetrieveAPIView
from rest_framework import permissions
from .models import Realtor
from .serializers import RealtorSerializer
class RealtorListView(ListAPIView):
permission_classes = [permissions.AllowAny]
serializer_class = RealtorSerializer
queryset = Realtor.objects.all()
pagination_class = None
class RealtorRetrieveView(RetrieveAPIView):
serializer_class = RealtorSerializer
queryset = Realtor.objects.all()
class TopSellerListView(ListAPIView):
permission_classes = [permissions.AllowAny]
serializer_class = RealtorSerializer
queryset = Realtor.objects.filter(top_seller=True)
pagination_class = None
|
[
"nahidur.rahman@newtonx.com"
] |
nahidur.rahman@newtonx.com
|
8f9c7c45bf173c6b1593881386614ed222c6c593
|
2bf43e862b432d44ba545beea4e67e3e086c1a1c
|
/tests/nemo_text_processing/zh/test_char.py
|
1ca553eca3d027fe254df28f4d9b682ca08f9b57
|
[
"Apache-2.0"
] |
permissive
|
ericharper/NeMo
|
719e933f6ffce1b27358bc21efe87cdf144db875
|
f1825bc4b724b78c2d6ca392b616e8dc9a8cde04
|
refs/heads/master
| 2022-10-06T01:45:21.887856
| 2022-09-14T19:09:42
| 2022-09-14T19:09:42
| 259,380,135
| 1
| 0
|
Apache-2.0
| 2022-09-20T18:01:57
| 2020-04-27T15:54:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,257
|
py
|
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
class TestChar:
normalizer_zh = Normalizer(lang='zh', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')
@parameterized.expand(parse_test_case_file('zh/data_text_normalization/test_cases_char.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm_char(self, test_input, expected):
preds = self.normalizer_zh.normalize(test_input)
assert expected == preds
|
[
"noreply@github.com"
] |
noreply@github.com
|
a5c9186d5e1f0dbcda4d98c83b2cbd55c9188cc0
|
e849853a8abbce8b0c7594b4418407219242c8a7
|
/benchmark/train/original_data/resnet_101.py
|
b2fa10774b27e1866f9424413cf3cdbcbe7d51d1
|
[] |
no_license
|
qihaoyang123/Automatic-Rail-Surface-Multi-flaw-Identification
|
736a000a2d47d18301983c8153903147b47207e8
|
c3fda9ef965564f9cb8fb5bf32b7849395753317
|
refs/heads/main
| 2023-06-17T04:36:46.136158
| 2021-07-08T13:57:19
| 2021-07-08T13:57:19
| 383,699,103
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,320
|
py
|
# -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.optimizers import SGD, Adam
from keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, merge, Reshape, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras import backend as K
from sklearn.metrics import log_loss, accuracy_score
from keras.models import load_model
import numpy as np
import os
import cv2
import pandas as pd
import time
from sklearn.model_selection import KFold
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import tensorflow as tf
from custom_layers.scale_layer import Scale
os.environ["CUDA_VISIBLE_DEVICES"]="1,2"
import sys
sys.setrecursionlimit(3000)
def identity_block(input_tensor, kernel_size, filters, stage, block):
'''The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: defualt 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
'''
eps = 1.1e-5
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
scale_name_base = 'scale' + str(stage) + block + '_branch'
x = Convolution2D(nb_filter1, 1, 1, name=conv_name_base + '2a', bias=False)(input_tensor)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
x = Activation('relu', name=conv_name_base + '2a_relu')(x)
x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
x = Convolution2D(nb_filter2, kernel_size, kernel_size,
name=conv_name_base + '2b', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
x = Activation('relu', name=conv_name_base + '2b_relu')(x)
x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', bias=False)(x)
x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)
x = merge([x, input_tensor], mode='sum', name='res' + str(stage) + block)
x = Activation('relu', name='res' + str(stage) + block + '_relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    '''Residual block with a projection (conv) shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer on the main path
        filters: list of three ints, the nb_filters of the 3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3 the first conv on the main path and the shortcut
    both downsample with subsample=(2,2).
    '''
    eps = 1.1e-5
    f1, f2, f3 = filters
    suffix = str(stage) + block + '_branch'
    conv_base = 'res' + suffix
    bn_base = 'bn' + suffix
    scale_base = 'scale' + suffix

    # 1x1 reduction; this is where the (optional) downsampling happens.
    out = Convolution2D(f1, 1, 1, subsample=strides,
                        name=conv_base + '2a', bias=False)(input_tensor)
    out = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_base + '2a')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2a')(out)
    out = Activation('relu', name=conv_base + '2a_relu')(out)

    # kxk conv on the main path
    out = ZeroPadding2D((1, 1), name=conv_base + '2b_zeropadding')(out)
    out = Convolution2D(f2, kernel_size, kernel_size,
                        name=conv_base + '2b', bias=False)(out)
    out = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_base + '2b')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2b')(out)
    out = Activation('relu', name=conv_base + '2b_relu')(out)

    # 1x1 expansion
    out = Convolution2D(f3, 1, 1, name=conv_base + '2c', bias=False)(out)
    out = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_base + '2c')(out)
    out = Scale(axis=bn_axis, name=scale_base + '2c')(out)

    # Projection shortcut so shapes/strides match the main path.
    shortcut = Convolution2D(f3, 1, 1, subsample=strides,
                             name=conv_base + '1', bias=False)(input_tensor)
    shortcut = BatchNormalization(epsilon=eps, axis=bn_axis,
                                  name=bn_base + '1')(shortcut)
    shortcut = Scale(axis=bn_axis, name=scale_base + '1')(shortcut)

    out = merge([out, shortcut], mode='sum', name='res' + str(stage) + block)
    return Activation('relu', name='res' + str(stage) + block + '_relu')(out)
def resnet101_model(img_rows, img_cols, color_type=1, num_classes=None):
    """
    Resnet 101 Model for Keras
    Model Schema and layer naming follow that of the original Caffe implementation
    https://github.com/KaimingHe/deep-residual-networks
    ImageNet Pretrained Weights
    Theano: https://drive.google.com/file/d/0Byy2AcGyEVxfdUV1MHJhelpnSG8/view?usp=sharing
    TensorFlow: https://drive.google.com/file/d/0Byy2AcGyEVxfTmRRVmpGWDczaXM/view?usp=sharing
    Parameters:
      img_rows, img_cols - resolution of inputs
      channel - 1 for grayscale, 3 for color
      num_classes - number of class labels for our classification task
    Returns a compiled Model whose final softmax layer ('fc8') has
    num_classes outputs; all earlier layers carry ImageNet weights
    loaded by name from the local 'imagenet_models' directory.
    """
    eps = 1.1e-5
    # Handle Dimension Ordering for different backends.
    # bn_axis is global because identity_block/conv_block read it.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='data')
    # Stage 1: 7x7/2 conv stem + 3x3/2 max pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name='bn_conv1')(x)
    x = Scale(axis=bn_axis, name='scale_conv1')(x)
    x = Activation('relu', name='conv1_relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)
    # Stage 2: 3 residual blocks.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3: 1 conv + 3 identity blocks.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for i in range(1,4):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b'+str(i))
    # Stage 4: 1 conv + 22 identity blocks - this depth is what makes it
    # ResNet-101 rather than ResNet-50.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    for i in range(1,23):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b'+str(i))
    # Stage 5: 3 residual blocks.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    # Original 1000-way ImageNet head; built so the pre-trained weights
    # can be loaded by layer name below.
    x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_fc = Flatten()(x_fc)
    x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)
    model = Model(img_input, x_fc)
    if K.image_dim_ordering() == 'th':
        # Use pre-trained weights for Theano backend
        weights_path = 'imagenet_models/resnet101_weights_th.h5'
    else:
        # Use pre-trained weights for Tensorflow backend
        weights_path = 'imagenet_models/resnet101_weights_tf.h5'
    model.load_weights(weights_path, by_name=True)
    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_newfc = Flatten()(x_newfc)
    x_newfc = Dense(num_classes, activation='softmax', name='fc8')(x_newfc)
    model = Model(img_input, x_newfc)
    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # Evaluate fine-tuned ResNet-101 fold checkpoints on a 6-class
    # rail-defect dataset with 4-fold cross-validation.
    img_rows, img_cols = 224, 224  # Resolution of inputs
    channel = 3
    num_classes = 6
    batch_size = 8
    nb_epoch = 100
    # NOTE(review): this CSV is read but never used below - confirm it is
    # still needed before removing it.
    file = pd.read_csv(r'C:\Users\sdscit\Desktop\Data-defect\analysis_validation_select_checked.csv')
    label_dict = {'normal': 0, 'Corrugation': 1, 'Defect': 2,
                  'Rail with Grinding Mark': 3, 'Shelling': 4, 'Squat': 5}

    def read_image(path, label_name):
        """Load every image under `path` resized to 224x224 and return
        (images, labels) where each label is label_dict[label_name]."""
        data = []
        labels = np.array([])
        for fname in os.listdir(path):
            img = cv2.imread(os.path.join(path, fname))
            if img.shape[1] < 224:
                # Narrow images: pad the width with white borders first so
                # the resize does not stretch them.
                pad = int((224 - img.shape[1]) / 2)
                img = cv2.resize(
                    cv2.copyMakeBorder(img, 0, 0, pad, pad,
                                       cv2.BORDER_CONSTANT, value=255),
                    (224, 224))
            else:
                img = cv2.resize(img, (224, 224))
            data.append(img)
            labels = np.append(labels, label_dict[label_name])
        return np.array(data), labels

    def kth_fold(kf, x_data, y_data, threshold):
        """Return (x_train, x_test, y_train, y_test) for the 1-based
        `threshold`-th split of `kf` (replaces six copy-pasted loops)."""
        index = 0
        for train_index, test_index in kf.split(x_data):
            index += 1
            if index == threshold:
                return (x_data[train_index], x_data[test_index],
                        y_data[train_index], y_data[test_index])

    kf = KFold(n_splits=4)
    per_class = [
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\Corrugation', 'Corrugation'),
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\Defect', 'Defect'),
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\Rail_with_Grinding_Mark', 'Rail with Grinding Mark'),
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\Shelling', 'Shelling'),
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\Squat', 'Squat'),
        read_image(r'C:\Users\sdscit\Desktop\Data-defect\normal_all_resize', 'normal'),
    ]
    # Per-class accuracy accumulated over the four folds, weighted by fold
    # size so each entry ends up as the class's overall CV accuracy.
    class_scores = [0.0] * len(per_class)

    for threshold in range(1, 5):
        folds = [kth_fold(kf, xd, yd, threshold) for xd, yd in per_class]
        x_train = np.concatenate([f[0] for f in folds])
        x_test = np.concatenate([f[1] for f in folds])
        y_train = np_utils.to_categorical(
            np.concatenate([f[2] for f in folds]), num_classes=6)
        y_test = np_utils.to_categorical(
            np.concatenate([f[3] for f in folds]), num_classes=6)

        #filepath = r'C:\Users\sdscit\Desktop\cnn_finetune-master\model\resnet_101_'+time.strftime("%m-%d",time.localtime())+'_'+str(threshold)
        filepath = r'C:\Users\sdscit\Desktop\cnn_finetune-master\model\resnet_101_12-09_' + str(threshold)
        # Training is disabled; pre-trained per-fold checkpoints are loaded
        # below instead.
        '''
        model = resnet101_model(img_rows, img_cols, channel, num_classes)
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max', period=1)
        callbacks_list = [checkpoint]
        # Start Fine-tuning
        model.fit(x_train, y_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  shuffle=True,
                  verbose=2,
                  validation_data=(x_test, y_test),
                  callbacks = callbacks_list
                  )
        K.clear_session()
        tf.reset_default_graph()
        '''
        start = time.time()
        model = load_model(filepath, custom_objects={'Scale': Scale})
        print("Execution Time: ", time.time() - start)

        # BUG FIX: the original predicted on the combined x_test for class 1
        # but scored it against y1_test (arrays of different length). Every
        # class is now predicted and scored on its own fold's test split.
        for i, fold in enumerate(folds):
            x_fold, y_fold = fold[1], fold[3]
            preds = model.predict(x_fold, batch_size=batch_size, verbose=1)
            pred_labels = np.argmax(preds, axis=1)
            class_scores[i] += (accuracy_score(y_fold, pred_labels)
                                * len(x_fold) / len(per_class[i][0]))
|
[
"895714656@qq.com"
] |
895714656@qq.com
|
b12ac59426ccc36f6b9534995fe07d7f4cbb4b03
|
870ffac5df4f386c2d79263d8424d1e2c3844837
|
/hal/player.py
|
8f716862f6371e78705ee5d42f69119cbe38669b
|
[] |
no_license
|
easying001/DcsDemo
|
5f077dffaa1f6f993928d72977968f0aa12e21e1
|
e5811bd48deb5d16b4a12774d8e9e13e71d5eea2
|
refs/heads/master
| 2021-06-15T21:57:10.159855
| 2020-07-30T07:11:13
| 2020-07-30T07:11:13
| 98,290,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 661
|
py
|
import pyaudio
import PyMedia
import wave
class Player():
    """Plays WAV files on the default audio output device via PyAudio."""

    def __init__(self):
        # Single-arg print with parentheses works identically on
        # Python 2 and 3.
        print("Player Initialized")

    def play_file(self, path):
        """Stream the WAV file at `path` to the sound card in 1 KiB
        frame chunks.

        Bug fix: the original never closed the wave file or the PyAudio
        stream when playback failed; both are now released via
        try/finally.
        """
        chunk = 1024
        f = wave.open(path, "rb")
        try:
            p = pyaudio.PyAudio()
            stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
                            channels=f.getnchannels(),
                            rate=f.getframerate(),
                            output=True)
            try:
                data = f.readframes(chunk)
                while data:
                    stream.write(data)
                    data = f.readframes(chunk)
            finally:
                stream.stop_stream()
                stream.close()
                p.terminate()
        finally:
            f.close()
|
[
"yangjie11@baidu.com"
] |
yangjie11@baidu.com
|
114051973dfb7998dcd083c97d150316f7106c68
|
5399a0be824d32475b6cf643d6cc1203a4752141
|
/main.py
|
ca5f015a6cfc287e8dd4d75ecd69d7693e905bed
|
[] |
no_license
|
dalon1/python-firebase-db-test
|
5dcbb3700567bcf33d9bc7bf780a8020e10f2752
|
e16a931fab2ec805f7d16475ba3b8727bb771224
|
refs/heads/master
| 2023-02-08T07:30:34.720433
| 2020-12-31T21:30:02
| 2020-12-31T21:30:02
| 325,874,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
# Importing packages
import requests
import yaml

# 1. Read the configuration file (expects data_source_url and
#    firebase_db_url keys).
with open("config.yml") as file:
    config = yaml.load(file, Loader=yaml.FullLoader)
print("Config Object: " + str(config))

# 2. Call the data source rest api (e.g. country-rest-api) and parse data.
#    A timeout prevents the script from hanging forever, and
#    raise_for_status() fails fast instead of json-parsing an error body.
api_response = requests.get(config.get("data_source_url"), timeout=30)
api_response.raise_for_status()
raw_data = api_response.json()

# 2.1. Just getting the name for each country record
parsed_data = [country.get("name") for country in raw_data]
# print(parsed_data)

# 3.0 Log in to firebase realtime database and store the parsed data.
from firebase import firebase

firebase_db = firebase.FirebaseApplication(config.get("firebase_db_url"), authentication=None)
firebase_db_result = firebase_db.get('/countries', None, {'print': 'pretty'})
print("First DB Request - Output: " + str(firebase_db_result))

# 3.1. Push data to firebase realtime database
firebase_db.post('/countries', parsed_data, {'print': 'pretty'}, {'X_FANCY_HEADER': 'VERY FANCY'})

# 3.2. Confirm new parsed data is stored in firebase realtime db
# (message fixed: the original omitted the ": " separator here).
firebase_db_result = firebase_db.get('/countries', None, {'print': 'pretty'})
print("Second DB Request - Output: " + str(firebase_db_result))
|
[
"dalon@Katrinas-MacBook-Air.local"
] |
dalon@Katrinas-MacBook-Air.local
|
d1dd3215917e4cb08a2834d996811daf313c8420
|
586d8b2ee7e531537ae3ec2a4683c595ced09207
|
/bin/django-admin.py
|
8782171b2a973d44b0093762c5e18b5260703599
|
[] |
no_license
|
nicholasmercurio/Pastey
|
89dfbbbaa3461324dbc2c52a63bd7471b9a883c4
|
7432364e1bbc6ce7a90913a2704b0776ebf93640
|
refs/heads/master
| 2021-10-23T08:58:08.353531
| 2019-03-16T08:30:16
| 2019-03-16T08:30:16
| 174,892,280
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
#!/home/nick/Dev/Pastey/bin/python3.6
"""Thin wrapper that hands the command line to Django's management CLI."""
from django.core.management import execute_from_command_line

if __name__ == "__main__":
    execute_from_command_line()
|
[
"nick@nicholasmercur.io"
] |
nick@nicholasmercur.io
|
2bb192e13d0b897544b36848f736cf1666918f37
|
e8160ba62759fc390daf60d88146e95c0c0de1b4
|
/TestDjangoORM/settings.py
|
97366c9073674155c60edddae7971a54bbb699fe
|
[] |
no_license
|
imranq2/TestDjangoORM
|
2a3a72aff36f03b6e2bb1a0f394a3499d2607bba
|
8d51d772f42635c0dbbd1d462057defaa9cdfbff
|
refs/heads/master
| 2023-01-05T23:07:07.662717
| 2020-11-03T04:36:44
| 2020-11-03T04:36:44
| 309,496,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,510
|
py
|
"""
Django settings for TestDjangoORM project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@f5a-qggnb9d=y^%tcto40rnxzb=6kq5)=077s*9in+$wx&y37'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
# Django stuff
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TestDjangoORM.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TestDjangoORM.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
LOGGING = {
'version': 1,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
},
},
'root': {
'handlers': ['console'],
}
}
|
[
"imranq2@hotmail.com"
] |
imranq2@hotmail.com
|
8ee343a9529c4a9d48aa41f9d1404b81f9747738
|
ad3737fdd3fd04dc93e6651277d8ce7ca9c31264
|
/neutron-sriov/neutron/plugins/vmware/vshield/edge_appliance_driver.py
|
782a528e740f3193402834faf1b14f1fa489178e
|
[
"Apache-2.0"
] |
permissive
|
VeenaSL/sriov
|
15ec75bda045a26d0a9b577d3b446914518bafdd
|
f2850ef8e327250bc6e5d12eb1e3413e04f67154
|
refs/heads/master
| 2021-01-10T15:25:46.294154
| 2015-05-26T10:03:04
| 2015-05-26T10:03:04
| 36,286,683
| 0
| 1
| null | 2020-07-24T06:10:58
| 2015-05-26T09:45:52
|
Python
|
UTF-8
|
Python
| false
| false
| 25,894
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Kaiwei Fan, VMware, Inc.
# @author: Bo Link, VMware, Inc.
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import (
constants as vcns_const)
from neutron.plugins.vmware.vshield.common.constants import RouterStatus
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks.constants import TaskState
from neutron.plugins.vmware.vshield.tasks.constants import TaskStatus
from neutron.plugins.vmware.vshield.tasks import tasks
LOG = logging.getLogger(__name__)
class EdgeApplianceDriver(object):
def __init__(self):
# store the last task per edge that has the latest config
self.updated_task = {
'nat': {},
'route': {},
}
def _assemble_edge(self, name, appliance_size="compact",
deployment_container_id=None, datacenter_moid=None,
enable_aesni=True, hypervisor_assist=False,
enable_fips=False, remote_access=False):
edge = {
'name': name,
'fqdn': name,
'hypervisorAssist': hypervisor_assist,
'type': 'gatewayServices',
'enableAesni': enable_aesni,
'enableFips': enable_fips,
'cliSettings': {
'remoteAccess': remote_access
},
'appliances': {
'applianceSize': appliance_size
},
'vnics': {
'vnics': []
}
}
if deployment_container_id:
edge['appliances']['deploymentContainerId'] = (
deployment_container_id)
if datacenter_moid:
edge['datacenterMoid'] = datacenter_moid
return edge
def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
appliance = {}
if resource_pool_id:
appliance['resourcePoolId'] = resource_pool_id
if datastore_id:
appliance['datastoreId'] = datastore_id
return appliance
def _assemble_edge_vnic(self, name, index, portgroup_id,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
enable_proxy_arp=False,
enable_send_redirects=True,
is_connected=True,
mtu=1500):
vnic = {
'index': index,
'name': name,
'type': type,
'portgroupId': portgroup_id,
'mtu': mtu,
'enableProxyArp': enable_proxy_arp,
'enableSendRedirects': enable_send_redirects,
'isConnected': is_connected
}
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'IpAddressesDto'
}
vnic['addressGroups'] = {
'addressGroups': [address_group]
}
return vnic
def _edge_status_to_level(self, status):
if status == 'GREEN':
status_level = RouterStatus.ROUTER_STATUS_ACTIVE
elif status in ('GREY', 'YELLOW'):
status_level = RouterStatus.ROUTER_STATUS_DOWN
else:
status_level = RouterStatus.ROUTER_STATUS_ERROR
return status_level
def _enable_loadbalancer(self, edge):
if not edge.get('featureConfigs') or (
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'loadbalancer_4.0',
'enabled': True})
    def get_edge_status(self, edge_id):
        """Return the RouterStatus level for one edge.

        API failures degrade to ERROR, except the specific
        "edge not running" error code which maps to DOWN.
        """
        try:
            response = self.vcns.get_edge_status(edge_id)[1]
            status_level = self._edge_status_to_level(
                response['edgeStatus'])
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Failed to get edge status:\n%s"),
                          e.response)
            status_level = RouterStatus.ROUTER_STATUS_ERROR
            try:
                desc = jsonutils.loads(e.response)
                if desc.get('errorCode') == (
                    vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
                    status_level = RouterStatus.ROUTER_STATUS_DOWN
            except ValueError:
                # Response body was not JSON; keep ERROR and log raw body.
                LOG.exception(e.response)
        return status_level
def get_edges_statuses(self):
edges_status_level = {}
edges = self._get_edges()
for edge in edges['edgePage'].get('data', []):
edge_id = edge['id']
status = edge['edgeStatus']
edges_status_level[edge_id] = self._edge_status_to_level(status)
return edges_status_level
    def _update_interface(self, task):
        """Task body: push a vnic config from task.userdata to VCNS.

        Logs and re-raises any failure; returns COMPLETED on success.
        """
        edge_id = task.userdata['edge_id']
        config = task.userdata['config']
        LOG.debug(_("VCNS: start updating vnic %s"), config)
        try:
            self.vcns.update_interface(edge_id, config)
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Failed to update vnic %(config)s:\n"
                            "%(response)s"), {
                                'config': config,
                                'response': e.response})
            raise e
        except Exception as e:
            LOG.exception(_("VCNS: Failed to update vnic %d"),
                          config['index'])
            raise e
        return TaskStatus.COMPLETED
    def update_interface(self, router_id, edge_id, index, network,
                         address=None, netmask=None, secondary=None,
                         jobdata=None):
        """Queue an async task reconfiguring vnic `index` on the edge.

        Only the well-known external (uplink) and internal vnic indexes
        are supported; any other index raises VcnsGeneralException.
        Returns the queued Task.
        """
        LOG.debug(_("VCNS: update vnic %(index)d: %(addr)s %(netmask)s"), {
            'index': index, 'addr': address, 'netmask': netmask})
        if index == vcns_const.EXTERNAL_VNIC_INDEX:
            name = vcns_const.EXTERNAL_VNIC_NAME
            intf_type = 'uplink'
        elif index == vcns_const.INTERNAL_VNIC_INDEX:
            name = vcns_const.INTERNAL_VNIC_NAME
            intf_type = 'internal'
        else:
            msg = _("Vnic %d currently not supported") % index
            raise exceptions.VcnsGeneralException(msg)
        config = self._assemble_edge_vnic(
            name, index, network, address, netmask, secondary, type=intf_type)
        userdata = {
            'edge_id': edge_id,
            'config': config,
            'jobdata': jobdata
        }
        task_name = "update-interface-%s-%d" % (edge_id, index)
        task = tasks.Task(task_name, router_id,
                          self._update_interface, userdata=userdata)
        task.add_result_monitor(self.callbacks.interface_update_result)
        self.task_manager.add(task)
        return task
    def _deploy_edge(self, task):
        """Task body: submit the edge deploy request to VCNS.

        Extracts the async job id from the Location response header,
        resolves it to an edge id, stores the id in task.userdata and
        leaves the task PENDING so _status_edge can poll for completion.
        """
        userdata = task.userdata
        name = userdata['router_name']
        LOG.debug(_("VCNS: start deploying edge %s"), name)
        request = userdata['request']
        try:
            header = self.vcns.deploy_edge(request)[0]
            objuri = header['location']
            # Job id is the last path segment of the Location URI.
            job_id = objuri[objuri.rfind("/") + 1:]
            response = self.vcns.get_edge_id(job_id)[1]
            edge_id = response['edgeId']
            LOG.debug(_("VCNS: deploying edge %s"), edge_id)
            userdata['edge_id'] = edge_id
            status = TaskStatus.PENDING
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: deploy edge failed for router %s."),
                          name)
            raise e
        return status
    def _status_edge(self, task):
        """Task status callback: poll the edge deployment progress.

        Returns PENDING while in progress, COMPLETED when systemStatus is
        'good', ERROR otherwise. Non-API failures are retried up to 3
        consecutive times (counter kept in task.userdata) before aborting.
        """
        edge_id = task.userdata['edge_id']
        try:
            response = self.vcns.get_edge_deploy_status(edge_id)[1]
            # Successful query: reset the transient-failure counter.
            task.userdata['retries'] = 0
            system_status = response.get('systemStatus', None)
            if system_status is None:
                status = TaskStatus.PENDING
            elif system_status == 'good':
                status = TaskStatus.COMPLETED
            else:
                status = TaskStatus.ERROR
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Edge %s status query failed."), edge_id)
            raise e
        except Exception as e:
            retries = task.userdata.get('retries', 0) + 1
            if retries < 3:
                task.userdata['retries'] = retries
                msg = _("VCNS: Unable to retrieve edge %(edge_id)s status. "
                        "Retry %(retries)d.") % {
                            'edge_id': edge_id,
                            'retries': retries}
                LOG.exception(msg)
                status = TaskStatus.PENDING
            else:
                msg = _("VCNS: Unable to retrieve edge %s status. "
                        "Abort.") % edge_id
                LOG.exception(msg)
                status = TaskStatus.ERROR
        LOG.debug(_("VCNS: Edge %s status"), edge_id)
        return status
    def _result_edge(self, task):
        """Task result callback: log deploy success or failure."""
        router_name = task.userdata['router_name']
        # edge_id may be absent if the deploy request itself failed.
        edge_id = task.userdata.get('edge_id')
        if task.status != TaskStatus.COMPLETED:
            LOG.error(_("VCNS: Failed to deploy edge %(edge_id)s "
                        "for %(name)s, status %(status)d"), {
                            'edge_id': edge_id,
                            'name': router_name,
                            'status': task.status
                        })
        else:
            LOG.debug(_("VCNS: Edge %(edge_id)s deployed for "
                        "router %(name)s"), {
                            'edge_id': edge_id, 'name': router_name
                        })
    def _delete_edge(self, task):
        """Task body: delete the edge appliance from VCNS.

        A missing edge (already deleted) counts as success; other
        failures mark the task ERROR instead of raising.
        """
        edge_id = task.userdata['edge_id']
        LOG.debug(_("VCNS: start destroying edge %s"), edge_id)
        status = TaskStatus.COMPLETED
        if edge_id:
            try:
                self.vcns.delete_edge(edge_id)
            except exceptions.ResourceNotFound:
                # Edge already gone - treat deletion as idempotent.
                pass
            except exceptions.VcnsApiException as e:
                msg = _("VCNS: Failed to delete %(edge_id)s:\n"
                        "%(response)s") % {
                            'edge_id': edge_id, 'response': e.response}
                LOG.exception(msg)
                status = TaskStatus.ERROR
            except Exception:
                LOG.exception(_("VCNS: Failed to delete %s"), edge_id)
                status = TaskStatus.ERROR
        return status
    def _get_edges(self):
        """Return the raw edge-list body from VCNS; log and re-raise
        API failures."""
        try:
            return self.vcns.get_edges()[1]
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Failed to get edges:\n%s"), e.response)
            raise e
    def deploy_edge(self, router_id, name, internal_network, jobdata=None,
                    wait_for_exec=False, loadbalancer_enable=True):
        """Queue a task that deploys a new edge appliance for a router.

        The edge is assembled with an uplink vnic on the plugin's
        external network and an internal vnic on `internal_network`; the
        loadbalancer feature is enabled unless `loadbalancer_enable` is
        False. When `wait_for_exec` is True, block until the deploy
        request has been submitted so task.userdata['edge_id'] is set.
        Returns the queued Task.
        """
        task_name = 'deploying-%s' % name
        edge_name = name
        edge = self._assemble_edge(
            edge_name, datacenter_moid=self.datacenter_moid,
            deployment_container_id=self.deployment_container_id,
            appliance_size='large', remote_access=True)
        appliance = self._assemble_edge_appliance(self.resource_pool_id,
                                                  self.datastore_id)
        if appliance:
            edge['appliances']['appliances'] = [appliance]
        vnic_external = self._assemble_edge_vnic(
            vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
            self.external_network, type="uplink")
        edge['vnics']['vnics'].append(vnic_external)
        vnic_inside = self._assemble_edge_vnic(
            vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
            internal_network,
            vcns_const.INTEGRATION_EDGE_IPADDRESS,
            vcns_const.INTEGRATION_SUBNET_NETMASK,
            type="internal")
        edge['vnics']['vnics'].append(vnic_inside)
        if loadbalancer_enable:
            self._enable_loadbalancer(edge)
        userdata = {
            'request': edge,
            'router_name': name,
            'jobdata': jobdata
        }
        task = tasks.Task(task_name, router_id,
                          self._deploy_edge,
                          status_callback=self._status_edge,
                          result_callback=self._result_edge,
                          userdata=userdata)
        task.add_executed_monitor(self.callbacks.edge_deploy_started)
        task.add_result_monitor(self.callbacks.edge_deploy_result)
        self.task_manager.add(task)
        if wait_for_exec:
            # wait until the deploy task is executed so edge_id is available
            task.wait(TaskState.EXECUTED)
        return task
def delete_edge(self, router_id, edge_id, jobdata=None):
task_name = 'delete-%s' % edge_id
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'jobdata': jobdata
}
task = tasks.Task(task_name, router_id, self._delete_edge,
userdata=userdata)
task.add_result_monitor(self.callbacks.edge_delete_result)
self.task_manager.add(task)
return task
def _assemble_nat_rule(self, action, original_address,
translated_address,
vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
enabled=True):
nat_rule = {}
nat_rule['action'] = action
nat_rule['vnic'] = vnic_index
nat_rule['originalAddress'] = original_address
nat_rule['translatedAddress'] = translated_address
nat_rule['enabled'] = enabled
return nat_rule
    def get_nat_config(self, edge_id):
        """Return the edge's current NAT configuration body; log and
        re-raise API failures."""
        try:
            return self.vcns.get_nat_config(edge_id)[1]
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Failed to get nat config:\n%s"),
                          e.response)
            raise e
    def _create_nat_rule(self, task):
        """Task body: insert one NAT rule into the edge's NAT section.

        Fetches the full NAT config, inserts the rule (appended, or at
        `location` when given) and writes the whole section back.
        """
        # TODO(fank): use POST for optimization
        #             return rule_id for future reference
        rule = task.userdata['rule']
        LOG.debug(_("VCNS: start creating nat rules: %s"), rule)
        edge_id = task.userdata['edge_id']
        nat = self.get_nat_config(edge_id)
        location = task.userdata['location']
        # 'version' must not be echoed back on update - presumably VCNS
        # rejects configs carrying a version field; TODO confirm.
        del nat['version']
        if location is None or location == vcns_const.APPEND:
            nat['rules']['natRulesDtos'].append(rule)
        else:
            nat['rules']['natRulesDtos'].insert(location, rule)
        try:
            self.vcns.update_nat_config(edge_id, nat)
            status = TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            # NOTE(review): this path also handles dnat rules although the
            # message says "snat".
            LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
                          e.response)
            status = TaskStatus.ERROR
        return status
def create_snat_rule(self, router_id, edge_id, src, translated,
jobdata=None, location=None):
LOG.debug(_("VCNS: create snat rule %(src)s/%(translated)s"), {
'src': src, 'translated': translated})
snat_rule = self._assemble_nat_rule("snat", src, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': snat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.snat_create_result)
self.task_manager.add(task)
return task
    def _delete_nat_rule(self, task):
        """Task body: delete every NAT rule whose `addrtype` field
        (originalAddress or translatedAddress) equals `address`.
        """
        # TODO(fank): pass in rule_id for optimization
        #             handle routes update for optimization
        edge_id = task.userdata['edge_id']
        address = task.userdata['address']
        addrtype = task.userdata['addrtype']
        LOG.debug(_("VCNS: start deleting %(type)s rules: %(addr)s"), {
            'type': addrtype, 'addr': address})
        nat = self.get_nat_config(edge_id)
        # 'version' must not be echoed back on update - presumably VCNS
        # rejects configs carrying a version field; TODO confirm.
        del nat['version']
        status = TaskStatus.COMPLETED
        for nat_rule in nat['rules']['natRulesDtos']:
            if nat_rule[addrtype] == address:
                rule_id = nat_rule['ruleId']
                try:
                    self.vcns.delete_nat_rule(edge_id, rule_id)
                except exceptions.VcnsApiException as e:
                    # NOTE(review): message says "snat" but this path also
                    # deletes dnat rules.
                    LOG.exception(_("VCNS: Failed to delete snat rule:\n"
                                    "%s"), e.response)
                    status = TaskStatus.ERROR
        return status
def delete_snat_rule(self, router_id, edge_id, src, jobdata=None):
LOG.debug(_("VCNS: delete snat rule %s"), src)
userdata = {
'edge_id': edge_id,
'address': src,
'addrtype': 'originalAddress',
'jobdata': jobdata
}
task_name = "delete-snat-%s-%s" % (edge_id, src)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.snat_delete_result)
self.task_manager.add(task)
return task
def create_dnat_rule(self, router_id, edge_id, dst, translated,
jobdata=None, location=None):
# TODO(fank): use POST for optimization
# return rule_id for future reference
LOG.debug(_("VCNS: create dnat rule %(dst)s/%(translated)s"), {
'dst': dst, 'translated': translated})
dnat_rule = self._assemble_nat_rule(
"dnat", dst, translated)
userdata = {
'router_id': router_id,
'edge_id': edge_id,
'rule': dnat_rule,
'location': location,
'jobdata': jobdata
}
task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated)
task = tasks.Task(task_name, router_id, self._create_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.dnat_create_result)
self.task_manager.add(task)
return task
def delete_dnat_rule(self, router_id, edge_id, translated,
jobdata=None):
# TODO(fank): pass in rule_id for optimization
LOG.debug(_("VCNS: delete dnat rule %s"), translated)
userdata = {
'edge_id': edge_id,
'address': translated,
'addrtype': 'translatedAddress',
'jobdata': jobdata
}
task_name = "delete-dnat-%s-%s" % (edge_id, translated)
task = tasks.Task(task_name, router_id, self._delete_nat_rule,
userdata=userdata)
task.add_result_monitor(self.callbacks.dnat_delete_result)
self.task_manager.add(task)
return task
    def _update_nat_rule(self, task):
        """Task body: replace the edge's whole NAT rule set.

        Aborts early if a newer update task for the same edge has been
        queued since (self.updated_task['nat'][edge_id] tracks the latest),
        so only the most recent config is pushed.

        :returns: TaskStatus.ABORT / COMPLETED / ERROR.
        """
        # TODO(fank): use POST for optimization
        # return rule_id for future reference
        edge_id = task.userdata['edge_id']
        if task != self.updated_task['nat'][edge_id]:
            # this task does not have the latest config, abort now
            # for speedup
            return TaskStatus.ABORT
        rules = task.userdata['rules']
        LOG.debug(_("VCNS: start updating nat rules: %s"), rules)
        nat = {
            'featureType': 'nat',
            'rules': {
                'natRulesDtos': rules
            }
        }
        try:
            self.vcns.update_nat_config(edge_id, nat)
            status = TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            # NOTE(review): message says "create snat rule" but this is a
            # bulk nat-config update.
            LOG.exception(_("VCNS: Failed to create snat rule:\n%s"),
                          e.response)
            status = TaskStatus.ERROR
        return status
    def update_nat_rules(self, router_id, edge_id, snats, dnats,
                         jobdata=None):
        """Queue a task replacing the edge NAT table from snat/dnat specs.

        Each dnat entry {'dst', 'translated'} produces a dnat rule plus the
        reverse snat rule; each snat entry {'src', 'translated'} produces a
        snat rule. Rule order in the pushed list follows this construction
        order. The task is registered as the latest for this edge so older
        queued updates abort themselves.
        """
        LOG.debug(_("VCNS: update nat rule\n"
                    "SNAT:%(snat)s\n"
                    "DNAT:%(dnat)s\n"), {
                        'snat': snats, 'dnat': dnats})
        nat_rules = []
        for dnat in dnats:
            # dnat plus its mirror snat so return traffic is rewritten too
            nat_rules.append(self._assemble_nat_rule(
                'dnat', dnat['dst'], dnat['translated']))
            nat_rules.append(self._assemble_nat_rule(
                'snat', dnat['translated'], dnat['dst']))
        for snat in snats:
            nat_rules.append(self._assemble_nat_rule(
                'snat', snat['src'], snat['translated']))
        userdata = {
            'edge_id': edge_id,
            'rules': nat_rules,
            'jobdata': jobdata,
        }
        task_name = "update-nat-%s" % edge_id
        task = tasks.Task(task_name, router_id, self._update_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.nat_update_result)
        # Mark this as the most recent nat update for the edge (see
        # _update_nat_rule's abort-if-stale check).
        self.updated_task['nat'][edge_id] = task
        self.task_manager.add(task)
        return task
    def _update_routes(self, task):
        """Task body: replace the edge's static routes (and default route).

        Aborts if a newer route update was queued for the same edge, unless
        the task was marked non-skippable.

        :returns: TaskStatus.ABORT / COMPLETED / ERROR.
        """
        edge_id = task.userdata['edge_id']
        if (task != self.updated_task['route'][edge_id] and
            task.userdata.get('skippable', True)):
            # this task does not have the latest config, abort now
            # for speedup
            return TaskStatus.ABORT
        gateway = task.userdata['gateway']
        routes = task.userdata['routes']
        LOG.debug(_("VCNS: start updating routes for %s"), edge_id)
        static_routes = []
        for route in routes:
            # All static routes are bound to the internal vnic.
            static_routes.append({
                "description": "",
                "vnic": vcns_const.INTERNAL_VNIC_INDEX,
                "network": route['cidr'],
                "nextHop": route['nexthop']
            })
        request = {
            "staticRoutes": {
                "staticRoutes": static_routes
            }
        }
        if gateway:
            # Default route goes out the external vnic.
            request["defaultRoute"] = {
                "description": "default-gateway",
                "gatewayAddress": gateway,
                "vnic": vcns_const.EXTERNAL_VNIC_INDEX
            }
        try:
            self.vcns.update_routes(edge_id, request)
            status = TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            LOG.exception(_("VCNS: Failed to update routes:\n%s"),
                          e.response)
            status = TaskStatus.ERROR
        return status
def update_routes(self, router_id, edge_id, gateway, routes,
skippable=True, jobdata=None):
if gateway:
gateway = gateway.split('/')[0]
userdata = {
'edge_id': edge_id,
'gateway': gateway,
'routes': routes,
'skippable': skippable,
'jobdata': jobdata
}
task_name = "update-routes-%s" % (edge_id)
task = tasks.Task(task_name, router_id, self._update_routes,
userdata=userdata)
task.add_result_monitor(self.callbacks.routes_update_result)
self.updated_task['route'][edge_id] = task
self.task_manager.add(task)
return task
def create_lswitch(self, name, tz_config, tags=None,
port_isolation=False, replication_mode="service"):
lsconfig = {
'display_name': utils.check_and_truncate(name),
"tags": tags or [],
"type": "LogicalSwitchConfig",
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
"transport_zones": tz_config
}
if port_isolation is bool:
lsconfig["port_isolation_enabled"] = port_isolation
if replication_mode:
lsconfig["replication_mode"] = replication_mode
response = self.vcns.create_lswitch(lsconfig)[1]
return response
    def delete_lswitch(self, lswitch_id):
        """Delete the logical switch identified by lswitch_id."""
        self.vcns.delete_lswitch(lswitch_id)
    def get_loadbalancer_config(self, edge_id):
        """Fetch the edge's loadbalancer service config.

        Re-raises VcnsApiException after logging; returns the response
        body on success.
        """
        try:
            header, response = self.vcns.get_loadbalancer_config(
                edge_id)
        except exceptions.VcnsApiException:
            # Log, then let the original exception propagate unchanged.
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to get service config"))
        return response
    def enable_service_loadbalancer(self, edge_id):
        """Enable the loadbalancer service on the edge if not already on.

        No-op when the fetched config already has enabled=True; otherwise
        pushes the config back with the flag set, re-raising API errors.
        """
        config = self.get_loadbalancer_config(
            edge_id)
        if not config['enabled']:
            config['enabled'] = True
            try:
                self.vcns.enable_service_loadbalancer(edge_id, config)
            except exceptions.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Failed to enable loadbalancer "
                                    "service config"))
|
[
"mveenasl@gmail.com"
] |
mveenasl@gmail.com
|
0c9146f670808e930b36605e77ad014288be9991
|
e86e5d6a1524244cd79b7fdf104203b718d64c01
|
/Pyshell.py
|
a060c958c599d449e709229fff311d7dce906212
|
[] |
no_license
|
ravijakhania13/Terminal_using_python
|
db101166930b8eac060fc4a5d8ee3e81a7085b25
|
15215daf2418b558019ed941573573006dd0165f
|
refs/heads/master
| 2020-04-04T20:45:48.760731
| 2018-11-05T17:59:32
| 2018-11-05T17:59:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,789
|
py
|
import os
import sys
import getpass
import socket
from pathlib import Path
import itertools
from more_itertools import *
import re
import fileinput
import difflib
import pwd
import grp
import time
def load():
    """Print the shell prompt (user@host:~/path$) without a trailing newline."""
    parts = os.getcwd().split('/')[3:]
    cwd = "~" + "".join("/" + p for p in parts)
    prompt = ("\033[1;32;1m" + getpass.getuser() + "@" + socket.gethostname()
              + "\033[0;0m:\033[1;34;1m" + cwd + "\033[0;0m$ ")
    print(prompt, end="")
def execute_cd(command):
    """Change directory; `command` is the raw input line ("cd <path>")."""
    path = command[3:]
    if not os.path.exists(path):
        print("bash: cd: " + path + ": No such file or directory")
    elif os.path.isdir(path):
        os.chdir(path)
    else:
        print("bash: cd: " + path + ": Not a directory")
    load()
def _perm_triplet(bits):
    """Map one octal permission digit (0-7) to its 'rwx'-style string."""
    return (("r" if bits & 4 else "-") +
            ("w" if bits & 2 else "-") +
            ("x" if bits & 1 else "-"))


def _print_name(name):
    """Print one directory entry, colouring directories blue."""
    if os.path.isdir(name):
        print("\033[1;34;1m" + name + "\033[0;0;0m")
    else:
        print(name)


def execute_ls(words):
    """List the current directory; supports combined -l, -a, -h flags.

    words[1] (if present) is a single-dash flag cluster: l = long listing,
    a = include '.' and '..', h = human-readable sizes.

    Bug fixes vs. the original: the execute ('x') bit is now taken from
    the third bit of each permission triplet (the original re-read the
    'w' bit), permission digits < 4 no longer raise IndexError (the
    original indexed an unpadded binary string), and a separator space
    always follows the permission string.
    """
    files = [os.curdir, os.pardir] + os.listdir(os.getcwd())
    files.sort()
    l = a = h = 0
    if len(words) == 1:
        for name in files[2:]:
            _print_name(name)
    else:
        for flag in words[1][1:]:
            if flag == 'a':
                a = 1
            elif flag == 'h':
                h = 1
            elif flag == 'l':
                l = 1
        if l == 1:
            dir_inode = os.stat(os.path.join(os.getcwd()))
            if h == 1:
                print('total {0:.1f}K'.format(dir_inode.st_size / 1024.0))
            else:
                print("total", dir_inode.st_size)
            if a == 0:
                files = files[2:]
            for name in files:
                full_path = os.path.join(os.getcwd(), name)
                inode = os.stat(full_path)
                ftype = 'd' if os.path.isdir(full_path) else '-'
                # Last three octal digits == user/group/other permissions.
                perms = "".join(_perm_triplet(int(d))
                                for d in oct(inode.st_mode)[-3:])
                print(ftype + perms, end=" ")
                print(str(inode.st_nlink), end=" ")
                print(pwd.getpwuid(inode.st_uid).pw_name, end=" ")
                print(grp.getgrgid(inode.st_gid).gr_name, end=" ")
                if h == 0:
                    print('{:>8} '.format(str(inode.st_size)), end="")
                else:
                    print('{:7.1f}K'.format(inode.st_size / 1024.0), end=" ")
                print(time.ctime(inode.st_mtime)[:-8], end=" ")
                print(name)
        else:
            # Flags given but no -l: original behaviour shows '.' and '..'
            # regardless of -a; preserved here.
            for name in files:
                _print_name(name)
    load()
def execute_pwd():
    """Print the current working directory, then re-show the prompt."""
    print(os.getcwd())
    load()
def execute_touch(command):
    """Create (or refresh) the file named after "touch ", like touch(1)."""
    target = Path(command[6:])
    target.touch(exist_ok=True)
    load()
def execute_head(words):
    """Emulate head(1): print the first N lines (or -c: bytes) of a file.

    Flags (anywhere between the command and the filename): -v prints a
    "==> file <==" header, -c switches to byte counting; a bare integer
    argument sets N (default 10, may be negative meaning "all but the
    last |N|"). The -n flag is parsed but unused — the count is taken
    from the bare integer argument instead.
    """
    v = 0
    n = 0  # set by -n but never read (see docstring)
    N = 10
    c = 0
    if(len(words) == 2):
        N = 10
        file = words[1]
    else:
        for i in words[1:-1]:
            if (i == '-v'):
                v = 1
            elif (i == '-n'):
                n = 1
            elif (i == '-c'):
                c = 1
            else:
                N = int(i)
        file = words[-1]
    if(os.path.exists(file)):
        with open(file) as myfile:
            if(v == 1):
                print ("==> " + file + " <==")
            if (c == 1):
                file_size = os.path.getsize(file)
                if (N < 0):
                    # Negative -c: emit all but the last |N| bytes.
                    file_size += N
                    if (file_size > 0):
                        byte = myfile.read(file_size)
                        for b in byte:
                            print (b,end="")
                else:
                    # Clamp so we never read past the end of the file.
                    if(file_size < N):
                        N = file_size
                    byte = myfile.read(N)
                    for b in byte:
                        print (b,end="")
            else:
                # First pass counts the lines; then rewind and print.
                num_lines = sum(1 for line in myfile)
                if(N < 0):
                    # Negative N: emit all but the last |N| lines.
                    num_lines += N
                    if(num_lines > 0):
                        myfile.seek(0, 0)
                        for x in range(num_lines):
                            print (next(myfile) , end = "")
                else:
                    if(num_lines > 0):
                        myfile.seek(0, 0)
                        for x in range(N):
                            if(x == num_lines):
                                break
                            else:
                                print (next(myfile) , end = "")
    else:
        print ("head: cannot open '"+file+"' for reading: No such file or directory")
    load()
def execute_tail(words):
    """Emulate tail(1): print the last N lines (or -c: bytes) of a file.

    Flags: -v prints a "==> file <==" header, -c switches to byte
    counting; a bare integer argument sets N (default 10). The -n flag
    is parsed but unused — the count comes from the bare integer.

    Bug fix vs. the original: when N exceeded the file's line count the
    lines were printed with print(line) (adding a second newline per
    line); they are now printed with end="" like the other branch.
    """
    v = 0
    n = 0  # set by -n but never read
    N = 10
    c = 0
    if len(words) == 2:
        N = 10
        file = words[1]
    else:
        for arg in words[1:-1]:
            if arg == '-v':
                v = 1
            elif arg == '-n':
                n = 1
            elif arg == '-c':
                c = 1
            else:
                N = int(arg)
        file = words[-1]
    if os.path.exists(file):
        with open(file) as myfile:
            if v == 1:
                print("==> " + file + " <==")
            if c == 1:
                # Seek to |N| bytes before EOF (or start of file).
                file_size = os.path.getsize(file)
                file_size -= abs(N)
                if file_size > 0:
                    myfile.seek(file_size, 0)
                else:
                    myfile.seek(0, 0)
                for b in myfile.read(abs(N)):
                    print(b, end="")
            else:
                num_lines = sum(1 for line in myfile)
                myfile.seek(0, 0)
                if N > num_lines:
                    for line in myfile:
                        print(line, end="")  # fix: lines already end in \n
                else:
                    rest_of_file = itertools.islice(
                        myfile, num_lines - N, None, 1)
                    for line in rest_of_file:
                        print(line, end="")
    else:
        print("tail: cannot open '" + file + "' for reading: No such file or directory")
    load()
def execute_grep(words):
    """Emulate grep with -i/-v/-c/-w over a file or a <<< here-string.

    Here-string form: grep [flags] 'pattern' <<< 'text'
    File form:        grep [flags] 'pattern' filename
    Patterns/here-strings are quoted, so the surrounding quotes are
    stripped with [1:-1].

    Bug fixes vs. the original: the case-insensitivity flag was stored
    in `i` and then clobbered by the `for i in words` loop (so -i never
    worked and the flag held the last word instead); and word mode (-w)
    iterated over the *characters* of the text rather than its
    whitespace-separated words. The file branch also leaked its file
    handle (open() without close); it now uses a context manager.
    """
    icase = v = c = w = 0
    for arg in words:
        if arg == '-i':
            icase = 1
        elif arg == '-v':
            v = 1
        elif arg == '-c':
            c = 1
        elif arg == '-w':
            w = 1
    flags = re.IGNORECASE if icase else 0
    if words[-2] == "<<<":
        text = words[-1][1:-1]
        pattern = re.compile(words[-3][1:-1], flags)
        if w == 0:
            hit = bool(re.search(pattern, text))
        else:
            # -w: match whole whitespace-separated words.
            hit = any(re.match(pattern, token) for token in text.split())
        hit = hit != bool(v)  # -v inverts the result
        if c == 1:
            print("1" if hit else "0")
        elif hit:
            print(text)
    else:
        file = words[-1]
        if os.path.exists(file):
            pattern = re.compile(words[-2][1:-1], flags)
            matches = 0
            with open(file) as fh:
                for line in fh:
                    hit = bool(re.search(pattern, line)) != bool(v)
                    if hit:
                        if c == 1:
                            matches += 1
                        else:
                            print(line, end="")
            if c == 1:
                print(matches)
        else:
            print("grep: " + file + ": No such file or directory")
    load()
def execute_sed(words):
    """Apply a quoted sed-style 's/pat/repl/[G]' substitution to a file.

    A trailing 'G' flag replaces every occurrence per line; otherwise
    only the first occurrence is replaced.
    """
    spec = words[1][1:-1].split('/')
    count = 0 if spec[3] == "G" else 1  # count=0 means "replace all"
    pattern = re.compile(spec[1])
    if os.path.exists(words[2]):
        for line in fileinput.input(words[2]):
            print(pattern.sub(spec[2], line, count=count), end="")
    else:
        print("sed: can't read " + words[2] + ": No such file or directory")
    load()
def execute_diff(words):
    """Print a unified diff of the files named in words[1] and words[2].

    Bug fix vs. the original: a missing *second* file set flag1 instead
    of flag2, leaving flag2 permanently 1. The net behaviour (skip the
    diff when either file is missing) is unchanged, but the flags now
    mean what their names say.
    """
    flag1 = 1
    flag2 = 1
    if not os.path.exists(words[1]):
        print("diff: " + words[1] + ": No such file or directory")
        flag1 = 0
    if not os.path.exists(words[2]):
        print("diff: " + words[2] + ": No such file or directory")
        flag2 = 0
    if flag1 and flag2:
        with open(words[1], 'r') as hosts0:
            with open(words[2], 'r') as hosts1:
                diff = difflib.unified_diff(
                    hosts0.readlines(), hosts1.readlines(),
                    fromfile=words[1], tofile=words[2],)
                for line in diff:
                    print(line, end="")
    load()
def execute_tr(words):
    """Emulate `echo '...' | tr SET1 SET2` / `cat file | tr SET1 SET2`.

    words layout: [echo|cat, source, '|', 'tr', SET1, SET2]. Supports
    the [:lower:]/[:upper:] (and [:a-z:]/[:A-Z:]) classes, -d deletion,
    and plain character-set translation.

    Bug fixes vs. the original: the case-class branches were inverted
    (`tr [:lower:] [:upper:]` lower-cased its input and the upper-casing
    branch was unreachable), and the file branches called
    .lower()/.upper() on print()'s None return value, raising
    AttributeError; the transformation is now applied to the line before
    printing. File handles are also closed via context managers.
    """
    LOWER = ("[:lower:]", "[:a-z:]")
    UPPER = ("[:upper:]", "[:A-Z:]")
    if words[0] == "cat":
        if not os.path.exists(words[1]):
            print("cat: " + words[1] + ": No such file or directory")
            load()
            return
    if words[4] in LOWER and words[5] in UPPER:
        if words[0] == "echo":
            print(words[1][1:-1].upper())
        else:
            with open(words[1]) as fh:
                for line in fh:
                    print(line.upper(), end="")
    elif words[4] in UPPER and words[5] in LOWER:
        if words[0] == "echo":
            print(words[1][1:-1].lower())
        else:
            with open(words[1]) as fh:
                for line in fh:
                    print(line.lower(), end="")
    elif words[4] == "-d":
        table = str.maketrans('', '', words[5][1:-1])
        if words[0] == "echo":
            print(words[1][1:-1].translate(table), end="")
        else:
            with open(words[1]) as fh:
                for line in fh:
                    print(line.translate(table), end="")
    else:
        table = str.maketrans(words[4][1:-1], words[5][1:-1])
        if words[0] == "echo":
            print(words[1][1:-1].translate(table), end="")
        else:
            with open(words[1]) as fh:
                for line in fh:
                    print(line.translate(table), end="")
    load()
def execute_clear():
    """Clear the terminal scrollback and screen, then re-show the prompt."""
    print("\033[3J" "\033[H\033[J", end='')
    load()
def main():
    """Read-eval loop: clear the screen, show the prompt, dispatch commands.

    Dispatch order matches the original if/elif chain; in particular the
    "tr anywhere in the words" check sits between the tail and sed
    handlers, so e.g. `cat file | tr ...` reaches execute_tr but a line
    starting with one of the earlier verbs does not.
    """
    print("\033[3J", end='')
    print("\033[H\033[J", end='')
    load()
    raw_handlers = {"cd": execute_cd, "touch": execute_touch}
    word_handlers = {"ls": execute_ls, "grep": execute_grep,
                     "head": execute_head, "tail": execute_tail}
    late_handlers = {"sed": execute_sed, "diff": execute_diff}
    while True:
        command = input()
        words = command.split()
        if not words:
            load()
            continue
        verb = words[0]
        if verb in raw_handlers:
            raw_handlers[verb](command)   # these parse the raw line
        elif verb == "pwd":
            execute_pwd()
        elif verb in word_handlers:
            word_handlers[verb](words)
        elif "tr" in words:
            execute_tr(words)             # tr may appear mid-pipeline
        elif verb in late_handlers:
            late_handlers[verb](words)
        elif verb == "clear":
            execute_clear()
        elif verb == "exit":
            exit()
        else:
            print(verb + ": command not found")
            load()
# Start the shell only when this file is run directly, not when imported.
if __name__=="__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
424480239805faf235cfee036b138f53a3666030
|
8d6c31030552389225d7800243209acdd414cee1
|
/city/mainapp/migrations/0004_auto_20210718_1531.py
|
6fe21bb16ad661f1ec60f0ddf94e38714ebf0cd3
|
[] |
no_license
|
DREAD21/siteproject
|
f0d15fd011de47a1f21d4ea0fdac844f2a98c607
|
10c2ee41c5a39455e6c66ce0e132435b6bb8b337
|
refs/heads/master
| 2023-07-29T23:56:52.101614
| 2021-09-16T19:32:43
| 2021-09-16T19:32:43
| 399,522,481
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
# Generated by Django 3.2.5 on 2021-07-18 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2021-07-18).

    Adds the `cinema` model and updates the verbose_name of
    `park.adress`. Structure is consumed by Django's migration loader —
    do not edit by hand beyond comments.
    """
    dependencies = [
        ('mainapp', '0003_advices_aquapark'),
    ]
    operations = [
        migrations.CreateModel(
            name='cinema',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='название кинотеатра')),
                ('adress', models.CharField(max_length=255, verbose_name='адрес кинотеатра')),
            ],
        ),
        migrations.AlterField(
            model_name='park',
            name='adress',
            field=models.CharField(max_length=255, verbose_name='адрес парка'),
        ),
    ]
|
[
"nickit.crasnov@gmail.com"
] |
nickit.crasnov@gmail.com
|
65ce3e109266778a7e4bbac0c6f09d03de0dd1da
|
a16bb446854335c769901559156ee22a9f53e021
|
/project1/urls.py
|
deb8ad5afe13f2ffa60f97d8d6746169cf6c7de0
|
[] |
no_license
|
devanshi12kar/My_new_website
|
4b3edd09188ecd02b3c02273392015d0b3c0435f
|
21f910b1c18b86202a14b62a58e4f0e8c52a7842
|
refs/heads/master
| 2020-03-28T23:23:18.862213
| 2018-10-03T12:32:37
| 2018-10-03T12:32:37
| 149,288,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,134
|
py
|
"""project1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path
from home import views as hv
from project1 import settings
# URL routing table; static() appends media/static file serving for dev.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', hv.index, name='index'),
    path('contact/', hv.showcon),
    # NOTE(review): this '' route is shadowed by the index route above and
    # can never match — confirm whether hv.showhome is still needed.
    path('', hv.showhome),
    path('news/', hv.shownews),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"devs@firststart.com"
] |
devs@firststart.com
|
05e9b18ceb97396b31ed4ea4d7f88b2d2b449755
|
171d2420681c1fbcd0ecfa030916958382edbfb0
|
/loaders.py
|
fa3dac65e25473446c93dee8643eb1274b2febab
|
[] |
no_license
|
sicarul/argentum_assets_converter
|
1ae70076938d8d1182d36bc805ae0794ecec53b7
|
ac557b208b1c09dc8a7c721fd1941418d8c4bcae
|
refs/heads/master
| 2021-01-23T15:53:20.536353
| 2014-04-09T21:02:22
| 2014-04-09T21:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,570
|
py
|
import os, re
import ConfigParser
# Directory layout for the asset-conversion pipeline.
DIR_CONVERTED = 'converted'
DIR_ASSETS = 'assets'
DIR_BODIES = os.path.join(DIR_ASSETS, 'bodies')
DIR_HEADS = os.path.join(DIR_ASSETS, 'heads')
DIR_METADATA = os.path.join(DIR_ASSETS, 'metadata')
DIR_TILESETS = os.path.join(DIR_ASSETS, 'tiles')
DIR_INPUTINI = 'INIT'
DIR_INPUT_TILESETS = 'tilesets'
# Tiles are square; size in pixels per side.
TILESET_SIZE = 32
# Source INI files (Argentum Online format) and JSON metadata outputs.
FILE_GRAFICOS = os.path.join(DIR_INPUTINI, 'Graficos3.ini')
FILE_PERSONAJES = os.path.join(DIR_INPUTINI, 'Personajes.ini')
FILE_HEADS = os.path.join(DIR_INPUTINI, 'cabezas.ini')
FILE_OUTPUT_BODIES = os.path.join(DIR_METADATA, 'bodies.json')
FILE_OUTPUT_HEADS = os.path.join(DIR_METADATA, 'heads.json')
def loadGraphics(file_input):
    """Parse GrhN=... lines and return {id: dict} for static graphics.

    Only type-1 ("normal") entries are kept; the value format is
    type-img-x-y-width-height.
    """
    graphics = {}
    grh_number = re.compile(r"Grh([0-9]+)")
    with open(file_input, 'r') as fh:
        for line in fh:
            if not re.match('^Grh', line):
                continue
            key, value = line.split('=')[0], line.split('=')[1]
            num = int(grh_number.match(key).group(1))
            fields = value.split('-')
            if fields[0] == '1':  # type 1 == plain (non-animated) graphic
                graphics[num] = {
                    'id': num,
                    'img': int(fields[1]),
                    'x': int(fields[2]),
                    'y': int(fields[3]),
                    'width': int(fields[4]),
                    'height': int(fields[5]),
                }
    return graphics
def loadAnimations(file_input):
    """Parse GrhN=... lines and return {id: dict} for animations (type > 1).

    Value format: count-?-frame...-speed; frames are the grh ids between
    the second field and the trailing speed.
    """
    animations = {}
    grh_number = re.compile(r"Grh([0-9]+)")
    with open(file_input, 'r') as fh:
        for line in fh:
            if not re.match('^Grh', line):
                continue
            parts = line.split('=')
            num = int(grh_number.match(parts[0]).group(1))
            fields = parts[1].split('-')
            if int(fields[0]) > 1:  # first field > 1 marks an animation
                animations[num] = {
                    'id': num,
                    # Kept as map() to preserve the original behaviour
                    # (lazy iterator on Python 3, list on Python 2).
                    'frames': map(int, fields[2:-1]),
                    'speed': int(float(fields[-1])),
                }
    return animations
def loadBodies(file_input):
    """Parse [BODYn] sections from a Personajes-style INI file.

    Returns {n: {'walk1'..'walk4', 'HeadOffsetX', 'HeadOffsetY'}}; each
    value is truncated at the first apostrophe (inline comments in the
    source files) and converted to int.

    Fixes vs. the original: the file is opened with a context manager
    (the original leaked the handle passed to readfp), and the parser
    module is resolved for both Python 2 (ConfigParser) and Python 3
    (configparser, where readfp was removed in 3.12 in favour of
    read_file).
    """
    try:
        import configparser as _cp  # Python 3
    except ImportError:
        import ConfigParser as _cp  # Python 2
    bodies = {}
    config = _cp.ConfigParser()
    with open(file_input) as fp:
        read = getattr(config, 'read_file', None) or config.readfp
        read(fp)
    for section in config.sections():
        m = re.match('BODY([0-9]+)', section)
        if not m:
            continue
        def clean(option):
            # Value up to the first "'" (strips trailing inline comment).
            return int(config.get(section, option).split("'")[0])
        bodies[int(m.group(1))] = {
            'walk1': clean('walk1'),
            'walk2': clean('walk2'),
            'walk3': clean('walk3'),
            'walk4': clean('walk4'),
            'HeadOffsetX': clean('HeadOffsetX'),
            'HeadOffsetY': clean('HeadOffsetY'),
        }
    return bodies
def loadHeads(file_input):
    """Parse [HEADn] sections; keep entries where all four head ids >= 1.

    Returns {n: {'head1'..'head4'}}; values are truncated at the first
    apostrophe and converted to int. Sections with any head id < 1 are
    dropped (invalid placeholder rows in the source files).

    Fixes vs. the original: the file is opened with a context manager
    (the original leaked the handle passed to readfp), and the parser
    module is resolved for both Python 2 and Python 3.
    """
    try:
        import configparser as _cp  # Python 3
    except ImportError:
        import ConfigParser as _cp  # Python 2
    heads = {}
    config = _cp.ConfigParser()
    with open(file_input) as fp:
        read = getattr(config, 'read_file', None) or config.readfp
        read(fp)
    for section in config.sections():
        m = re.match('HEAD([0-9]+)', section)
        if not m:
            continue
        entry = {}
        valid = True
        for x in range(4):
            head_id = 'head%d' % (x + 1)
            entry[head_id] = int(config.get(section, head_id).split("'")[0])
            if entry[head_id] < 1:
                valid = False
        if valid:
            heads[int(m.group(1))] = entry
    return heads
|
[
"pabloseibelt@gmail.com"
] |
pabloseibelt@gmail.com
|
89787b75cd0410301edae68711d91b62561895ba
|
14176826a610eec5df62f6637e45a90cf2ffe931
|
/tfoms/management/commands/shit_03.py
|
ee4ad837b1d4b4040ea081dddc01cba7c9046717
|
[] |
no_license
|
demidrol911/medical_registry
|
4b38079ba0224719e6a3fae993fb675c0364f7b7
|
608758652266395e5aaeafd971ba76fac320ebd3
|
refs/heads/master
| 2021-01-21T02:21:05.356206
| 2018-02-19T00:41:36
| 2018-02-19T00:41:36
| 23,862,923
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,237
|
py
|
#! -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django.db.models import Sum
from django.db.models import Q
import datetime
from tfoms.models import (ProvidedService, ProvidedEvent)
import csv
from xlutils.copy import copy
from xlrd import open_workbook
from xlwt import easyxf
def main():
    """Build the 05-10/2013 hard-life-kids dispensary report (Python 2).

    Queries ProvidedService for service code 119001 over the given
    periods, aggregates invoiced/accepted sums and event counts per
    screening round (comments F0* = round one, F1* = round two) and per
    health group (F0x/F1x suffix), then writes the figures into row 7 of
    a pre-formatted xls template and saves a dated copy.
    """
    YEAR = '2013'
    PERIOD = ('05', '06', '07', '08', '09', '10')
    # Template workbook; formatting_info keeps the existing cell styles.
    rb = open_workbook('d:/work/03_disp_hard_life_kids.xls', formatting_info=True)
    tl = easyxf('border: left thin, top thin, bottom thin, right thin; font: name Times New Roman, height 200;')
    r_sheet = rb.sheet_by_index(0)  # unused; kept from template handling
    wb = copy(rb)
    w_sheet = wb.get_sheet(0)
    services = ProvidedService.objects.filter(
        event__record__register__year=YEAR,
        event__record__register__period__in=PERIOD,
        event__record__register__is_active=True,
        code__code='119001')
    q = Q(event__comment__startswith='F01') | Q(event__comment__startswith='F11')  # unused
    total_invoiced = services.aggregate(sum=Sum('event__invoiced_payment'))['sum']
    # Event comments starting F0* belong to screening round one, F1* to
    # round two.
    comment_round_one = Q(event__comment__startswith='F0')
    comment_round_two = Q(event__comment__startswith='F1')
    invoiced_round_one_events = services.filter(
        comment_round_one).values('event__pk').distinct('event__pk').count()
    invoiced_round_one_sum = services.filter(comment_round_one
        ).aggregate(sum=Sum('event__invoiced_payment'))['sum']
    invoiced_round_two_events = services.filter(
        comment_round_two).values('event__pk').distinct('event__pk').count()
    # NOTE(review): "tow" is a typo for "two"; the name is used
    # consistently below so it is left unchanged here.
    invoiced_round_tow_sum = services.filter(
        comment_round_two).aggregate(sum=Sum('event__invoiced_payment'))['sum']
    total_accepted = services.aggregate(sum=Sum('event__accepted_payment'))['sum']
    # Accepted figures only count paid services (payment_type 2; round
    # two also admits type 4).
    accepted_round_one_events = services.filter(
        comment_round_one &
        Q(payment_type_id=2)).values('event__id').distinct('event__id').count()
    accepted_round_one_sum = services.filter(
        comment_round_one &
        Q(payment_type_id=2)).aggregate(sum=Sum('accepted_payment'))['sum']
    accepted_round_two_events = services.filter(
        comment_round_two &
        Q(payment_type__in=(2, 4))).values('event__id').distinct('event__id').count()
    accepted_round_two_sum = services.filter(
        comment_round_two &
        Q(payment_type_id__in=(2, 4))).aggregate(sum=Sum('event__accepted_payment'))['sum']
    # Health groups 1-5 encoded in the comment suffix; group five also
    # absorbs records with no comment at all.
    group_one = Q(event__comment__startswith='F01') | Q(event__comment__startswith='F11')
    group_two = Q(event__comment__startswith='F02') | Q(event__comment__startswith='F12')
    group_three = Q(event__comment__startswith='F03') | Q(event__comment__startswith='F13')
    group_four = Q(event__comment__startswith='F04') | Q(event__comment__startswith='F14')
    group_five = Q(event__comment__startswith='F05') | Q(event__comment__startswith='F15') | Q(event__comment=None)
    #group_none =
    ps = []  # unused
    patient_health_group_one = services.filter(group_one).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    patient_health_group_two = services.filter(group_two).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    patient_health_group_three = services.filter(group_three).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    patient_health_group_four = services.filter(group_four).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    patient_health_group_five = services.filter(group_five).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    #patient_health_group_none = services.filter(group_none).values('event__record__patient__pk').distinct('event__record__patient__pk').count()
    #print patient_health_group_none
    # Python 2 print statements — diagnostic output only.
    print invoiced_round_one_events, invoiced_round_one_sum, invoiced_round_two_events, invoiced_round_tow_sum
    print services.values('event__id').distinct('event__id').count()
    # Row 2: report date stamp; row 7: the aggregated figures, columns
    # 0-14 in the template's fixed order.
    w_sheet.write(2, 0,
                  u'по состоянию на %s' % (datetime.datetime.now().strftime('%d.%m.%Y')),
                  style=tl)
    e = []  # unused
    w_sheet.write(7, 0, total_invoiced, style=tl)
    w_sheet.write(7, 1, invoiced_round_one_events, style=tl)
    w_sheet.write(7, 2, invoiced_round_one_sum, style=tl)
    w_sheet.write(7, 3, invoiced_round_two_events, style=tl)
    w_sheet.write(7, 4, invoiced_round_tow_sum, style=tl)
    w_sheet.write(7, 5, total_accepted, style=tl)
    w_sheet.write(7, 6, accepted_round_one_events, style=tl)
    w_sheet.write(7, 7, accepted_round_one_sum, style=tl)
    w_sheet.write(7, 8, accepted_round_two_events, style=tl)
    w_sheet.write(7, 9, accepted_round_two_sum, style=tl)
    w_sheet.write(7, 10, patient_health_group_one, style=tl)
    w_sheet.write(7, 11, patient_health_group_two, style=tl)
    w_sheet.write(7, 12, patient_health_group_three, style=tl)
    w_sheet.write(7, 13, patient_health_group_four, style=tl)
    w_sheet.write(7, 14, patient_health_group_five, style=tl)
    wb.save('d:/work/03_disp_hard_life_kids_05_10_%s.xls' % (YEAR, ))
class Command(BaseCommand):
    """Django management command wrapper: delegates to main() above."""
    help = 'export reports'
    def handle(self, *args, **options):
        # All the work happens in the module-level report builder.
        main()
|
[
"SFHSHHT@DSG"
] |
SFHSHHT@DSG
|
8d0bf49c97190af924336ce053ddf32447c6894a
|
d0773c14db3507831e9181f5aa4f4b11adadd3f7
|
/EDIOrderingMessage2003-Order2.py
|
5b72cd27d61d7f79eab35c721bddd6910932e961
|
[] |
no_license
|
NegarBatenipour/Semantic-EDI
|
3adc62e887a6ef4e25206f37e76e79bc7f5122ed
|
b23d4645052402c5b0da023d8136616e95d33a17
|
refs/heads/master
| 2021-06-03T19:49:34.785490
| 2021-02-08T21:59:28
| 2021-02-08T21:59:28
| 145,393,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 78,854
|
py
|
# -*- coding: utf-8 -*-
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.namespace import DC, FOAF
from rdflib import Namespace
n = Namespace("http://example.org/")
g = Graph()
# Create an identifier to use as the subject for the order.
Order1 = BNode()
# Scratch buffers reused while tokenising each EDIFACT segment line.
s1 = []
l = []
# Per-segment accumulators. For every EDIFACT segment tag below the
# parser maintains three module-level names:
#   <TAG>            - list of parsed elements (each split on ':')
#   <TAG>Elements    - number of '+'-separated elements in the line
#   <TAG>Components  - per-element component counts
# The original file spelled each triple out by hand; this loop creates
# the same names with the same initial values.
_COMPOSITE_TAGS = (
    'UNH', 'BGM', 'DTM', 'NAD', 'LIN', 'QTY', 'PRI', 'CNT', 'PAI', 'IMD',
    'FTX', 'GIR', 'RFF', 'LOC', 'FII', 'PCD', 'MOA', 'RTE', 'TAX', 'RCS',
    'DGS', 'CTA', 'COM', 'PIA', 'GEI', 'GIN', 'QVR', 'DOC', 'MTD', 'CCI',
    'CAV', 'PCI', 'CUX', 'PYT', 'RJL', 'TDT', 'TOD', 'PAC', 'MEA', 'EQD',
    'HAN', 'SCC', 'APR', 'RNG', 'ALC', 'STG',
)
for _tag in _COMPOSITE_TAGS:
    globals()[_tag] = []
    globals()[_tag + 'Elements'] = 0
    globals()[_tag + 'Components'] = []
# UNS/UNT are service segments and ALI is initialised without a
# Components list, exactly as in the original (its Components list is
# first created inside the parsing loop).
UNS = []
UNSElements = 0
UNT = []
UNTElements = 0
ALI = []
ALIElements = 0
with open("order1-2003.txt") as fh:
for line in fh:
if line.startswith("UNH"):
l = line.split("’")[0]
s1 = l.split('+')
UNHElements = len(s1)
for i in range(len(s1)):
UNH.append(s1[i].split(':'))
UNHComponents.append(len(s1[i].split(':')))
if line.startswith("ALC"):
l = line.split("’")[0]
s1 = l.split('+')
ALCElements = len(s1)
ALC = []
ALCComponents = []
for i in range(len(s1)):
ALC.append(s1[i].split(':'))
ALCComponents.append(len(s1[i].split(':')))
if line.startswith("RNG"):
l = line.split("’")[0]
s1 = l.split('+')
RNGElements = len(s1)
RNG = []
RNGComponents = []
for i in range(len(s1)):
RNG.append(s1[i].split(':'))
RNGComponents.append(len(s1[i].split(':')))
if line.startswith("APR"):
l = line.split("’")[0]
s1 = l.split('+')
APRElements = len(s1)
APR = []
APRComponents = []
for i in range(len(s1)):
APR.append(s1[i].split(':'))
APRComponents.append(len(s1[i].split(':')))
if line.startswith("SCC"):
l = line.split("’")[0]
s1 = l.split('+')
SCCElements = len(s1)
SCC = []
SCCComponents = []
for i in range(len(s1)):
SCC.append(s1[i].split(':'))
SCCComponents.append(len(s1[i].split(':')))
if line.startswith("HAN"):
l = line.split("’")[0]
s1 = l.split('+')
HANElements = len(s1)
HAN = []
HANComponents = []
for i in range(len(s1)):
HAN.append(s1[i].split(':'))
HANComponents.append(len(s1[i].split(':')))
if line.startswith("EQD"):
l = line.split("’")[0]
s1 = l.split('+')
EQDElements = len(s1)
EQD = []
EQDComponents = []
for i in range(len(s1)):
EQD.append(s1[i].split(':'))
EQDComponents.append(len(s1[i].split(':')))
if line.startswith("MEA"):
l = line.split("’")[0]
s1 = l.split('+')
MEAElements = len(s1)
MEA = []
MEAComponents = []
for i in range(len(s1)):
MEA.append(s1[i].split(':'))
MEAComponents.append(len(s1[i].split(':')))
if line.startswith("PAC"):
l = line.split("’")[0]
s1 = l.split('+')
PACElements = len(s1)
PAC = []
PACComponents = []
for i in range(len(s1)):
PAC.append(s1[i].split(':'))
PACComponents.append(len(s1[i].split(':')))
if line.startswith("TOD"):
l = line.split("’")[0]
s1 = l.split('+')
TODElements = len(s1)
TOD = []
TODComponents = []
for i in range(len(s1)):
TOD.append(s1[i].split(':'))
TODComponents.append(len(s1[i].split(':')))
if line.startswith("TDT"):
l = line.split("’")[0]
s1 = l.split('+')
TDTElements = len(s1)
TDT = []
TDTComponents = []
for i in range(len(s1)):
TDT.append(s1[i].split(':'))
TDTComponents.append(len(s1[i].split(':')))
if line.startswith("RJL"):
l = line.split("’")[0]
s1 = l.split('+')
RJLElements = len(s1)
RJL = []
RJLComponents = []
for i in range(len(s1)):
RJL.append(s1[i].split(':'))
RJLComponents.append(len(s1[i].split(':')))
if line.startswith("PYT"):
l = line.split("’")[0]
s1 = l.split('+')
PYTElements = len(s1)
PYT = []
PYTComponents = []
for i in range(len(s1)):
PYT.append(s1[i].split(':'))
PYTComponents.append(len(s1[i].split(':')))
if line.startswith("CUX"):
l = line.split("’")[0]
s1 = l.split('+')
CUXElements = len(s1)
CUX = []
CUXComponents = []
for i in range(len(s1)):
CUX.append(s1[i].split(':'))
CUXComponents.append(len(s1[i].split(':')))
if line.startswith("FII"):
l = line.split("’")[0]
s1 = l.split('+')
FIIElements = len(s1)
FII = []
FIIComponents = []
for i in range(len(s1)):
FII.append(s1[i].split(':'))
FIIComponents.append(len(s1[i].split(':')))
if line.startswith("LOC"):
l = line.split("’")[0]
s1 = l.split('+')
LOCElements = len(s1)
LOC = []
LOCComponents = []
for i in range(len(s1)):
LOC.append(s1[i].split(':'))
LOCComponents.append(len(s1[i].split(':')))
        # --- Segment tokenising (BGM..RFF) --- same element/component split
        # pattern as the handlers above.
        # NOTE(review): unlike the other handlers, BGM does not reset
        # BGM/BGMComponents here — presumably they are initialised (empty)
        # before this loop; confirm, otherwise repeated BGM lines accumulate.
        if line.startswith("BGM"):
            l = line.split("’")[0]
            s1 = l.split('+')
            BGMElements=len(s1)
            for i in range(len(s1)):
                BGM.append(s1[i].split(':'))
                BGMComponents.append(len(s1[i].split(':')))
        if line.startswith("DTM"):
            l = line.split("’")[0]
            s1 = l.split('+')
            DTMElements = len(s1)
            DTM = []
            DTMComponents = []
            for i in range(len(s1)):
                DTM.append(s1[i].split(':'))
                DTMComponents.append(len(s1[i].split(':')))
        if line.startswith("NAD"):
            l = line.split("’")[0]
            s1 = l.split('+')
            NADElements = len(s1)
            NAD = []
            NADComponents = []
            for i in range(len(s1)):
                NAD.append(s1[i].split(':'))
                NADComponents.append(len(s1[i].split(':')))
        if line.startswith("LIN"):
            l = line.split("’")[0]
            s1 = l.split('+')
            LINElements = len(s1)
            LIN = []
            LINComponents = []
            for i in range(len(s1)):
                LIN.append(s1[i].split(':'))
                LINComponents.append(len(s1[i].split(':')))
        if line.startswith("QTY"):
            l = line.split("’")[0]
            s1 = l.split('+')
            QTYElements = len(s1)
            QTY = []
            QTYComponents = []
            for i in range(len(s1)):
                QTY.append(s1[i].split(':'))
                QTYComponents.append(len(s1[i].split(':')))
        if line.startswith("PRI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            PRIElements = len(s1)
            PRI = []
            PRIComponents = []
            for i in range(len(s1)):
                PRI.append(s1[i].split(':'))
                PRIComponents.append(len(s1[i].split(':')))
        if line.startswith("IMD"):
            l = line.split("’")[0]
            s1 = l.split('+')
            IMDElements = len(s1)
            IMD = []
            IMDComponents = []
            for i in range(len(s1)):
                IMD.append(s1[i].split(':'))
                IMDComponents.append(len(s1[i].split(':')))
        # UNS keeps no component-count list and is not reset here — it only
        # carries the single section-identifier component read later.
        if line.startswith("UNS"):
            l = line.split("’")[0]
            s1 = l.split('+')
            UNSElements = len(s1)
            for i in range(len(s1)):
                UNS.append(s1[i].split(':'))
        if line.startswith("ALI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            ALIElements = len(s1)
            ALI = []
            ALIComponents = []
            for i in range(len(s1)):
                ALI.append(s1[i].split(':'))
                ALIComponents.append(len(s1[i].split(':')))
        if line.startswith("CNT"):
            l = line.split("’")[0]
            s1 = l.split('+')
            CNTElements = len(s1)
            CNT = []
            CNTComponents = []
            for i in range(len(s1)):
                CNT.append(s1[i].split(':'))
                CNTComponents.append(len(s1[i].split(':')))
        if line.startswith("RFF"):
            l = line.split("’")[0]
            s1 = l.split('+')
            RFFElements = len(s1)
            RFF = []
            RFFComponents = []
            for i in range(len(s1)):
                RFF.append(s1[i].split(':'))
                RFFComponents.append(len(s1[i].split(':')))
        # --- Segment tokenising (PAI..COM) --- same element/component split
        # pattern as the handlers above.
        if line.startswith("PAI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            PAIElements = len(s1)
            PAI = []
            PAIComponents = []
            for i in range(len(s1)):
                PAI.append(s1[i].split(':'))
                PAIComponents.append(len(s1[i].split(':')))
        if line.startswith("GIR"):
            l = line.split("’")[0]
            s1 = l.split('+')
            GIRElements = len(s1)
            GIR = []
            GIRComponents = []
            for i in range(len(s1)):
                GIR.append(s1[i].split(':'))
                GIRComponents.append(len(s1[i].split(':')))
        if line.startswith("FTX"):
            l = line.split("’")[0]
            s1 = l.split('+')
            FTXElements = len(s1)
            FTX = []
            FTXComponents = []
            for i in range(len(s1)):
                FTX.append(s1[i].split(':'))
                FTXComponents.append(len(s1[i].split(':')))
        # UNT stores whole data elements (no ':' split) — the trailer's two
        # fields are simple values. Not reset here; presumably initialised
        # empty before the loop.
        if line.startswith("UNT"):
            l = line.split("’")[0]
            s1 = l.split('+')
            UNTElements = len(s1)
            for i in range(len(s1)):
                UNT.append(s1[i])
        if line.startswith("PCD"):
            l = line.split("’")[0]
            s1 = l.split('+')
            PCDElements = len(s1)
            PCD = []
            PCDComponents = []
            for i in range(len(s1)):
                PCD.append(s1[i].split(':'))
                PCDComponents.append(len(s1[i].split(':')))
        if line.startswith("MOA"):
            l = line.split("’")[0]
            s1 = l.split('+')
            MOAElements = len(s1)
            MOA = []
            MOAComponents = []
            for i in range(len(s1)):
                MOA.append(s1[i].split(':'))
                MOAComponents.append(len(s1[i].split(':')))
        if line.startswith("RTE"):
            l = line.split("’")[0]
            s1 = l.split('+')
            RTEElements = len(s1)
            RTE = []
            RTEComponents = []
            for i in range(len(s1)):
                RTE.append(s1[i].split(':'))
                RTEComponents.append(len(s1[i].split(':')))
        if line.startswith("TAX"):
            l = line.split("’")[0]
            s1 = l.split('+')
            TAXElements = len(s1)
            TAX = []
            TAXComponents = []
            for i in range(len(s1)):
                TAX.append(s1[i].split(':'))
                TAXComponents.append(len(s1[i].split(':')))
        if line.startswith("RCS"):
            l = line.split("’")[0]
            s1 = l.split('+')
            RCSElements = len(s1)
            RCS = []
            RCSComponents = []
            for i in range(len(s1)):
                RCS.append(s1[i].split(':'))
                RCSComponents.append(len(s1[i].split(':')))
        if line.startswith("DGS"):
            l = line.split("’")[0]
            s1 = l.split('+')
            DGSElements = len(s1)
            DGS = []
            DGSComponents = []
            for i in range(len(s1)):
                DGS.append(s1[i].split(':'))
                DGSComponents.append(len(s1[i].split(':')))
        if line.startswith("CTA"):
            l = line.split("’")[0]
            s1 = l.split('+')
            CTAElements = len(s1)
            CTA = []
            CTAComponents = []
            for i in range(len(s1)):
                CTA.append(s1[i].split(':'))
                CTAComponents.append(len(s1[i].split(':')))
        if line.startswith("COM"):
            l = line.split("’")[0]
            s1 = l.split('+')
            COMElements = len(s1)
            COM = []
            COMComponents = []
            for i in range(len(s1)):
                COM.append(s1[i].split(':'))
                COMComponents.append(len(s1[i].split(':')))
        # --- Segment tokenising (PIA..STG) --- same element/component split
        # pattern as the handlers above.
        if line.startswith("PIA"):
            l = line.split("’")[0]
            s1 = l.split('+')
            PIAElements = len(s1)
            PIA = []
            PIAComponents = []
            for i in range(len(s1)):
                PIA.append(s1[i].split(':'))
                PIAComponents.append(len(s1[i].split(':')))
        if line.startswith("GEI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            GEIElements = len(s1)
            GEI = []
            GEIComponents = []
            for i in range(len(s1)):
                GEI.append(s1[i].split(':'))
                GEIComponents.append(len(s1[i].split(':')))
        if line.startswith("GIN"):
            l = line.split("’")[0]
            s1 = l.split('+')
            GINElements = len(s1)
            GIN = []
            GINComponents = []
            for i in range(len(s1)):
                GIN.append(s1[i].split(':'))
                GINComponents.append(len(s1[i].split(':')))
        if line.startswith("QVR"):
            l = line.split("’")[0]
            s1 = l.split('+')
            QVRElements = len(s1)
            QVR = []
            QVRComponents = []
            for i in range(len(s1)):
                QVR.append(s1[i].split(':'))
                QVRComponents.append(len(s1[i].split(':')))
        if line.startswith("DOC"):
            l = line.split("’")[0]
            s1 = l.split('+')
            DOCElements = len(s1)
            DOC = []
            DOCComponents = []
            for i in range(len(s1)):
                DOC.append(s1[i].split(':'))
                DOCComponents.append(len(s1[i].split(':')))
        if line.startswith("MTD"):
            l = line.split("’")[0]
            s1 = l.split('+')
            MTDElements = len(s1)
            MTD = []
            MTDComponents = []
            for i in range(len(s1)):
                MTD.append(s1[i].split(':'))
                MTDComponents.append(len(s1[i].split(':')))
        if line.startswith("CCI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            CCIElements = len(s1)
            CCI = []
            CCIComponents = []
            for i in range(len(s1)):
                CCI.append(s1[i].split(':'))
                CCIComponents.append(len(s1[i].split(':')))
        if line.startswith("CAV"):
            l = line.split("’")[0]
            s1 = l.split('+')
            CAVElements = len(s1)
            CAV = []
            CAVComponents = []
            for i in range(len(s1)):
                CAV.append(s1[i].split(':'))
                CAVComponents.append(len(s1[i].split(':')))
        if line.startswith("PCI"):
            l = line.split("’")[0]
            s1 = l.split('+')
            PCIElements = len(s1)
            PCI = []
            PCIComponents = []
            for i in range(len(s1)):
                PCI.append(s1[i].split(':'))
                PCIComponents.append(len(s1[i].split(':')))
        # NOTE(review): STG does not reset STG/STGComponents here — presumably
        # they are initialised before the loop; confirm, otherwise repeated
        # STG lines accumulate.
        if line.startswith("STG"):
            l = line.split("’")[0]
            s1 = l.split('+')
            STGElements = len(s1)
            for i in range(len(s1)):
                STG.append(s1[i].split(':'))
                STGComponents.append(len(s1[i].split(':')))
        #UNH - Message header: map UNH data elements/components to triples.
        # NOTE(review): the UNHComponents[...] guards below are NOT nested
        # under the matching UNHElements guards, so a UNH segment with fewer
        # elements would raise IndexError on UNHComponents; presumably real
        # messages always carry these elements — confirm with sample data.
        if UNHElements>=2:
            g.add((Order1, n.MessageReferenceNumber, Literal(UNH[1][0])))
        if UNHElements >= 3:
            g.add((Order1, n.MessageType, Literal(UNH[2][0])))
        if UNHComponents[2] >= 2:
            g.add((Order1, n.MessageVersionNumber, Literal(UNH[2][1])))
        if UNHComponents[2] >= 3:
            g.add((Order1, n.MessageReleaseNumber, Literal(UNH[2][2])))
        if UNHComponents[2] >= 4:
            g.add((Order1, n.ControllingAgencyCoded, Literal(UNH[2][3])))
        if UNHComponents[2] >= 5:
            g.add((Order1, n.AssociationAssignedCode, Literal(UNH[2][4])))
        if UNHComponents[2] >= 6:
            g.add((Order1, n.CodeListDirectoryVersionNumber, Literal(UNH[2][5])))
        if UNHComponents[2] >= 7:
            g.add((Order1, n.MessageTypeSubFunctionIdentification, Literal(UNH[2][6])))
        if UNHElements >= 4:
            g.add((Order1, n.CommonAccessReference, Literal(UNH[3][0])))
        if UNHElements >= 5:
            g.add((Order1, n.SequenceOfTransfers, Literal(UNH[4][0])))
        if UNHComponents[4] >= 2:
            g.add((Order1, n.FirstAndLastTransfer, Literal(UNH[4][1])))
        if UNHElements >= 6:
            g.add((Order1, n.MessageSubsetIdentification, Literal(UNH[5][0])))
        if UNHComponents[5] >= 2:
            g.add((Order1, n.MessageSubsetVersionNumber, Literal(UNH[5][1])))
        if UNHComponents[5] >= 3:
            g.add((Order1, n.MessageSubsetReleaseNumber, Literal(UNH[5][2])))
        if UNHComponents[5] >= 4:
            g.add((Order1, n.ControllingAgencyCoded, Literal(UNH[5][3])))
        if UNHElements >= 7:
            g.add((Order1, n.MessageImplementationGuidelineIdentification, Literal(UNH[6][0])))
        if UNHComponents[6] >= 2:
            g.add((Order1, n.MessageImplementationGuidelineVersionNumber, Literal(UNH[6][1])))
        if UNHComponents[6] >= 3:
            g.add((Order1, n.MessageImplementationGuidelineReleaseNumber, Literal(UNH[6][2])))
        if UNHComponents[6] >= 4:
            g.add((Order1, n.ControllingAgencyCoded, Literal(UNH[6][3])))
        if UNHElements >= 8:
            g.add((Order1, n.ScenarioIdentification, Literal(UNH[7][0])))
        if UNHComponents[7] >= 2:
            g.add((Order1, n.ScenarioVersionNumber, Literal(UNH[7][1])))
        if UNHComponents[7] >= 3:
            g.add((Order1, n.ScenarioReleaseNumber, Literal(UNH[7][2])))
        if UNHComponents[7] >= 4:
            g.add((Order1, n.ControllingAgencyCoded, Literal(UNH[7][3])))
        #IMD - Item description: component guards here are correctly nested
        # under the element guards (unlike some other emitters in this file).
        if IMDElements >= 2:
            g.add((Order1, n.DescriptionFormatCode, Literal(IMD[1][0])))
        if IMDElements >= 3:
            if IMDComponents[2] >= 1:
                g.add((Order1, n.ItemCharacteristicCode, Literal(IMD[2][0])))
            if IMDComponents[2] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(IMD[2][1])))
            if IMDComponents[2] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(IMD[2][2])))
        if IMDElements >= 4:
            if IMDComponents[3] >= 1:
                g.add((Order1, n.ItemDescriptionCode, Literal(IMD[3][0])))
            if IMDComponents[3] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(IMD[3][1])))
            if IMDComponents[3] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(IMD[3][2])))
            if IMDComponents[3] >= 4:
                g.add((Order1, n.ItemDescription, Literal(IMD[3][3])))
            # ItemDescription may repeat (two free-text components).
            if IMDComponents[3] >= 5:
                g.add((Order1, n.ItemDescription, Literal(IMD[3][4])))
            if IMDComponents[3] >= 6:
                g.add((Order1, n.LanguageNameCode, Literal(IMD[3][5])))
        if IMDElements >= 5:
            g.add((Order1, n.SufaceOrLayerCode, Literal(IMD[4][0])))
        #ALC - Allowance or charge.
        if ALCElements >= 2:
            g.add((Order1, n.AllowanceOrChargeCodeQualifier, Literal(ALC[1][0])))
        if ALCElements >= 3:
            if ALCComponents[2] >= 1:
                g.add((Order1, n.AllowanceOrChargeIdentifier, Literal(ALC[2][0])))
            if ALCComponents[2] >= 2:
                g.add((Order1, n.AllowanceOrChargeIdentificationCode, Literal(ALC[2][1])))
        if ALCElements >= 4:
            g.add((Order1, n.SettlementMeansCode, Literal(ALC[3][0])))
        if ALCElements >= 5:
            g.add((Order1, n.CalculationSequenceCode, Literal(ALC[4][0])))
        if ALCElements >= 6:
            if ALCComponents[5] >= 1:
                g.add((Order1, n.SpecialServiceDescriptionCode, Literal(ALC[5][0])))
            if ALCComponents[5] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(ALC[5][1])))
            if ALCComponents[5] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(ALC[5][2])))
            if ALCComponents[5] >= 4:
                g.add((Order1, n.SpecialServiceDescription, Literal(ALC[5][3])))
            # SpecialServiceDescription may repeat (two free-text components).
            if ALCComponents[5] >= 5:
                g.add((Order1, n.SpecialServiceDescription, Literal(ALC[5][4])))
        #RNG - Range details.
        if RNGElements >= 2:
            g.add((Order1, n.RangeTypeCodeQualifier, Literal(RNG[1][0])))
        if RNGElements >= 3:
            if RNGComponents[2] >= 1:
                g.add((Order1, n.MeasurementUnitCode, Literal(RNG[2][0])))
            if RNGComponents[2] >= 2:
                g.add((Order1, n.RangeMinimumQuantity, Literal(RNG[2][1])))
            if RNGComponents[2] >= 3:
                g.add((Order1, n.RangeMaximumQuantity, Literal(RNG[2][2])))
        #SCC - Scheduling conditions.
        if SCCElements >= 2:
            g.add((Order1, n.DeliveryPlanCommitmentLevelCode, Literal(SCC[1][0])))
        if SCCElements >= 3:
            g.add((Order1, n.DeliveryInstructionCode, Literal(SCC[2][0])))
        if SCCElements >= 4:
            if SCCComponents[3] >= 1:
                g.add((Order1, n.FrequencyCode, Literal(SCC[3][0])))
            if SCCComponents[3] >= 2:
                g.add((Order1, n.DespatchPatternCode, Literal(SCC[3][1])))
            if SCCComponents[3] >= 3:
                g.add((Order1, n.DespatchPatternTimingCode, Literal(SCC[3][2])))
        #APR - Additional price information.
        if APRElements >= 2:
            g.add((Order1, n.TradeClassCode, Literal(APR[1][0])))
        if APRElements >= 3:
            if APRComponents[2] >= 1:
                g.add((Order1, n.PriceMultiplierRate, Literal(APR[2][0])))
            if APRComponents[2] >= 2:
                g.add((Order1, n.PriceMultiplierTypeCodeQualifier, Literal(APR[2][1])))
        if APRElements >= 4:
            if APRComponents[3] >= 1:
                g.add((Order1, n.ChangeReasonDescriptionCode, Literal(APR[3][0])))
            if APRComponents[3] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(APR[3][1])))
            if APRComponents[3] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(APR[3][2])))
            if APRComponents[3] >= 4:
                g.add((Order1, n.ChangeReasonDescription, Literal(APR[3][3])))
        #HAN - Handling instructions.
        if HANElements >= 2:
            if HANComponents[1] >= 1:
                g.add((Order1, n.HandlingInstructionDescriptionCode, Literal(HAN[1][0])))
            if HANComponents[1] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(HAN[1][1])))
            if HANComponents[1] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(HAN[1][2])))
            if HANComponents[1] >= 4:
                g.add((Order1, n.HandlingInstructionDescription, Literal(HAN[1][3])))
        if HANElements >= 3:
            if HANComponents[2] >= 1:
                g.add((Order1, n.HazardousMaterialCategoryNameCode, Literal(HAN[2][0])))
            if HANComponents[2] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(HAN[2][1])))
            if HANComponents[2] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(HAN[2][2])))
            if HANComponents[2] >= 4:
                g.add((Order1, n.HazardousMaterialCategoryName, Literal(HAN[2][3])))
        #TDT - Transport details.
        # NOTE(review): predicate names mix "...AgencyCode" and
        # "...AgencyCoded" (elements 5 and 8) — likely a naming inconsistency,
        # but changing them would change the emitted RDF predicates.
        if TDTElements >= 2:
            g.add((Order1, n.TransportStageCodeQualifier, Literal(TDT[1][0])))
        if TDTElements >= 3:
            g.add((Order1, n.MeansOfTransportJourneyIdentifier, Literal(TDT[2][0])))
        if TDTElements >= 4:
            if TDTComponents[3] >= 1:
                g.add((Order1, n.TransportModeNameCode, Literal(TDT[3][0])))
            if TDTComponents[3] >= 2:
                g.add((Order1, n.TransportModeName, Literal(TDT[3][1])))
        if TDTElements >= 5:
            if TDTComponents[4] >= 1:
                g.add((Order1, n.TransportMeansDescriptionCode, Literal(TDT[4][0])))
            if TDTComponents[4] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(TDT[4][1])))
            if TDTComponents[4] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TDT[4][2])))
            if TDTComponents[4] >= 4:
                g.add((Order1, n.TransportMeansDescription, Literal(TDT[4][3])))
        if TDTElements >= 6:
            if TDTComponents[5] >= 1:
                g.add((Order1, n.CarrierIdentification, Literal(TDT[5][0])))
            if TDTComponents[5] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(TDT[5][1])))
            if TDTComponents[5] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCoded, Literal(TDT[5][2])))
            if TDTComponents[5] >= 4:
                g.add((Order1, n.CarrierName, Literal(TDT[5][3])))
        if TDTElements >= 7:
            g.add((Order1, n.TransitDirectionIndicatorCode, Literal(TDT[6][0])))
        if TDTElements >= 8:
            if TDTComponents[7] >= 1:
                g.add((Order1, n.ExcessTransportationReasonCode, Literal(TDT[7][0])))
            if TDTComponents[7] >= 2:
                g.add((Order1, n.ExcessTransportationResponsibilityCode, Literal(TDT[7][1])))
            if TDTComponents[7] >= 3:
                g.add((Order1, n.CustomerShipmentAuthorizationIdentifier, Literal(TDT[7][2])))
        if TDTElements >= 9:
            if TDTComponents[8] >= 1:
                g.add((Order1, n.TransportMeansIdentificationNameIdentifier, Literal(TDT[8][0])))
            if TDTComponents[8] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(TDT[8][1])))
            if TDTComponents[8] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCoded, Literal(TDT[8][2])))
            if TDTComponents[8] >= 4:
                g.add((Order1, n.TransportMeansIdentificationName, Literal(TDT[8][3])))
            if TDTComponents[8] >= 5:
                g.add((Order1, n.TransportMeansNationalityCode, Literal(TDT[8][4])))
        if TDTElements >= 10:
            g.add((Order1, n.TransportMeansOwnershipIndicatorCode, Literal(TDT[9][0])))
#PAC
if PACElements >= 2:
g.add((Order1, n.PackagesQuantity, Literal(PAC[1][0])))
if PACElements >= 3:
g.add((Order1, n.PackagingLevelCode, Literal(PAC[2][0])))
if PACComponents[2] >= 2:
g.add((Order1, n.PackagingRelatedDescriptionCode, Literal(PAC[2][1])))
if PACComponents[2] >= 2:
g.add((Order1, n.PackagingTermsAndConditionsCode, Literal(PAC[2][2])))
if PACElements >= 4:
g.add((Order1, n.PackageTypeDescriptionCode, Literal(PAC[3][0])))
if PACComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(PAC[3][1])))
if PACComponents[3] >= 2:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PAC[3][2])))
if PACComponents[3] >= 3:
g.add((Order1, n.TypeOfPackages, Literal(PAC[3][3])))
if PACElements >= 5:
g.add((Order1, n.DescriptionFormatCode, Literal(PAC[4][0])))
if PACComponents[4] >= 2:
g.add((Order1, n.TypeOfPackages, Literal(PAC[4][1])))
if PACComponents[4] >= 3:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PAC[4][2])))
if PACComponents[4] >= 4:
g.add((Order1, n.TypeOfPackages, Literal(PAC[4][3])))
if PACComponents[4] >= 5:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PAC[4][4])))
if PACElements >= 6:
g.add((Order1, n.ReturnablePackageFreightPaymentResponsibilityCode, Literal(PAC[5][0])))
if PACComponents[5] >= 2:
g.add((Order1, n.ReturnablePackageLoadContentsCode, Literal(PAC[5][1])))
        #FII - Financial institution information.
        if FIIElements >= 2:
            g.add((Order1, n.PartyFunctionCodeQualifier, Literal(FII[1][0])))
        if FIIElements >= 3:
            if FIIComponents[2] >= 1:
                g.add((Order1, n.AccountHolderIdentifier, Literal(FII[2][0])))
            if FIIComponents[2] >= 2:
                g.add((Order1, n.AccountHolderName, Literal(FII[2][1])))
            # AccountHolderName may repeat (two name components).
            if FIIComponents[2] >= 3:
                g.add((Order1, n.AccountHolderName, Literal(FII[2][2])))
            if FIIComponents[2] >= 4:
                g.add((Order1, n.CurrencyIdentificationCode, Literal(FII[2][3])))
        if FIIElements >= 4:
            if FIIComponents[3] >= 1:
                g.add((Order1, n.InstitutionNameCode, Literal(FII[3][0])))
            if FIIComponents[3] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(FII[3][1])))
            if FIIComponents[3] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(FII[3][2])))
            if FIIComponents[3] >= 4:
                g.add((Order1, n.InstitutionBranchIdentifier, Literal(FII[3][3])))
            if FIIComponents[3] >= 5:
                g.add((Order1, n.CodeListIdentificationCode, Literal(FII[3][4])))
            if FIIComponents[3] >= 6:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(FII[3][5])))
            if FIIComponents[3] >= 7:
                g.add((Order1, n.InstitutionName, Literal(FII[3][6])))
            if FIIComponents[3] >= 8:
                g.add((Order1, n.InstitutionBranchLocationName, Literal(FII[3][7])))
        if FIIElements >= 5:
            g.add((Order1, n.CountryNameCode, Literal(FII[4][0])))
        #BGM - Beginning of message.
        # NOTE(review): the BGMComponents[...] guards are not nested under the
        # matching BGMElements guards — a short BGM segment would raise
        # IndexError; presumably real messages always carry these elements.
        if BGMElements >= 2:
            g.add((Order1, n.DocumentNameCode, Literal(BGM[1][0])))
        if BGMComponents[1] >= 2:
            g.add((Order1, n.CodeListIdentificationCode, Literal(BGM[1][1])))
        if BGMComponents[1] >= 3:
            g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(BGM[1][2])))
        if BGMComponents[1] >= 4:
            g.add((Order1, n.DocumentName, Literal(BGM[1][3])))
        if BGMElements >= 3:
            g.add((Order1, n.DocumentIdentifier, Literal(BGM[2][0])))
        if BGMComponents[2] >= 2:
            g.add((Order1, n.VersionIdentifier, Literal(BGM[2][1])))
        if BGMComponents[2] >= 3:
            g.add((Order1, n.RevisionIdentifier, Literal(BGM[2][2])))
        if BGMElements >= 4:
            g.add((Order1, n.MessageFunctionCode, Literal(BGM[3][0])))
        if BGMElements >= 5:
            g.add((Order1, n.ResponseTypeCode, Literal(BGM[4][0])))
#MEA
if MEAElements >= 2:
g.add((Order1, n.MeasurementPurposeCodeQualifier, Literal(MEA[1][0])))
if MEAElements >= 3:
if MEAComponents[2] >= 1:
g.add((Order1, n.MeasuredAttributeCode, Literal(MEA[2][0])))
if MEAComponents[2] >= 2:
g.add((Order1, n.MeasurementSignificanceCode, Literal(MEA[2][1])))
if MEAComponents[2] >= 3:
g.add((Order1, n.NonDiscreteMeasurementNameCode, Literal(MEA[2][2])))
if MEAComponents[2] >= 4:
g.add((Order1, n.NonDiscreteMeasurementAttribute, Literal(MEA[2][3])))
if MEAElements >= 4:
if MEAComponents[3] >= 1:
g.add((Order1, n.MeasurementUnitCode, Literal(MEA[3][0])))
if MEAComponents[3] >= 2:
g.add((Order1, n.Measure, Literal(MEA[3][1])))
if MEAComponents[3] >= 3:
g.add((Order1, n.RangeMinimumQuantity, Literal(MEA[3][2])))
if MEAComponents[3] >= 4:
g.add((Order1, n.RangeMaximumQuantity, Literal(MEA[3][3])))
if MEAComponents[3] >= 5:
g.add((Order1, n.SignigicantDigitsQuantity, Literal(MEA[3][3])))
if MEAElements >= 5:
g.add((Order1, n.SurfaceOrLayerCode, Literal(MEA[4][0])))
        #CUX - Currencies: up to two currency-details composites plus an
        # exchange rate and rate-market identifier.
        if CUXElements >= 2:
            if CUXComponents[1] >= 1:
                g.add((Order1, n.CurrencyUsageCodeQualifier, Literal(CUX[1][0])))
            if CUXComponents[1] >= 2:
                g.add((Order1, n.CurrencyIdentificationCode, Literal(CUX[1][1])))
            if CUXComponents[1] >= 3:
                g.add((Order1, n.CurrencyTypeCodeQualifier, Literal(CUX[1][2])))
            if CUXComponents[1] >= 4:
                g.add((Order1, n.CurrencyRate, Literal(CUX[1][3])))
        if CUXElements >= 3:
            if CUXComponents[2] >= 1:
                g.add((Order1, n.CurrencyUsageCodeQualifier, Literal(CUX[2][0])))
            if CUXComponents[2] >= 2:
                g.add((Order1, n.CurrencyIdentificationCode, Literal(CUX[2][1])))
            if CUXComponents[2] >= 3:
                g.add((Order1, n.CurrencyTypeCodeQualifier, Literal(CUX[2][2])))
            if CUXComponents[2] >= 4:
                g.add((Order1, n.CurrencyRate, Literal(CUX[2][3])))
        if CUXElements >= 4:
            g.add((Order1, n.CurrencyExchangeRate, Literal(CUX[3][0])))
        if CUXElements >= 5:
            g.add((Order1, n.ExchangeRateCurrencyMarketIdentifier, Literal(CUX[4][0])))
        #PYT - Payment terms.
        if PYTElements >= 2:
            g.add((Order1, n.PaymentTermsTypeCodeQualifier, Literal(PYT[1][0])))
        if PYTElements >= 3:
            if PYTComponents[2] >= 1:
                g.add((Order1, n.PaymentTermsDescriptionIdentifier, Literal(PYT[2][0])))
            if PYTComponents[2] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(PYT[2][1])))
            if PYTComponents[2] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PYT[2][2])))
            if PYTComponents[2] >= 4:
                g.add((Order1, n.PaymentTermsDescription, Literal(PYT[2][3])))
        if PYTElements >= 4:
            g.add((Order1, n.TimeReferenceCode, Literal(PYT[3][0])))
        if PYTElements >= 5:
            g.add((Order1, n.TermsTimeRelationCode, Literal(PYT[4][0])))
        if PYTElements >= 6:
            g.add((Order1, n.PeriodTypeCode, Literal(PYT[5][0])))
        if PYTElements >= 7:
            g.add((Order1, n.PeriodCountQuantity, Literal(PYT[6][0])))
        #DTM - Date/time/period.
        # NOTE(review): the DTMComponents[1] guards are not nested under the
        # DTMElements guard — a bare "DTM" segment would raise IndexError.
        if DTMElements >= 2:
            g.add((Order1, n.DateOrTimeOrPeriodFunctionCodeQualifier, Literal(DTM[1][0])))
        if DTMComponents[1] >= 2:
            g.add((Order1, n.DateOrTimeOrPeriodText, Literal(DTM[1][1])))
        if DTMComponents[1] >= 3:
            g.add((Order1, n.DateOrTimeOrPeriodFormatCode, Literal(DTM[1][2])))
        #FTX - Free text.
        # NOTE(review): the FTXComponents[...] guards are not nested under the
        # matching FTXElements guards — a short FTX segment would raise
        # IndexError; presumably real messages always carry these elements.
        if FTXElements >= 2:
            g.add((Order1, n.TextSubjectCodeQualifier, Literal(FTX[1][0])))
        if FTXElements >= 3:
            g.add((Order1, n.FreeTextFunctionCode, Literal(FTX[2][0])))
        if FTXElements >= 4:
            g.add((Order1, n.FreeTextDescriptionCode, Literal(FTX[3][0])))
        if FTXComponents[3] >= 2:
            g.add((Order1, n.CodeListIdentificationCode, Literal(FTX[3][1])))
        if FTXComponents[3] >= 3:
            g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(FTX[3][2])))
        if FTXElements >= 5:
            g.add((Order1, n.FreeText, Literal(FTX[4][0])))
        # FreeText repeats for up to five text components.
        if FTXComponents[4] >= 2:
            g.add((Order1, n.FreeText, Literal(FTX[4][1])))
        if FTXComponents[4] >= 3:
            g.add((Order1, n.FreeText, Literal(FTX[4][2])))
        if FTXComponents[4] >= 4:
            g.add((Order1, n.FreeText, Literal(FTX[4][3])))
        if FTXComponents[4] >= 5:
            g.add((Order1, n.FreeText, Literal(FTX[4][4])))
        if FTXElements >= 6:
            g.add((Order1, n.LanguageNameCode, Literal(FTX[5][0])))
        if FTXElements >= 7:
            g.add((Order1, n.FreeTextFormatCode, Literal(FTX[6][0])))
#RFF
if RFFElements >= 2:
g.add((Order1, n.ReferenceCodeQualifier, Literal(RFF[1][0])))
if RFFComponents >= 2:
g.add((Order1, n.ReferenceIdentifier, Literal(RFF[1][1])))
if RFFComponents >= 3:
g.add((Order1, n.DocumentLineIdentifier, Literal(RFF[1][2])))
if RFFComponents >= 4:
g.add((Order1, n.ReferenceVersionIdentifier, Literal(RFF[1][3])))
if RFFComponents >= 5:
g.add((Order1, n.RevisionIdentifier, Literal(RFF[1][4])))
#TOD
if TODElements >= 2:
g.add((Order1, n.DeliveryOrTransportTermsFunctionCode, Literal(TOD[1][0])))
if TODElements >= 3:
g.add((Order1, n.TransportChargesPaymentMethodCode, Literal(TOD[2][0])))
if TODElements >= 4:
if TODComponents[3] >= 1:
g.add((Order1, n.DeliveryOrTransportTermsDescriptionCode, Literal(TOD[3][0])))
if TODComponents[3] >= 1:
g.add((Order1, n.CodeListIdentificationCode, Literal(TOD[3][1])))
if TODComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TOD[3][2])))
if TODComponents[3] >=4:
g.add((Order1, n.DeliveryOrTransportTermsDescription, Literal(TOD[3][3])))
if TODComponents[3] >= 5:
g.add((Order1, n.DeliveryOrTransportTermsDescription, Literal(TOD[3][4])))
#NAD
if NADElements >= 2:
g.add((Order1, n.PartyFunctionCodeQualifier, Literal(NAD[1][0])))
if NADElements >= 3:
g.add((Order1, n.PartyIdentifier, Literal(NAD[2][0])))
if NADComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(NAD[2][1])))
if NADComponents[2] == 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(NAD[2][2])))
if NADElements >= 4:
g.add((Order1, n.NameAndAdressDescription, Literal(NAD[3][0])))
if NADComponents[3] >= 2:
g.add((Order1, n.NameAndAdressDescription, Literal(NAD[3][1])))
if NADComponents[3] >= 3:
g.add((Order1, n.NameAndAdressDescription, Literal(NAD[3][2])))
if NADComponents[3] >= 4:
g.add((Order1, n.NameAndAdressDescription, Literal(NAD[3][3])))
if NADComponents[3] >= 5:
g.add((Order1, n.NameAndAdressDescription, Literal(NAD[3][4])))
if NADElements >= 5:
g.add((Order1, n.PartyName, Literal(NAD[4][0])))
if NADComponents[4] >= 2:
g.add((Order1, n.PartyName, Literal(NAD[4][1])))
if NADComponents[4] >= 3:
g.add((Order1, n.PartyName, Literal(NAD[4][2])))
if NADComponents[4] >= 4:
g.add((Order1, n.PartyName, Literal(NAD[4][3])))
if NADComponents[4] >= 5:
g.add((Order1, n.PartyName, Literal(NAD[4][4])))
if NADComponents[4] >= 6:
g.add((Order1, n.PartyNameFormatCode, Literal(NAD[4][5])))
if NADElements >= 6:
g.add((Order1, n.StreetAndNumberOrPostOfficeBoxIdentifier, Literal(NAD[5][0])))
if NADComponents[5] >= 2:
g.add((Order1, n.StreetAndNumberOrPostOfficeBoxIdentifier, Literal(NAD[5][1])))
if NADComponents[5] >= 3:
g.add((Order1, n.StreetAndNumberOrPostOfficeBoxIdentifier, Literal(NAD[5][2])))
if NADComponents[5] >= 4:
g.add((Order1, n.StreetAndNumberOrPostOfficeBoxIdentifier, Literal(NAD[5][3])))
if NADElements >= 7:
g.add((Order1, n.CityName, Literal(NAD[6][0])))
if NADElements >= 8:
g.add((Order1, n.CountrySubEntityNameCode, Literal(NAD[7][0])))
if NADComponents[7] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(NAD[7][1])))
if NADComponents[7] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(NAD[7][2])))
if NADComponents[7] >= 4:
g.add((Order1, n.CountrySubEntityName, Literal(NAD[7][3])))
if NADElements >= 9:
g.add((Order1, n.PostalIdentificationCode, Literal(NAD[8][0])))
if NADElements >= 10:
g.add((Order1, n.CountryNameCode, Literal(NAD[9][0])))
#GIR
if GIRElements >= 2:
g.add((Order1, n.SetTypeCodeQualifier, Literal(GIR[1][0])))
if GIRElements >= 3:
g.add((Order1, n.ObjectIdentifier, Literal(GIR[2][0])))
if GIRComponents[2] >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIR[2][1])))
if GIRComponents[2] >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(GIR[2][2])))
if GIRElements >= 4:
g.add((Order1, n.ObjectIdentifier, Literal(GIR[3][0])))
if GIRComponents[2] >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIR[3][1])))
if GIRComponents[2] >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(GIR[3][2])))
if GIRElements >= 5:
g.add((Order1, n.ObjectIdentifier, Literal(GIR[4][0])))
if GIRComponents[2] >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIR[4][1])))
if GIRComponents[2] >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(GIR[4][2])))
if GIRElements >= 6:
g.add((Order1, n.ObjectIdentifier, Literal(GIR[5][0])))
if GIRComponents[2] >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIR[5][1])))
if GIRComponents[2] >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(GIR[5][2])))
if GIRElements >= 7:
g.add((Order1, n.ObjectIdentifier, Literal(GIR[6][0])))
if GIRComponents[2] >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIR[6][1])))
if GIRComponents[2] >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(GIR[6][2])))
        #LIN - Line item.
        # NOTE(review): the LINComponents[...] guards are not nested under the
        # matching LINElements guards — a short LIN segment would raise
        # IndexError; presumably real messages always carry these elements.
        if LINElements >= 2:
            g.add((Order1, n. LineItemIdentifier , Literal(LIN[1][0])))
        if LINElements >= 3:
            g.add((Order1, n.ActionRequestNotificationDescriptionCode, Literal(LIN[2][0])))
        if LINElements >= 4:
            g.add((Order1, n.ItemIdentifier, Literal(LIN[3][0])))
        if LINComponents[3] >= 2:
            g.add((Order1, n.ItemTypeIdentificationCode, Literal(LIN[3][1])))
        if LINComponents[3] >= 3:
            g.add((Order1, n.CodeListIdentificationCode, Literal(LIN[3][2])))
        if LINComponents[3] >= 4:
            g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(LIN[3][3])))
        if LINElements >= 5:
            g.add((Order1, n.SubLineIndicatorCode , Literal(LIN[4][0])))
        if LINComponents[4] >= 2:
            g.add((Order1, n.LineItemIdentifier, Literal(LIN[4][1])))
        if LINElements >= 6:
            g.add((Order1, n.ConfigurationLevelNumber , Literal(LIN[5][0])))
        if LINElements >= 7:
            g.add((Order1, n.ConfigurationOperationCode , Literal(LIN[6][0])))
        #QTY - Quantity.
        if QTYElements >= 2:
            g.add((Order1, n.QuantityTypeCodeQualifier, Literal(QTY[1][0])))
        if QTYComponents[1] >= 2:
            g.add((Order1, n.Quantity, Literal(QTY[1][1])))
        if QTYComponents[1] >= 3:
            g.add((Order1, n.MeasurementUnitCode, Literal(QTY[1][2])))
        #RJL - Accounting journal identification.
        if RJLElements >= 2:
            if RJLComponents[1] >= 1:
                g.add((Order1, n.AccountingJournalIdentifier, Literal(RJL[1][0])))
            if RJLComponents[1] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(RJL[1][1])))
            if RJLComponents[1] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(RJL[1][2])))
            if RJLComponents[1] >= 4:
                g.add((Order1, n.AccountingJournalName, Literal(RJL[1][3])))
        if RJLElements >= 3:
            if RJLComponents[2] >= 1:
                g.add((Order1, n.AccountingEntryTypeNameCode, Literal(RJL[2][0])))
            if RJLComponents[2] >= 2:
                g.add((Order1, n.CodeListIdentificationCode, Literal(RJL[2][1])))
            if RJLComponents[2] >= 3:
                g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(RJL[2][2])))
            if RJLComponents[2] >= 4:
                g.add((Order1, n.AccountingEntryTypeName, Literal(RJL[2][3])))
        #PRI - Price details.
        if PRIElements >= 2:
            g.add((Order1, n.PriceCodeQualifier, Literal(PRI[1][0])))
            if PRIComponents[1] >=2:
                g.add((Order1, n.PriceAmount, Literal(PRI[1][1])))
            if PRIComponents[1] >=3:
                g.add((Order1, n.PriceTypeCode, Literal(PRI[1][2])))
            if PRIComponents[1] >=4:
                g.add((Order1, n.PriceSpecificationCode, Literal(PRI[1][3])))
            if PRIComponents[1] >=5:
                g.add((Order1, n.UnitPriceBasisQuantity, Literal(PRI[1][4])))
            if PRIComponents[1] >=6:
                g.add((Order1, n.MeasurementUnitCode, Literal(PRI[1][5])))
        if PRIElements >= 3:
            g.add((Order1, n.SubLineItemPriceChangeOperationCode, Literal(PRI[2][0])))
        #UNS - Section control (detail/summary separator).
        if UNSElements >= 2:
            g.add((Order1, n.SectionIdentification, Literal(UNS[1][0])))
#LOC
if LOCElements >= 2:
g.add((Order1, n.LocationFunctionCodeQualifier, Literal(LOC[1][0])))
if LOCElements >= 3:
if LOCComponents[2] >= 1:
g.add((Order1, n.LocationNameCode, Literal(LOC[2][0])))
if LOCComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(LOC[2][1])))
if LOCComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(LOC[2][2])))
if LOCComponents[2] >= 4:
g.add((Order1, n.LocationName, Literal(LOC[2][3])))
if LOCElements >= 4:
if LOCComponents[3] >= 1:
g.add((Order1, n.FirstRelatedLocationNameCode, Literal(LOC[3][0])))
if LOCComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(LOC[3][1])))
if LOCComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(LOC[3][2])))
if LOCComponents[2] >= 4:
g.add((Order1, n.FirstRelatedLocationName, Literal(LOC[3][3])))
if LOCElements >= 5:
if LOCComponents[4] >= 1:
g.add((Order1, n.SecondRelatedLocationNameCode, Literal(LOC[4][0])))
if LOCComponents[4] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(LOC[4][1])))
if LOCComponents[4] >= 3:
g.add((Order1, n.CodeListResponsibleAgenyCode, Literal(LOC[4][2])))
if LOCComponents[4] >= 4:
g.add((Order1, n.SecondRelatedLocationName, Literal(LOC[4][3])))
if LOCElements >= 6:
g.add((Order1, n.RelationCode, Literal(LOC[5][0])))
# CNT
if CNTElements >= 2:
g.add((Order1, n.ControlTotalTypeCodeQualifier, Literal(CNT[1][0])))
if CNTComponents[1] >= 2:
g.add((Order1, n.ControlTotalQuantity, Literal(CNT[1][1])))
if CNTComponents[1] >= 3:
g.add((Order1, n.MeasurementUnitCode, Literal(CNT[1][2])))
#UNT
if UNTElements >= 2:
g.add((Order1, n.NumberOfSegmentInAMessage,Literal(UNT[1])))
g.add((Order1, n.MessageReferenceNumber,Literal(UNT[2])))
#PAI
if PAIElements >= 2:
if PAIComponents[1] >= 1:
g.add((Order1, n.PaymentConditionsCode, Literal(PAI[1][0])))
if PAIComponents[1] >= 2:
g.add((Order1, n.PaymentGuaranteeMeansCode, Literal(PAI[1][1])))
if PAIComponents[1] >= 3:
g.add((Order1, n.PaymentMeansCode, Literal(PAI[1][2])))
if PAIComponents[1] >= 4:
g.add((Order1, n.CodeListIdentificationCode, Literal(PAI[1][3])))
if PAIComponents[1] >= 5:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PAI[1][4])))
if PAIComponents[1] >= 6:
g.add((Order1, n.PaymentChannelCode, Literal(PAI[1][5])))
#ALI
if ALIElements >= 2:
g.add((Order1, n.CountryOfOriginNameCode, Literal(ALI[1][0])))
if ALIElements >= 3:
g.add((Order1, n.DutyRegimeTypeCode, Literal(ALI[2][0])))
if ALIElements >= 4:
g.add((Order1, n.SpecialConditionCode, Literal(ALI[3][0])))
if ALIElements >= 5:
g.add((Order1, n.CountryOfOriginNameCode, Literal(ALI[4][0])))
if ALIElements >= 6:
g.add((Order1, n.CountryOfOriginNameCode, Literal(ALI[5][0])))
if ALIElements >= 7:
g.add((Order1, n.CountryOfOriginNameCode, Literal(ALI[6][0])))
if ALIElements >= 8:
g.add((Order1, n.CountryOfOriginNameCode, Literal(ALI[7][0])))
#Negar Part,21-39
#PCD, GRP21
if PCDElements >= 2:
g.add((Order1, n.PercentageTypeCodeQualifier, Literal(PCD[1][0])))
if PCDComponents[1] >= 2:
g.add((Order1, n.Percentage, Literal(PCD[1][1])))
if PCDComponents[1] >= 3:
g.add((Order1, n.PercentageBasisIdentificatonCoded, Literal(PCD[1][2])))
if PCDComponents[1] >= 4:
g.add((Order1, n.CodeListIdentification, Literal(PCD[1][3])))
if PCDComponents[1] >= 5:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PCD[1][4])))
if PCDElements >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(PCD[2][0])))
#MOA, GRP22
if MOAElements >= 2:
g.add((Order1, n.MonetaryAmountTypeCodeQualifier, Literal(MOA[1][0])))
if MOAComponents[1] >= 2:
g.add((Order1, n.MonetaryAmount, Literal(MOA[1][1])))
if MOAComponents[1] >= 3:
g.add((Order1, n.CurrencyIdentificationCode, Literal(MOA[1][2])))
if MOAComponents[1] >= 4:
g.add((Order1, n.CurrencyTypeCodeQualifier, Literal(MOA[1][3])))
if MOAComponents[1] >= 5:
g.add((Order1, n.StatusDescriptionCode, Literal(MOA[1][4])))
# RTE, GRP23
if RTEElements >= 2:
g.add((Order1, n.RateTypeCodeQualifier, Literal(RTE[1][0])))
if RTEComponents[1] >= 2:
g.add((Order1, n.UnitPriceBasisRate, Literal(RTE[1][1])))
if RTEComponents[1] >= 3:
g.add((Order1, n.UnitPriceBasisQuantity, Literal(RTE[1][2])))
if RTEComponents[1] >= 4:
g.add((Order1, n.MeasurementUnitCode, Literal(RTE[1][3])))
if RTEElements >= 3:
g.add((Order1, n.StatusDescriptionCode, Literal(RTE[2][0])))
# TAX, GRP24
if TAXElements >= 2:
g.add((Order1, n.DutyOrTaxOrFeeFunctionCodeQualifier, Literal(TAX[1][0])))
if TAXElements >= 3:
g.add((Order1, n.DutyOrTaxOrFeeTypeNameCode, Literal(TAX[2][0])))
if TAXComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(TAX[2][1])))
if TAXComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TAX[2][2])))
if TAXComponents[2] >= 4:
g.add((Order1, n.DutyOrTaxOrFeeTypeName, Literal(TAX[2][3])))
if TAXElements >= 4:
g.add((Order1, n.DutyOrTaxOrFeeAccountCode, Literal(TAX[3][0])))
if TAXComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(TAX[3][1])))
if TAXComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TAX[3][2])))
if TAXElements >= 5:
g.add((Order1, n.DutyORTaxOrFeeAssessmentBasisQuantity, Literal(TAX[4][0])))
if TAXElements >= 6:
g.add((Order1, n.DutyOrTaxOrFeeRateCode, Literal(TAX[5][0])))
if TAXComponents[5] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(TAX[5][1])))
if TAXComponents[5] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TAX[5][2])))
if TAXComponents[5] >= 4:
g.add((Order1, n.DutyOrTaxOrFeeRate, Literal(TAX[5][3])))
if TAXComponents[5] >= 2:
g.add((Order1, n.DutyOrTaxOrFeeRateBasisCode, Literal(TAX[5][4])))
if TAXComponents[5] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(TAX[5][5])))
if TAXComponents[5] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(TAX[5][6])))
if TAXElements >= 7:
g.add((Order1, n.DutyOrTaxOrFeeCategoryCode, Literal(TAX[6][0])))
if TAXElements >= 8:
g.add((Order1, n.PartyTaxIdentifier, Literal(TAX[7][0])))
if TAXElements >= 9:
g.add((Order1, n.CalculationSequenceCode, Literal(TAX[8][0])))
# RCS, GRP25
if RCSElements >= 2:
g.add((Order1, n.SectorAreaIdentificationCodeQualifier, Literal(RCS[1][0])))
if RCSElements >= 3:
g.add((Order1, n.RequirementOrConditionDescriptionIdentifier, Literal(RCS[2][0])))
if RCSComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(RCS[2][1])))
if RCSComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(RCS[2][2])))
if RCSComponents[2] >= 4:
g.add((Order1, n.RequirementOrConditionDescription, Literal(RCS[2][3])))
if RCSElements >= 4:
g.add((Order1, n.ActionRequestNotificationDescriptionCode, Literal(RCS[3][0])))
if RCSElements >= 5:
g.add((Order1, n.CountryNameCode, Literal(RCS[4][0])))
# DGS, GRP26
if DGSElements >= 2:
g.add((Order1, n.DangerousGoodsRegulationsCode, Literal(DGS[1][0])))
if DGSElements >= 3:
g.add((Order1, n.HazardIdentificationCode, Literal(DGS[2][0])))
if DGSComponents[2] >= 2:
g.add((Order1, n.AdditionalHazardClassificationIdentifier, Literal(DGS[2][1])))
if DGSComponents[2] >= 3:
g.add((Order1, n.HazardCodeVersionIdentifier, Literal(DGS[2][2])))
if DGSElements >= 4:
g.add((Order1, n.UnitedNationsDangerousGoodsIdentifier, Literal(DGS[3][0])))
if DGSComponents[3] >= 2:
g.add((Order1, n.DangerousGoodsFlashpointDescription, Literal(DGS[3][1])))
if DGSElements >= 5:
g.add((Order1, n.ShipmentFlashpointDegree, Literal(DGS[4][0])))
if DGSComponents[4] >= 2:
g.add((Order1, n.MeasurementUnitCode, Literal(DGS[4][1])))
if DGSElements >= 6:
g.add((Order1, n.PackagingDangerLevelCode, Literal(DGS[5][0])))
if DGSElements >= 7:
g.add((Order1, n.EmergencyProcedureForShipsIdentifier, Literal(DGS[6][0])))
if DGSElements >= 8:
g.add((Order1, n.HazardMedicalFirstAidGuideIdentifier, Literal(DGS[7][0])))
if DGSElements >= 9:
g.add((Order1, n.TransportEmergencyCardIdentifier, Literal(DGS[8][0])))
if DGSElements >= 10:
g.add((Order1, n.OrangeHazardPlacardUpperPartIdentifier, Literal(DGS[9][0])))
if DGSComponents[9] >= 2:
g.add((Order1, n. OrangeHazardPlacardLowerPartIdentifier , Literal(DGS[9][1])))
if DGSElements >= 11:
g.add((Order1, n.DangerousGoodsMarkingIdentifier , Literal(DGS[10][0])))
if DGSComponents[10] >= 2:
g.add((Order1, n.DangerousGoodsMarkingIdentifier, Literal(DGS[10][1])))
if DGSComponents[10] >= 3:
g.add((Order1, n.DangerousGoodsMarkingIdentifier, Literal(DGS[10][2])))
if DGSElements >= 12:
g.add((Order1, n.PackingInstructionTypeCode, Literal(DGS[11][0])))
if DGSElements >= 13:
g.add((Order1, n.HazardousMeansOfTransportCategoryCode, Literal(DGS[12][0])))
if DGSElements >= 14:
g.add((Order1, n.HazardousCargoTransportAuthorisationCode, Literal(DGS[13][0])))
#CTA, GRP27
if CTAElements >= 2:
g.add((Order1, n.ContactFunctionCode, Literal(CTA[1][0])))
if CTAElements >= 3:
g.add((Order1, n.DepartmentOrEmployeeNameCode, Literal(CTA[2][0])))
if CTAComponents[2] >= 2:
g.add((Order1, n.DepartmentOrEmployeeName, Literal(CTA[2][1])))
#COM, GRP27
if COMElements >= 2:
g.add((Order1, n.CommunicationAddressIdentifier, Literal(COM[1][0])))
if COMComponents[1] >= 2:
g.add((Order1, n.CommunicationAddressCodeQualifier, Literal(COM[1][1])))
#PIA, GRP28
if PIAElements >= 2:
g.add((Order1, n.ProductIdentifierCodeQualifier, Literal(PIA[1][0])))
if PIAElements >= 3:
g.add((Order1, n.ItemIdentifier, Literal(PIA[2][0])))
if PIAComponents[2] >= 2:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PIA[2][1])))
if PIAComponents[2] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(PIA[2][2])))
if PIAComponents[2] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PIA[2][3])))
if PIAElements >= 4:
g.add((Order1, n.ItemIdentifier, Literal(PIA[3][0])))
if PIAComponents[3] >= 2:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PIA[3][1])))
if PIAComponents[3] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(PIA[3][2])))
if PIAComponents[3] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PIA[3][3])))
if PIAElements >= 5:
g.add((Order1, n.ItemIdentifier, Literal(PIA[4][0])))
if PIAComponents[4] >= 2:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PIA[4][1])))
if PIAComponents[4] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(PIA[4][2])))
if PIAComponents[4] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PIA[4][3])))
if PIAElements >= 6:
g.add((Order1, n.ItemIdentifier, Literal(PIA[5][0])))
if PIAComponents[5] >= 2:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PIA[5][1])))
if PIAComponents[5] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(PIA[5][2])))
if PIAComponents[5] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PIA[5][3])))
if PIAElements >= 7:
g.add((Order1, n.ItemIdentifier, Literal(PIA[6][0])))
if PIAComponents[6] >= 2:
g.add((Order1, n.ItemTypeIdentificationCode, Literal(PIA[6][1])))
if PIAComponents[6] >= 3:
g.add((Order1, n.CodeListIdentificationCode, Literal(PIA[6][2])))
if PIAComponents[6] >= 4:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PIA[6][3])))
#GEI, GRP28
if GEIElements >= 2:
g.add((Order1, n.ProcessingInformationCodeQualifier, Literal(GEI[1][0])))
if GEIElements >= 3:
g.add((Order1, n.ProcessingIndicatorDescriptionCode, Literal(GEI[2][0])))
if GEIComponents[2] >= 2:
g.add((Order1, n. CodeListIdentificationCode , Literal(GEI[2][1])))
if GEIComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAagencyCode, Literal(GEI[2][2])))
if GEIComponents[2] >= 4:
g.add((Order1, n.ProcessingIndicatorDescription, Literal(GEI[2][3])))
if GEIElements >= 4:
g.add((Order1, n.ProcessTypeDescriptionCode, Literal(GEI[3][0])))
#GIN, GRP28
if GINElements >= 2:
g.add((Order1, n.ObjectIdentificationCodeQualifier, Literal(GIN[1][0])))
if GINElements >= 3:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[2][0])))
if GINComponents[2] >= 2:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[2][1])))
if GINElements >= 4:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[3][0])))
if GINComponents[3] >= 2:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[3][1])))
if GINElements >= 5:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[4][0])))
if GINComponents[4] >= 2:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[4][1])))
if GINElements >= 6:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[5][0])))
if GINComponents[5] >= 2:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[5][1])))
if GINElements >= 7:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[6][0])))
if GINComponents[6] >= 2:
g.add((Order1, n.ObjectIdentifier, Literal(GIN[6][1])))
#EQD
if EQDElements >= 2:
g.add((Order1, n.EquipmentTypeCodeQualifier, Literal(EQD[1][0])))
if EQDElements >= 3:
if EQDComponents[2] >= 1:
g.add((Order1, n.EquipmentIdentifier, Literal(EQD[2][0])))
if EQDComponents[2] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(EQD[2][1])))
if EQDComponents[2] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(EQD[2][2])))
if EQDComponents[2] >= 4:
g.add((Order1, n.CountryNameCode, Literal(EQD[2][3])))
if EQDElements >= 4:
if EQDComponents[3] >= 1:
g.add((Order1, n.EquipmentSizeAndTypeDescriptionCode, Literal(EQD[3][0])))
if EQDComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(EQD[3][1])))
if EQDComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(EQD[3][2])))
if EQDComponents[3] >= 4:
g.add((Order1, n.EquipmentSizeAndTypeDescription, Literal(EQD[3][3])))
if EQDElements >= 5:
g.add((Order1, n.EquipmentSupplierCode, Literal(EQD[4][0])))
if EQDElements >= 6:
g.add((Order1, n.EquipmentStatusCode, Literal(EQD[5][0])))
if EQDElements >= 7:
g.add((Order1, n.FullOrEmptyIndicatorCode, Literal(EQD[6][0])))
# QVR, GRP28
if QVRElements >= 2:
g.add((Order1, n.VarianceQuantity, Literal(QVR[1][0])))
if QVRComponents[1] >= 2:
g.add((Order1, n.QuantityTypeCodeQualifier, Literal(QVR[1][1])))
if QVRElements >= 3:
g.add((Order1, n.DiscrepancyNatureIdentificationCode, Literal(QVR[2][0])))
if QVRElements >= 4:
g.add((Order1, n.ChangeReasonDescriptionCode, Literal(QVR[3][0])))
if QVRComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(QVR[3][1])))
if QVRComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(QVR[3][2])))
if QVRComponents[3] >= 4:
g.add((Order1, n.ChangeReasonDescription, Literal(QVR[3][3])))
# DOC, GRP28
if DOCElements >= 2:
g.add((Order1, n.DocumentNameCode, Literal(DOC[1][0])))
if DOCComponents[1] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(DOC[1][1])))
if DOCComponents[1] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(DOC[1][2])))
if DOCComponents[1] >= 4:
g.add((Order1, n.DocumentName, Literal(DOC[1][3])))
if DOCElements >= 3:
g.add((Order1, n.DocumentIdentifier, Literal(DOC[2][0])))
if DOCComponents[2] >= 2:
g.add((Order1, n.DocumentStatusCode, Literal(DOC[2][1])))
if DOCComponents[2] >= 3:
g.add((Order1, n.DocumentSourceDescription, Literal(DOC[2][2])))
if DOCComponents[2] >= 4:
g.add((Order1, n.LanguageNameCode, Literal(DOC[2][3])))
if DOCComponents[2] >= 5:
g.add((Order1, n.VersionIdentifier, Literal(DOC[2][4])))
if DOCComponents[2] >= 6:
g.add((Order1, n.RevisionIdentifier, Literal(DOC[2][5])))
if DOCElements >= 4:
g.add((Order1, n.CommunicationMediumTypeCode, Literal(DOC[3][0])))
if DOCElements >= 5:
g.add((Order1, n.DocumentCopiesRequiredQuantity, Literal(DOC[4][0])))
if DOCElements >= 6:
g.add((Order1, n.DocumentOriginalsRequiredQuantity, Literal(DOC[5][0])))
# MTD, GRP28
if MTDElements >= 2:
g.add((Order1, n.ObjectTypeCodeQualifier, Literal(MTD[1][0])))
if MTDElements >= 3:
g.add((Order1, n.MaintenanceOperationCode, Literal(MTD[2][0])))
if MTDElements >= 4:
g.add((Order1, n.MaintenanceOperationOperatorCode, Literal(MTD[3][0])))
if MTDElements >= 5:
g.add((Order1, n.MaintenanceOperationPayerCode, Literal(MTD[4][0])))
#CCI, GRP29
if CCIElements >= 2:
g.add((Order1, n.ClassTypeCode, Literal(CCI[1][0])))
if CCIElements >= 3:
g.add((Order1, n.MeasuredAttributeCode, Literal(CCI[2][0])))
if CCIComponents[2] >= 2:
g.add((Order1, n.MeasurementSignificanceCode, Literal(CCI[2][1])))
if CCIComponents[2] >= 3:
g.add((Order1, n.NonDiscreteMeasurementNameCode, Literal(CCI[2][2])))
if CCIComponents[2] >= 4:
g.add((Order1, n.NonDiscreteMeasurementName, Literal(CCI[2][3])))
if CCIElements >= 4:
g.add((Order1, n.CharacteristicDescriptionCode, Literal(CCI[3][0])))
if CCIComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(CCI[3][1])))
if CCIComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode , Literal(CCI[3][2])))
if CCIComponents[3] >= 4:
g.add((Order1, n.CharacteristicDescription, Literal(CCI[3][3])))
if CCIComponents[3] >= 5:
g.add((Order1, n.CharacteristicDescription, Literal(CCI[3][4])))
if CCIElements >= 5:
g.add((Order1, n.CharacteristicRelevanceCode, Literal(CCI[4][0])))
#CAV, GRP29
if CAVElements >= 4:
g.add((Order1, n.CharacteristicValueDescriptionCode, Literal(CAV[3][0])))
if CAVComponents[3] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(CAV[3][1])))
if CAVComponents[3] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(CAV[3][2])))
if CAVComponents[3] >= 4:
g.add((Order1, n.CharacteristicValueDescription, Literal(CAV[3][3])))
if CAVComponents[3] >= 5:
g.add((Order1, n.CharacteristicValueDescription, Literal(CAV[3][4])))
#PRI, GRP32
if PRIElements >= 2:
g.add((Order1, n.PriceCodeQualifier, Literal(PRI[1][0])))
if PRIComponents[1] >= 2:
g.add((Order1, n.PriceAmount, Literal(PRI[1][1])))
if PRIComponents[1] >= 3:
g.add((Order1, n.PriceTypeCode, Literal(PRI[1][2])))
if PRIComponents[1] >= 4:
g.add((Order1, n.PriceSpecificationCode, Literal(PRI[1][3])))
if PRIComponents[1] >= 5:
g.add((Order1, n.UnitPriceBasisQuantity, Literal(PRI[1][4])))
if PRIComponents[1] >= 6:
g.add((Order1, n.MeasurementUnitCode, Literal(PRI[1][5])))
if PRIElements >= 3:
g.add((Order1, n.SubLineItemPriceChangeOperationCode, Literal(PRI[2][0])))
#PCI, GRP36
if PCIElements >= 2:
g.add((Order1, n.MarkingInstructionsCode, Literal(PCI[1][0])))
if PCIElements >= 3:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][0])))
if PCIComponents[2] >= 2:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][1])))
if PCIComponents[2] >= 3:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][2])))
if PCIComponents[2] >= 4:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][3])))
if PCIComponents[2] >= 5:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][4])))
if PCIComponents[2] >= 6:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][5])))
if PCIComponents[2] >= 7:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][6])))
if PCIComponents[2] >= 8:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][7])))
if PCIComponents[2] >= 9:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][8])))
if PCIComponents[2] >= 10:
g.add((Order1, n.ShippingMarksDescription, Literal(PCI[2][9])))
if PCIElements >= 4:
g.add((Order1, n.ContainerOrPackageContentsIndicatorCode, Literal(PCI[3][0])))
if PCIElements >= 5:
g.add((Order1, n.MarkingTypeCode, Literal(PCI[4][0])))
if PCIComponents[4] >= 2:
g.add((Order1, n.CodeListIdentificationCode, Literal(PCI[4][1])))
if PCIComponents[4] >= 3:
g.add((Order1, n.CodeListResponsibleAgencyCode, Literal(PCI[4][2])))
# STG
if STGElements >= 2:
g.add((Order1, n.ProcessStageCodeQualifier, Literal(STG[1][0])))
if STGElements >= 3:
g.add((Order1, n.ProcessStagesQuantity, Literal(STG[2][0])))
if STGElements >= 4:
g.add((Order1, n.ProcessStagesActualQuantity, Literal(STG[3][0])))
# Add triples using store's add method.
# Iterate over triples in store and print them out.
import sys
sys.stdout = open('outputfile1-2003.ttl', 'w')
# For each foaf:Person in the store print out its mbox property.
for person in g.subjects(RDF.type, FOAF.Person):
for mbox in g.objects(person, FOAF.mbox):
print(mbox)
# Bind a few prefix, namespace pairs for more readable output
g.bind("dc", DC)
g.bind("foaf", FOAF)
print( g.serialize(format='n3') )
|
[
"noreply@github.com"
] |
noreply@github.com
|
0672494c6a8366c0cd36200301233ebad4414e09
|
8e008a799fb5dc64f9c6b68f01255483e9140f7f
|
/apps/operation/migrations/0001_initial.py
|
5d0316c6f1b6638b0509a3907d828d2b4a5b1772
|
[] |
no_license
|
eggsyy/EggXonline
|
5ec72c811b8b9041274486eb9def70033f947ebc
|
b115aa5f5b647c195a79d71db276656dca586229
|
refs/heads/master
| 2021-01-19T16:45:29.627382
| 2017-09-04T12:53:50
| 2017-09-04T12:53:50
| 101,025,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,711
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-08-23 05:28
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the "operation" app: creates the
    # five models that record user interactions (course comments, course
    # enquiries, enrolments, favourites and site messages).  All
    # verbose_name values are the original Chinese labels, preserved
    # byte-for-byte as \u escapes.

    # First migration of this app.
    initial = True

    dependencies = [
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # The Course foreign keys below require the courses app's initial migration.
        ('courses', '0001_initial'),
    ]

    operations = [
        # CourseComments: one authenticated user's comment on one course.
        migrations.CreateModel(
            name='CourseComments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comments', models.CharField(max_length=200, verbose_name='\u8bc4\u8bba')),
                # default is the callable datetime.now, evaluated per-row at save time
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='\u8bfe\u7a0b')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237')),
            ],
            options={
                'verbose_name': '\u8bfe\u7a0b\u8bc4\u8bba',
                'verbose_name_plural': '\u8bfe\u7a0b\u8bc4\u8bba',
            },
        ),
        # UserAsk: a course-enquiry form submission (name / mobile / course
        # name); intentionally not linked to an authenticated user.
        migrations.CreateModel(
            name='UserAsk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='\u59d3\u540d')),
                ('mobile', models.CharField(max_length=11, verbose_name='\u624b\u673a')),
                ('course_name', models.CharField(max_length=50, verbose_name='\u8bfe\u7a0b\u540d')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u54a8\u8be2',
                'verbose_name_plural': '\u7528\u6237\u54a8\u8be2',
            },
        ),
        # UserCourse: enrolment relation between a user and a course.
        migrations.CreateModel(
            name='UserCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Course', verbose_name='\u8bfe\u7a0b')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u8bfe\u7a0b',
                'verbose_name_plural': '\u7528\u6237\u8bfe\u7a0b',
            },
        ),
        # UserFavorite: generic favourite — fav_id is the primary key of the
        # favourited row and fav_type says which table it lives in
        # (1 = course, 2 = course organisation, 3 = teacher).
        migrations.CreateModel(
            name='UserFavorite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fav_id', models.IntegerField(default=0, verbose_name='\u6570\u636eid')),
                ('fav_type', models.IntegerField(choices=[(1, '\u8bfe\u7a0b'), (2, '\u8bfe\u7a0b\u673a\u6784'), (3, '\u8bb2\u5e08')], default=1, verbose_name='\u6536\u85cf\u7c7b\u578b')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u6536\u85cf',
                'verbose_name_plural': '\u7528\u6237\u6536\u85cf',
            },
        ),
        # UserMessage: site message for a user; the integer `user` field is
        # the receiver's id (0 presumably meaning broadcast — inferred from
        # the default, confirm against application code).
        migrations.CreateModel(
            name='UserMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.IntegerField(default=0, verbose_name='\u63a5\u6536\u7528\u6237')),
                ('message', models.CharField(max_length=500, verbose_name='\u6d88\u606f\u5185\u5bb9')),
                ('has_read', models.BooleanField(default=False, verbose_name='\u662f\u5426\u5df2\u8bfb')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\u6dfb\u52a0\u65f6\u95f4')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u6d88\u606f',
                'verbose_name_plural': '\u7528\u6237\u6d88\u606f',
            },
        ),
    ]
|
[
"464063606@qq.com"
] |
464063606@qq.com
|
0889b53048122ca1f6ccc0de3350bdcedd767098
|
3432ac89d63221d61eb14ee0a3c6871e84cb66be
|
/p3ong.py
|
b3e6ccacf8107122e159c3624341acdccba0c0f9
|
[] |
no_license
|
eliseong/Othello
|
7e7b1109d5d58512c3358a387fe5ded2c368b9ae
|
934b076a1eaeebf97411456043999bdb08183241
|
refs/heads/master
| 2022-03-20T07:06:28.181837
| 2019-09-03T09:07:26
| 2019-09-03T09:07:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,347
|
py
|
# Jan 12, 0952 version
import sys
import random
import math

# Board cell markers.  A board is a flat 100-character string laid out as a
# 10x10 grid: the 8x8 playing area surrounded by a one-square OUTER border,
# which lets direction arithmetic step off the edge without bounds checks
# (it simply lands on '?').
EMPTY, BLACK, WHITE, OUTER = '.', '@', 'o', '?'

# To refer to neighbor squares we add a direction offset to a square index.
N, S, E, W = -10, 10, 1, -1
NE, SE, NW, SW = N + E, S + E, N + W, S + W
DIRECTIONS = (N, NE, E, SE, S, SW, W, NW)
PLAYERS = {BLACK: "Black", WHITE: "White"}
# 10x10 indices of the four playable corners.
CORNERS = (11, 18, 81, 88)

# Per-square positional weights (10x10 layout; border squares weigh 0):
# corners are very valuable, squares adjacent to corners are dangerous.
SCORE_MATRIX = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 210, -20, 30, 15, 15, 30, -20, 210, 0,
                0, -20, -50, -5, -5, -5, -5, -50, -20, 0,
                0, 30, -5, 15, 3, 3, 15, -5, 30, 0,
                0, 15, -5, 3, 3, 3, 3, -5, 15, 0,
                0, 15, -5, 3, 3, 3, 3, -5, 15, 0,
                0, 30, -5, 15, 3, 3, 15, -5, 30, 0,
                0, -20, -50, -5, -5, -5, -5, -50, -20, 0,
                0, 210, -20, 30, 15, 15, 30, -20, 210, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0]


########## ########## ########## ########## ########## ##########
# The strategy class for your AI
# You must implement this class
# and the method best_strategy
# Do not tamper with the init method's parameters, or best_strategy's parameters
# But you can change anything inside this you want otherwise
#############################################################
class Node:
    """A node in the game-search tree: a board position, the move that
    produced it, and its evaluated score (None until evaluated)."""

    def __init__(self, board, move=None, score=None):
        self.board = board
        self.move = move
        self.score = score

    def __lt__(self, other):
        # Order nodes by score so min()/max() pick the extreme child.
        return self.score < other.score


class Strategy():
    """Othello-playing AI: board representation helpers, move legality and
    execution, and a depth-limited alpha-beta minimax search over
    mobility / weighted-position heuristics."""

    def __init__(self):
        # Current position: a fresh 10x10 bordered board.
        self.board = self.get_starting_board()

    def get_starting_board(self):
        """Return a new board string with the four initial discs placed."""
        topbottom = 10 * OUTER
        midempty = OUTER + (8 * EMPTY) + OUTER
        r4 = OUTER + (3 * EMPTY) + WHITE + BLACK + (3 * EMPTY) + OUTER
        half = topbottom + (3 * midempty) + r4
        board = half + half[::-1]  # bottom half is the mirror of the top half
        return board

    ##########################################################
    def convert_size8_to_size10(self, board8):
        """Format helper: wrap an 8x8 board string in the OUTER border and
        normalise piece characters ('X' -> '@', 'O' -> 'o')."""
        topbottom = 10 * OUTER
        middle = "".join([OUTER + board8[x * 8:(x + 1) * 8] + OUTER for x in range(8)])
        newBoard = topbottom + middle + topbottom
        newBoard = newBoard.replace("X", "@")
        newBoard = newBoard.replace("O", "o")
        return newBoard

    def convert_size10_to_size8(self, board10):
        """Strip the OUTER border, returning the bare 8x8 board string."""
        newBoard = "".join([x for x in board10 if x != OUTER])
        return newBoard

    def convert_index10_to_index8(self, i):
        """Map a 10x10 (bordered) square index to its 0..63 8x8 index."""
        # Skip the leading border row (11 squares) plus the two border
        # columns of every full row above this one.
        deduct = 11 + 2 * (int(i / 10) - 1)
        return i - deduct

    def get_pretty_board(self, board):
        """Return a space-separated, multi-line rendering of a square board
        string (works for both 8x8 and 10x10 boards)."""
        size = int(math.sqrt(len(board)))
        pretty = "".join([" ".join(board[x * size:(x + 1) * size]) + '\n' for x in range(size)])
        pretty = pretty[:len(pretty) - 1]  # drop the trailing newline
        return pretty

    ##########################################################
    def opponent(self, player):
        """Return the other player's piece character."""
        if player == BLACK:
            return WHITE
        elif player == WHITE:
            return BLACK

    def find_match(self, board, player, square, direction):
        """From the empty *square*, walk in *direction* over a run of
        opponent pieces; return the index of the friendly piece that closes
        the bracket, or None if no capture exists in this direction."""
        opp = self.opponent(player)
        stepD = 1
        sq = board[square + stepD * direction]
        while sq == opp:
            stepD += 1
            sq = board[square + stepD * direction]
        if sq == player and stepD != 1:
            return square + stepD * direction  # bracket closes here
        return None

    def is_move_valid(self, board, player, move):
        """True if placing at *move* captures in at least one direction.
        *move* must be an empty square (asserted)."""
        # NOTE(review): validation via assert is stripped under -O; kept to
        # preserve the original contract.
        assert (board[move] == EMPTY)
        for d in DIRECTIONS:
            if self.find_match(board, player, move, d) is not None:
                return True
        return False

    def make_move(self, board, player, move):
        """Return a NEW board string with *player* placed at *move* and all
        bracketed opponent pieces flipped (the input board is unchanged)."""
        boardList = list(board)
        for d in DIRECTIONS:
            m = self.find_match(board, player, move, d)
            if m is not None:  # a bracket exists in this direction
                # Recolour everything from `move` up to (not including) the
                # closing piece `m`, stepping by the direction offset.
                for x in range(move, m, d):
                    boardList[x] = player
        return "".join(boardList)

    def get_valid_moves(self, board, player):
        """List all square indices where *player* can legally move."""
        possible = []
        blankIndex = [x for x in range(len(board)) if board[x] == EMPTY]
        for b in blankIndex:
            if self.is_move_valid(board, player, b):
                possible.append(b)
        return possible

    def has_any_valid_moves(self, board, player):
        """Can *player* move at all?"""
        return len(self.get_valid_moves(board, player)) > 0

    def next_player(self, board, prev_player):
        """Who moves next?  The opponent if they can move, otherwise
        *prev_player* again if they can; None means the game is over."""
        if self.has_any_valid_moves(board, self.opponent(prev_player)):
            return self.opponent(prev_player)
        else:  # opponent must pass; previous player moves again if able
            if self.has_any_valid_moves(board, prev_player):
                return prev_player
        return None

    def MAINscorechoose(self, board, player, myMoves):
        """Pick the evaluation function by game phase: mobility in the
        opening, positional weights from the 16th disc onward."""
        count = 64 - board.count(".")  # discs placed so far
        if count < 16:  # beginning of game -- mobility
            return self.mobility_score(board, player, myMoves)
        elif count in range(16, 65):  # middle/end of game -- weighted positions
            return self.weighted_score(board)

    def mobility_score(self, board, player, myMoves):
        """Mobility heuristic: total number of replies the opponent would
        have, summed over all of *player*'s candidate moves.

        NOTE(review): the value is not sign-adjusted per player, so the MAX
        and MIN sides of the search see the same raw sum; kept unchanged to
        preserve behaviour (an earlier posneg-based variant was removed).
        """
        opp = self.opponent(player)
        newBoards = [self.make_move(board, player, m) for m in myMoves]
        oppnumposs = [len(self.get_valid_moves(b, opp)) for b in newBoards]
        return (sum(oppnumposs))

    def number_board(self, board):
        """Map a board string to ints: +1 Black, -1 White, 0 otherwise."""
        VALS = {BLACK: 1, WHITE: -1, EMPTY: 0, OUTER: 0}
        boardList = [VALS[x] for x in board]
        return boardList

    def weighted_score(self, board, player=BLACK):
        """Positional heuristic from Black's point of view: the sum of
        SCORE_MATRIX weights over occupied squares (+weight for a Black
        disc, -weight for a White disc).  *player* is accepted only for
        signature compatibility.

        BUGFIX: the original summed a[x]*b[y] over ALL index pairs, which
        collapses to sum(a) * sum(SCORE_MATRIX) and ignores piece positions
        entirely; the intended value is the element-wise dot product.
        """
        values = self.number_board(board)
        return sum(v * w for v, w in zip(values, SCORE_MATRIX))

    def tilescore(self, board, player=BLACK):
        """Disc-count margin: *player*'s pieces minus the opponent's."""
        playerNum = board.count(player)
        oppNum = board.count(self.opponent(player))
        return playerNum - oppNum

    def game_over(self, board, player):
        """True when neither side has a legal move."""
        if self.next_player(board, player) is None:
            return True
        return False

    ### Monitoring players
    class IllegalMoveError(Exception):
        """Raised by the referee when a player attempts an illegal square."""

        def __init__(self, player, move, board):
            self.player = player
            self.move = move
            self.board = board

        def __str__(self):
            return '%s cannot move to square %d' % (PLAYERS[self.player], self.move)

    ################ strategies #################
    def alphabeta_minmax_search(self, node, player, depth, alpha, beta):
        """Depth-limited minimax with alpha-beta pruning.

        Black maximises, White minimises.  Terminal positions score as
        1000 * final disc margin; depth-0 leaves use MAINscorechoose; a
        corner move is taken immediately.  Returns the chosen child Node
        (or *node* itself at depth 0) with .score filled in.
        """
        best = {BLACK: max, WHITE: min}
        board = node.board
        my_moves = self.get_valid_moves(board, player)
        if depth == 0:  # leaf: evaluate heuristically
            node.score = self.MAINscorechoose(board, player, my_moves)
            return node
        children = []
        for m in my_moves:
            next_board = self.make_move(board, player, m)
            next_player = self.next_player(next_board, player)
            if next_player is None:  # game over after this move
                # Want the largest tile margin at the end of the game.
                c = Node(next_board, move=m, score=1000 * self.tilescore(next_board))
                children.append(c)
            else:  # game still going: recurse one ply deeper
                c = Node(next_board, move=m)
                c.score = self.alphabeta_minmax_search(c, next_player, depth - 1, alpha, beta).score
                children.append(c)
            kid = children[-1]
            if player == BLACK:
                if kid.score > alpha:
                    alpha = kid.score
            elif player == WHITE:
                if kid.score < beta:
                    beta = kid.score
            if alpha > beta:
                break  # prune: this subtree cannot change the result
            if m in CORNERS:
                return c  # grab corners immediately
        if not children:
            # Defensive guard: callers should only pass a player who can
            # move (next_player guarantees it), but score the node rather
            # than crash on max([]) if that contract is ever broken.
            node.score = self.MAINscorechoose(board, player, my_moves)
            return node
        winner = best[player](children)
        node.score = winner.score
        return winner

    def alphabeta_minmax_strategy(self, board, player, depth=3):
        """Run the alpha-beta search from *board*; returns the chosen Node
        (its .move is the square to play)."""
        n = Node(board)
        newN = self.alphabeta_minmax_search(n, player, depth, -100000, 100000)
        return newN

    def corner_random_strategy(self, board, player):
        """Baseline: take a corner if one is legal, else move at random."""
        moves = self.get_valid_moves(board, player)
        for m in moves:
            if m in CORNERS:
                return m
        return random.choice(moves)

    def best_strategy(self, board, player, best_move, running):
        """Public tournament entry point: loop while *running* is set,
        repeatedly publishing the current choice through *best_move*.value.
        Switches to a full-depth search when fewer than 15 squares remain.
        """
        while (True):
            if running.value:
                n = 15
                best_move.value = self.alphabeta_minmax_strategy(board, player).move
                if board.count('.') < n:
                    best_move.value = self.alphabeta_minmax_strategy(board, player, depth=65).move

    # Default strategy used by the framework.
    standard_strategy = alphabeta_minmax_strategy
######################################################################################################
def main():
s = Strategy()
# test: ...........................OX......XO........................... X
# X...X...OOOOO..X.OXOO.X..XXOXXOX.OOXXXX..OOXXXXX..OOXXX...X.X.X. O
# ...oooooox.xoooooxxxxooooxoxxxoooxxxxooooxxoxooooxxxxxoooooooooo o
board8 = sys.argv[1].upper()
player = sys.argv[2].upper()
if player == "X": player = BLACK
elif player == "O": player = WHITE
board10 = s.convert_size8_to_size10(board8)
print()
# 2D representation of board
print(s.get_pretty_board(board8))
# list of possible moves
print()
possMovesList = [str(s.convert_index10_to_index8(x)) for x in s.get_valid_moves(board10, player)]
possMovesStr = " ".join(possMovesList)
print("Possible moves", possMovesStr)
# chooseMove = s.convert_index10_to_index8(s.corner_random_strategy(board10, player))
chooseMove = s.convert_index10_to_index8(s.alphabeta_minmax_strategy(board10, player).move)
print("My heuristic choice is", chooseMove)
# run minimax for full tree if there's less than n spaces left on board
n = 15
if board10.count('.') < n:
node = s.alphabeta_minmax_strategy(board10, player, depth=65)
move = s.convert_index10_to_index8(node.move)
if player == BLACK:
print("Minimax", int(node.score/1000), move)
elif player == WHITE:
print("Minimax", -1*int(node.score/1000), move)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
4c84bb0dd04ef0a5558dab96f89e9a850724abde
|
0386591b51fdbf5759faef6afb8729b64a3f1589
|
/layerserver/widgets/modificationdate.py
|
3b9aab935d42e5aa5a0047c815f565c8306afad5
|
[
"BSD-3-Clause"
] |
permissive
|
giscube/giscube-admin
|
1e155402e094eb4db1f7ca260a8d1402e27a31df
|
4ce285a6301f59a8e48ecf78d58ef83c3827b5e0
|
refs/heads/main
| 2023-07-11T17:23:56.531443
| 2023-02-06T15:12:31
| 2023-02-06T15:12:31
| 94,087,469
| 7
| 1
|
BSD-3-Clause
| 2023-07-07T13:22:09
| 2017-06-12T11:12:56
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
from datetime import datetime
from django.utils.timezone import get_current_timezone
from .date import DateWidget
class ModificationDateWidget(DateWidget):
base_type = 'date'
@staticmethod
def update(request, instance, validated_data, widget):
validated_data[widget['name']] = datetime.now(tz=get_current_timezone()).date()
@staticmethod
def is_valid(cleaned_data):
if not cleaned_data['readonly']:
return ModificationDateWidget.ERROR_READONLY_REQUIRED
return DateWidget.is_valid(cleaned_data)
|
[
"abusquets@gmail.com"
] |
abusquets@gmail.com
|
6cbd914c60db7675f24b394b6a519421c5b8c581
|
791b0bfa6e62991c50c77aa3c0da54704633725b
|
/eguchi_etal_2019_natgeo.py
|
dbc0109169d38951b0501475eb8ea8a93beccaae
|
[] |
no_license
|
sherry0451/kan
|
d78851d2d053ee6a2383271d2024ebc5f05f8a38
|
88ff568672fcd1b4d51c767f7c7af2fb024c511b
|
refs/heads/master
| 2022-04-21T06:50:26.005403
| 2020-04-24T00:47:46
| 2020-04-24T00:47:46
| 258,367,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,208
|
py
|
# Python Script to run C-O cycle model from Eguchi et al. 2019 Nature Geoscience. Model is designed to track
#C fluxes, reservoirs, and isotopes, as well as atmospheric O levels. Model was
#designed to investigate the relationship between large atmospheric
#oxygenation events and large C isotope excursions. In the model, these events are
#driven by changing the C emissions at Mid-ocean ridges, which in turn will
#change the flux of C leaving the atmosphere as carbonates and organic C.
#Changes in organic C reservoirs will drive changes in atmospheric O levels.
#C isotope excursion is driven by the relatively quick release of carbonates at
#arcs and the delayed release of organic C at ocean island volcanoes. See
#Eguchi et al. 2019 Nature Geoscience for more model details.
#
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
# time domain
# model time domain
t0 = 0 # Model start time [Myr]
tf = 5000 # Model end time [Myr]
t = np.linspace(t0,tf,(tf-t0+1))
tnum=len(t)
onset=1000 #time of first tectonic transtion [Myr]
tchange=2700 #time of second tectonic change [Myr]
crb_tau=30 #delay time for release of carbonates at arcs
org_tau=350#delay time for release of organic C at OIBs
# Constants
k=0.1 # weathering constant
forg=0.2 #fraction of C from atmosphere as organics
xcrb=1-forg # fraction of carbonates
chi_org=0.6 # fraction of organics that are subducted from the surface
alpha_org=0.0 # fraction of subducted organics that come out at arcs
orgc4=0.0 # fraction that remains in the mantle
chi_carb=chi_org # fraction of carbonates that are subducted from teh surface
alpha_crb=1.0 # fraction of subducted carbonates that come out at arcs
crbc4=0.0 # fraction that remains in the mantle
arcc1=0.0 # scalar of how much primitive c outgasses at arc
# initial conditions
c_atmi=0 # mass of atm-ocean C reservoir [g]
c_crbi=0 # mass of crustal carbonate C reservoir [g]
c_orgi=0 # mass of crustal organic C reservoir [g]
c_mcrbi=0 # mass of mantle carbonate C reservoir [g]
c_morgi=0 # mass of mantle organic C reservoir [g]
c_mntli=1e23 # mass of mantle primordial C reservoir [g]
d13C_atmi=-5 # d13c of C in atm-ocean [permill]
d13C_crbi=0 # d13c of C in carbonate [permill]
d13C_orgi=-25 # d13c of C in organic C [permill]
d13C_prim=-5 # d13c of C in primitive mantle C [permill]
F_mori=1e13#intial MORB C flux [g/Myr]
F_oibi=1e13#initial OIB flux [g/Myr]
F_arci=1e13#initial arc flux [g/Myr]
#Changes to MORB C fluxes to drive transitions
morb_change1=1e16 # MORB flux after Initial transition [g/Myr]
morb_change2=1e19 #MORB flux after 2nd tectonic transition [g/Myr]
#-----------No Need to change anything after this line-------------------------
F_orgi=forg*k*c_atmi
F_crbi=xcrb*k*c_atmi
F_sorgi=0
F_scrbi=0
# reservoirs
c_atm=np.zeros(tnum)
c_atm[0]=c_atmi
o_atm=np.zeros(tnum)
c_crb=np.zeros(tnum)
c_crb[0]=c_crbi
c_org=np.zeros(tnum)
c_org[0]=c_orgi
c_mcrb=np.zeros(tnum)
for index, item in enumerate(c_mcrb):
c_mcrb[index] = c_mcrbi
c_morg=np.zeros(tnum)
for index, item in enumerate(c_morg):
c_morg[index] = c_morgi
c_mntl=np.zeros(tnum)
c_mntl[0]=c_mntli
# isotopes
d13C_atm=np.zeros(tnum)
d13C_atm[0]=d13C_atmi
d13C_crb=np.zeros(tnum)
d13C_crb[0]=d13C_crbi
d13C_org=np.zeros(tnum)
d13C_org[0]=d13C_orgi
d13C_prm=np.ones(tnum)
d13C_prm=d13C_prm*d13C_prim
d13C_arc=np.zeros(tnum)
d13C_oib=np.zeros(tnum)
d13C_oib[0]=d13C_prim
d13C_mor=np.zeros(tnum)
for index, item in enumerate(d13C_mor):
d13C_mor[index] = -5
# fluxes
F_org=np.zeros(tnum)
F_org[0]=F_orgi
F_crb=np.zeros(tnum)
F_crb[0]=F_crbi
F_sorg=np.zeros(tnum)
F_sorg[0]=F_sorgi
F_scrb=np.zeros(tnum)
F_scrb[0]=F_scrbi
F_mor=np.zeros(tnum)
for index, item in enumerate(F_mor):
F_mor[index] =F_mori
F_mor[0]=F_mori
F_oib=np.zeros(tnum)
F_oib[0]=F_oibi
F_arc=np.zeros(tnum)
F_arc[0]=F_arci
for index, item in enumerate(F_arc):
F_arc[index]=F_arci
F_tot=np.zeros(tnum)
F_tot[0]=F_oib[0]+F_arc[0]+F_mor[0]
# iterate through model
# preconvection
for time in range(1,tnum):
if (t0+time)<(onset):
c_atm[time]=c_atm[time-1]+(F_tot[time-1]-(F_org[time-1]+F_crb[time-1]))
c_mntl[time]=c_mntl[time-1]-F_oib[time-1]
c_crb[time]=c_crb[time-1]+(F_crb[time-1]-F_scrb[time-1])
c_org[time]=c_org[time-1]+(F_org[time-1]-F_sorg[time-1])
F_oib[time]=F_oibi
F_arc[time]=F_arci
F_tot[time]=F_oib[time]
F_org[time]=forg*k*c_atm[time]
F_crb[time]=xcrb*k*c_atm[time]
F_tot[time]=F_oib[time]+F_arc[time]+F_mor[time]
d13C_oib[time]=d13C_prim
d13C_atm[time]=d13C_oib[time]
d13C_crb[time]=d13C_atm[time]+5
d13C_org[time]=d13C_atm[time]-20
elif (t0+time)<(onset+crb_tau):
atm_Fout=F_org[time-1]+F_crb[time-1]
c_atm[time]=c_atm[time-1]+(F_tot[time-1]-(atm_Fout))
c_crb[time]=c_crb[time-1]+(F_crb[time-1]-chi_carb*F_scrb[time-1])
c_org[time]=c_org[time-1]+(F_org[time-1]-chi_org*F_sorg[time-1])
c_mcrb[time]=c_mcrb[time-1]+F_scrb[time-1]
c_morg[time]=c_morg[time-1]+F_sorg[time-1]
c_mntl[time]=c_mntl[time-1]-F_oib[time-1]-F_mor[time-1]
F_oib[time]=F_oibi
F_mor[time]=morb_change1
F_org[time]=forg*k*c_atm[time]
F_crb[time]=xcrb*k*c_atm[time]
F_scrb[time]=chi_carb*F_crb[time]
F_sorg[time]=chi_org*F_org[time]
F_tot[time]=F_oib[time]+F_arc[time]+F_mor[time]
d13C_oib[time]=d13C_prim
d13C_atm[time]=d13C_oib[time]
d13C_crb[time]=d13C_atm[time]+5
d13C_org[time]=d13C_atm[time]-20
# outgassing only at arcs
elif (t0+time)<(onset+org_tau):
atm_Fout=F_org[time-1]+F_crb[time-1]
c_atm[time]=c_atm[time-1]+(F_tot[time-1]-(atm_Fout))
c_crb[time]=c_crb[time-1]+(F_crb[time-1]-F_scrb[time-1])
c_org[time]=c_org[time-1]+(F_org[time-1]-F_sorg[time-1])
c_mcrb[time]=c_mcrb[time-1]+F_scrb[time-1]-(1-crbc4)*alpha_crb*F_scrb[time-crb_tau]
c_morg[time]=c_morg[time-1]+F_sorg[time-1]-(1-orgc4)*alpha_org*F_sorg[time-crb_tau]
c_mntl[time]=c_mntl[time-1]-F_oib[time-1]-F_mor[time-1]-arcc1*(alpha_crb*F_scrb[time-crb_tau]+alpha_org*F_sorg[time-crb_tau])
F_oib[time]=F_oibi
F_mor[time]=morb_change1
F_org[time]=forg*k*c_atm[time]
F_crb[time]=xcrb*k*c_atm[time]
F_scrb[time]=chi_carb*F_crb[time]
F_sorg[time]=chi_org*F_org[time]
Farccrb=alpha_crb*F_scrb[time-crb_tau]
Farcorg=alpha_org*F_sorg[time-crb_tau]
Farcmntl=(arcc1)*(Farccrb+Farcorg)
F_arc[time]=Farcmntl+Farccrb+Farcorg
F_tot[time]=F_oib[time]+F_arc[time]+F_mor[time]
d13C_oib[time]=d13C_prim
d13C_arcorg=Farcorg/F_arc[time]*d13C_org[time-crb_tau]
d13C_arccrb=Farccrb/F_arc[time]*d13C_crb[time-crb_tau]
d13C_arcmntl=Farcmntl/F_arc[time]*d13C_prm[time]
d13C_arc[time]=d13C_arcorg+d13C_arccrb+d13C_arcmntl
d13C_atm[time]=F_oib[time]/F_tot[time]*d13C_oib[time]+F_arc[time]/F_tot[time]*d13C_arc[time]+F_mor[time]/F_tot[time]*d13C_mor[time]
d13C_crb[time]=d13C_atm[time]+5
d13C_org[time]=d13C_atm[time]-20
# have all systems going
elif ((t0+time)>=(onset+org_tau)) and ((t0+time)<tchange):
#else:
atm_Fout=F_org[time-1]+F_crb[time-1]
c_atm[time]=c_atm[time-1]+(F_tot[time-1]-(atm_Fout))
c_crb[time]=c_crb[time-1]+(F_crb[time-1]-F_scrb[time-1])
c_org[time]=c_org[time-1]+(F_org[time-1]-F_sorg[time-1])
c_mcrb[time]=c_mcrb[time-1]+F_scrb[time-1]-(1-crbc4)*alpha_crb*F_scrb[time-crb_tau]-(1-crbc4)*(1-alpha_crb)*F_scrb[time-org_tau]
c_morg[time]=c_morg[time-1]+F_sorg[time-1]-(1-orgc4)*alpha_org*F_sorg[time-crb_tau]-(1-orgc4)*(1-alpha_org)*F_sorg[time-org_tau]
c_mntl[time]=c_mntl[time-1]-F_oib[time-1]*0-F_mor[time-1]-arcc1*(alpha_crb*F_scrb[time-crb_tau]+alpha_org*F_sorg[time-crb_tau])-arcc1*(crbc4*F_scrb[time-org_tau]+orgc4*F_sorg[time-org_tau])
F_mor[time]=morb_change1
F_org[time]=forg*k*c_atm[time]
F_crb[time]=xcrb*k*c_atm[time]
F_scrb[time]=chi_carb*F_crb[time]
F_sorg[time]=chi_org*F_org[time]
Farccrb=alpha_crb*F_scrb[time-crb_tau]
Farcorg=alpha_org*F_sorg[time-crb_tau]
Farcmntl=(arcc1)*(Farccrb+Farcorg)
F_arc[time]=Farcmntl+Farccrb+Farcorg
Foibcrb=(1-alpha_crb)*F_scrb[time-org_tau]
Foiborg=(1-alpha_org)*F_sorg[time-org_tau]
Foibmntl=F_oibi
F_oib[time]=Foibcrb+Foiborg+Foibmntl
F_tot[time]=F_oib[time]+F_arc[time]+F_mor[time]
d13C_oiborg=Foiborg/F_oib[time]*d13C_org[time-org_tau]
d13C_oibcrb=Foibcrb/F_oib[time]*d13C_crb[time-org_tau]
d13C_oibmntl=Foibmntl/F_oib[time]*d13C_prm[time]
d13C_oib[time]=d13C_oiborg+d13C_oibcrb+d13C_oibmntl
d13C_arcorg=Farcorg/F_arc[time]*d13C_org[time-crb_tau]
d13C_arccrb=Farccrb/F_arc[time]*d13C_crb[time-crb_tau]
d13C_arcmntl=Farcmntl/F_arc[time]*d13C_prm[time]
d13C_arc[time]=d13C_arcorg+d13C_arccrb+d13C_arcmntl
d13C_atm[time]=F_oib[time]/F_tot[time]*d13C_oib[time]+F_arc[time]/F_tot[time]*d13C_arc[time]+F_mor[time]/F_tot[time]*d13C_mor[time]
d13C_crb[time]=d13C_atm[time]+5
d13C_org[time]=d13C_atm[time]-20
elif (t0+time)>=tchange:
#else:
atm_Fout=F_org[time-1]+F_crb[time-1]
F_mor[time]=morb_change2
c_atm[time]=c_atm[time-1]+(F_tot[time-1]-(atm_Fout))
c_crb[time]=c_crb[time-1]+(F_crb[time-1]-F_scrb[time-1])
c_org[time]=c_org[time-1]+(F_org[time-1]-F_sorg[time-1])
c_mcrb[time]=c_mcrb[time-1]+F_scrb[time-1]-(1-crbc4)*alpha_crb*F_scrb[time-crb_tau]-(1-crbc4)*(1-alpha_crb)*F_scrb[time-org_tau]
c_morg[time]=c_morg[time-1]+F_sorg[time-1]-(1-orgc4)*alpha_org*F_sorg[time-crb_tau]-(1-orgc4)*(1-alpha_org)*F_sorg[time-org_tau]
c_mntl[time]=c_mntl[time-1]-F_oib[time-1]*0-F_mor[time-1]-arcc1*(alpha_crb*F_scrb[time-crb_tau]+alpha_org*F_sorg[time-crb_tau])-arcc1*(crbc4*F_scrb[time-org_tau]+orgc4*F_sorg[time-org_tau])
F_org[time]=forg*k*c_atm[time]
F_crb[time]=xcrb*k*c_atm[time]
F_scrb[time]=chi_carb*F_crb[time]
F_sorg[time]=chi_org*F_org[time]
Farccrb=alpha_crb*F_scrb[time-crb_tau]
Farcorg=alpha_org*F_sorg[time-crb_tau]
Farcmntl=(arcc1)*(Farccrb+Farcorg)
F_arc[time]=Farcmntl+Farccrb+Farcorg
Foibcrb=(1-alpha_crb)*F_scrb[time-org_tau]
Foiborg=(1-alpha_org)*F_sorg[time-org_tau]
Foibmntl=F_oibi
F_oib[time]=(Foibcrb+Foiborg+Foibmntl)
F_tot[time]=F_oib[time]+F_arc[time]+F_mor[time]
d13C_oiborg=Foiborg/F_oib[time]*d13C_org[time-org_tau]
d13C_oibcrb=Foibcrb/F_oib[time]*d13C_crb[time-org_tau]
d13C_oibmntl=Foibmntl/F_oib[time]*d13C_prm[time]
d13C_oib[time]=d13C_oiborg+d13C_oibcrb+d13C_oibmntl
d13C_arcorg=Farcorg/F_arc[time]*d13C_org[time-crb_tau]
d13C_arccrb=Farccrb/F_arc[time]*d13C_crb[time-crb_tau]
d13C_arcmntl=Farcmntl/F_arc[time]*d13C_prm[time]
d13C_arc[time]=d13C_arcorg+d13C_arccrb+d13C_arcmntl
d13C_atm[time]=F_oib[time]/F_tot[time]*d13C_oib[time]+F_arc[time]/F_tot[time]*d13C_arc[time]+F_mor[time]/F_tot[time]*d13C_mor[time]
d13C_crb[time]=d13C_atm[time]+5
d13C_org[time]=d13C_atm[time]-20
nt = t[1:7]
new_d13C_crb = d13C_crb[1:7]
# visualize data
fig =plt.figure(1,[12,10])
ax = fig.add_subplot(2,2,1)
# fig, ax = plt.subplots(2,2,figsize=(15,7))
# xmajorLocator = MultipleLocator(5) #将x主刻度标签设置为20的倍数
# xmajorFormatter = FormatStrFormatter('%5.1f') #设置x轴标签文本的格式
# xminorLocator = MultipleLocator(1) #将x轴次刻度标签设置为5的倍数
# ymajorLocator = MultipleLocator(5) #将y轴主刻度标签设置为0.5的倍数
# ymajorFormatter = FormatStrFormatter('%1.1f') #设置y轴标签文本的格式
# yminorLocator = MultipleLocator(1) #将此y轴次刻度标签设置为0.1的倍数
# plt.rcParams['savefig.dpi'] = 300 #图片像素
# plt.rcParams['figure.dpi'] = 300 #分辨率
# fig1, ax = plt.subplot(2,2,1)
# minor ticks.
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
# For the minor ticks, use no labels; default NullFormatter.
ax.xaxis.set_minor_locator(MultipleLocator(0.2))
plt.plot(t, d13C_crb)
# plt.plot(t, d13C_crb1, alpha=0)
# plt.plot(t, d13C_crb2, alpha=0)
# plt.fill_between(t, d13C_crb1, d13C_crb2, alpha=0.3, facecolor="yellow")
plt.errorbar(nt, new_d13C_crb,yerr=3,fmt='o',ecolor='r',color='b',elinewidth=2,capsize=4)
plt.xlim([t[0],t[8]])
plt.ylim([-10,15])
# plt.xticks(new_ticks)
# set tick labels
# plt.yticks([-1, -0.5, 0.5,1])
plt.title('Isotopes')
plt.legend(frameon=False)
plt.subplot(2,2,2)
plt.semilogy(t,(c_morg+c_org)/1e21)
plt.xlim([t0,tf])
plt.title(r'Atmospheric $O_2$')
plt.legend(frameon=False)
plt.subplot(2,2,3)
plt.semilogy(t,F_oib,'k',label='oib')
plt.semilogy(t,F_arc,'b',label='arc')
plt.semilogy(t,F_mor,'y',label='mor')
plt.semilogy(t,F_crb+F_org,'r',label='weathering')
plt.semilogy(t,F_tot,'g',label='tot volc')
plt.xlim([t0,tf])
plt.title('Fluxes')
plt.legend(frameon=False)
plt.subplot(2,2,4)
plt.semilogy(t,c_atm,'b',label='c atm ocean')
plt.semilogy(t,c_org,'r',label='c crustal org')
plt.semilogy(t,c_morg,'g',label='c mantle org')
plt.semilogy(t,c_mntl,'y',label='Prim mantle C')
plt.semilogy(t,c_crb,'m',label='crustal carb')
plt.semilogy(t,c_mcrb,'c',label='mantle carb')
plt.legend(frameon=False)
plt.xlim([t0,tf])
plt.subplots_adjust(wspace =0, hspace =0)
plt.show()
# plt.savefig('high.png', dpi=300) #指定分辨
# # visualize data
# fig=plt.figure(1,[4,8])
# plt.subplot(4,1,1)
# plt.plot(t,d13C_crb)
# plt.xlim([t0,tf])
# plt.ylim([-10,15])
# plt.title('Isotopes')
# plt.legend(frameon=False)
#
#
# plt.subplot(4,1,2)
# plt.semilogy(t,(c_morg+c_org)/1e21)
# plt.xlim([t0,tf])
# plt.title(r'Atmospheric $O_2$')
# plt.legend(frameon=False)
#
#
# plt.subplot(4,1,3)
# plt.semilogy(t,F_oib,'k',label='oib')
# plt.semilogy(t,F_arc,'b',label='arc')
# plt.semilogy(t,F_mor,'y',label='mor')
# plt.semilogy(t,F_crb+F_org,'r',label='weathering')
# plt.semilogy(t,F_tot,'g',label='tot volc')
# plt.xlim([t0,tf])
# plt.title('Fluxes')
# plt.legend(frameon=False)
#
#
# plt.subplot(4,1,4)
# plt.semilogy(t,c_atm,'b',label='c atm ocean')
# plt.semilogy(t,c_org,'r',label='c crustal org')
# plt.semilogy(t,c_morg,'g',label='c mantle org')
# plt.semilogy(t,c_mntl,'y',label='Prim mantle C')
# plt.semilogy(t,c_crb,'m',label='crustal carb')
# plt.semilogy(t,c_mcrb,'c',label='mantle carb')
# plt.legend(frameon=False)
# plt.xlim([t0,tf])
# plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
afdfc45217af92feca35e8df5f3b06c51cf1a18f
|
32cb84dd41e4be24c065bb205f226f9b121a6db2
|
/feedback/urls.py
|
523511566940bbd365ca5900079a62fd10f87512
|
[] |
no_license
|
InformatykaNaStart/staszic-sio2
|
b38fda84bd8908472edb2097774838ceed08fcfa
|
60a127e687ef8216d2ba53f9f03cfaa201c59e26
|
refs/heads/master
| 2022-06-29T11:09:28.765166
| 2022-06-13T21:56:19
| 2022-06-13T21:56:19
| 115,637,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from django.conf.urls import patterns, include, url
import views
noncontest_patterns = [url(r'^staszic/judging/(?P<jid>\d+)/$', views.judging)]
|
[
"hugo@staszic.waw.pl"
] |
hugo@staszic.waw.pl
|
897ec77c9b38bdda3418a663f5ce762681600b2d
|
7cd586f68f38d8d3be11b0909a1c0cd84ff9fa7a
|
/posts/migrations/0034_slider_color.py
|
7117ec0321b91b65cd941f53d4ee9ff25b0f655a
|
[] |
no_license
|
mehmetsan/WebsiteProject
|
f0732d7567b67184a8f2a58d3c0ca6978f9fbf32
|
a384047551e349fc5ddadb6b9a1d821c8d4b17c7
|
refs/heads/main
| 2023-03-01T09:45:37.672996
| 2021-02-13T15:17:31
| 2021-02-13T15:17:31
| 332,440,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Generated by Django 3.1.3 on 2021-01-14 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0033_auto_20210114_1425'),
]
operations = [
migrations.AddField(
model_name='slider',
name='color',
field=models.CharField(blank=True, max_length=300, null=True),
),
]
|
[
"trabzonpower@gmail.com"
] |
trabzonpower@gmail.com
|
8a4434b6cc0d7ce9aba791e1092bf4db1b672060
|
69ac2db29e1953d34d1965b9b09cbb6b2e44622a
|
/ase/gui/gui.py
|
d2e7015af3314323f4dc7eaeba5a70c856766657
|
[] |
no_license
|
ArianFluido/Project-2-PHYS-A0140
|
01e13dbf30ce55f46f8060ffe23bc9f29bcb87fe
|
6d7ec3b1a09d83f330434df1ce91e6c52491cd22
|
refs/heads/master
| 2022-08-02T14:36:34.647227
| 2020-05-26T10:42:56
| 2020-05-26T10:42:56
| 264,162,598
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48,937
|
py
|
# husk:
# Exit*2? remove pylab.show()
# close button
# DFT
# ADOS
# grey-out stuff after one second: vmd, rasmol, ...
# Show with ....
# rasmol: set same rotation as ag
# Graphs: save, Python, 3D
# start from python (interactive mode?)
# ascii-art option (colored)|
# option -o (output) and -f (force overwrite)
# surfacebuilder
# screen-dump
# icon
# ag-community-server
# translate option: record all translations,
# and check for missing translations.
#TODO: Add possible way of choosing orinetations. \
#TODO: Two atoms defines a direction, three atoms their normal does
#TODO: Align orientations chosen in Rot_selected v unselcted
#TODO: Get the atoms_rotate_0 thing string
#TODO: Use set atoms instead og the get atoms
#TODO: Arrow keys will decide how the orientation changes
#TODO: Undo redo que should be implemented
#TODO: Update should have possibility to change positions
#TODO: Window for rotation modes and move moves which can be chosen
#TODO: WHen rotate and move / hide the movie menu
import os
import sys
import weakref
import pickle
from gettext import gettext as _
from gettext import ngettext
import numpy as np
import pygtk
pygtk.require("2.0")
import gtk
from ase.gui.view import View
from ase.gui.status import Status
from ase.gui.widgets import pack, help, Help, oops
from ase.gui.settings import Settings
from ase.gui.crystal import SetupBulkCrystal
from ase.gui.surfaceslab import SetupSurfaceSlab
from ase.gui.nanoparticle import SetupNanoparticle
from ase.gui.nanotube import SetupNanotube
from ase.gui.graphene import SetupGraphene
from ase.gui.calculator import SetCalculator
from ase.gui.energyforces import EnergyForces
from ase.gui.minimize import Minimize
from ase.gui.scaling import HomogeneousDeformation
from ase.gui.quickinfo import QuickInfo
from ase.gui.save import SaveWindow
from ase.version import version
ui_info = """\
<ui>
<menubar name='MenuBar'>
<menu action='FileMenu'>
<menuitem action='Open'/>
<menuitem action='New'/>
<menuitem action='Save'/>
<separator/>
<menuitem action='Quit'/>
</menu>
<menu action='EditMenu'>
<menuitem action='SelectAll'/>
<menuitem action='Invert'/>
<menuitem action='SelectConstrained'/>
<menuitem action='SelectImmobile'/>
<separator/>
<menuitem action='Copy'/>
<menuitem action='Paste'/>
<separator/>
<menuitem action='HideAtoms'/>
<menuitem action='ShowAtoms'/>
<separator/>
<menuitem action='Modify'/>
<menuitem action='AddAtoms'/>
<menuitem action='DeleteAtoms'/>
<separator/>
<menuitem action='First'/>
<menuitem action='Previous'/>
<menuitem action='Next'/>
<menuitem action='Last'/>
</menu>
<menu action='ViewMenu'>
<menuitem action='ShowUnitCell'/>
<menuitem action='ShowAxes'/>
<menuitem action='ShowBonds'/>
<menuitem action='ShowVelocities'/>
<menuitem action='ShowForces'/>
<menu action='ShowLabels'>
<menuitem action='NoLabel'/>
<menuitem action='AtomIndex'/>
<menuitem action='MagMom'/>
<menuitem action='Element'/>
</menu>
<separator/>
<menuitem action='QuickInfo'/>
<menuitem action='Repeat'/>
<menuitem action='Rotate'/>
<menuitem action='Colors'/>
<menuitem action='Focus'/>
<menuitem action='ZoomIn'/>
<menuitem action='ZoomOut'/>
<menu action='ChangeView'>
<menuitem action='ResetView'/>
<menuitem action='xyPlane'/>
<menuitem action='yzPlane'/>
<menuitem action='zxPlane'/>
<menuitem action='yxPlane'/>
<menuitem action='zyPlane'/>
<menuitem action='xzPlane'/>
<menuitem action='a2a3Plane'/>
<menuitem action='a3a1Plane'/>
<menuitem action='a1a2Plane'/>
<menuitem action='a3a2Plane'/>
<menuitem action='a2a1Plane'/>
<menuitem action='a1a3Plane'/>
</menu>
<menuitem action='Settings'/>
<menuitem action='VMD'/>
<menuitem action='RasMol'/>
<menuitem action='XMakeMol'/>
<menuitem action='Avogadro'/>
</menu>
<menu action='ToolsMenu'>
<menuitem action='Graphs'/>
<menuitem action='Movie'/>
<menuitem action='EModify'/>
<menuitem action='Constraints'/>
<menuitem action='RenderScene'/>
<menuitem action='MoveAtoms'/>
<menuitem action='RotateAtoms'/>
<menuitem action='OrientAtoms'/>
<menuitem action='DFT'/>
<menuitem action='NEB'/>
<menuitem action='BulkModulus'/>
</menu>
<menu action='SetupMenu'>
<menuitem action='Bulk'/>
<menuitem action='Surface'/>
<menuitem action='Nanoparticle'/>
<menuitem action='Graphene'/>
<menuitem action='Nanotube'/>
</menu>
<menu action='CalculateMenu'>
<menuitem action='SetCalculator'/>
<separator/>
<menuitem action='EnergyForces'/>
<menuitem action='Minimize'/>
<menuitem action='Scaling'/>
</menu>
<menu action='HelpMenu'>
<menuitem action='About'/>
<menuitem action='Webpage'/>
<menuitem action='Debug'/>
</menu>
</menubar>
</ui>"""
class GUI(View, Status):
def __init__(self, images, rotations='', show_unit_cell=True,
show_bonds=False):
# Try to change into directory of file you are viewing
try:
os.chdir(os.path.split(sys.argv[1])[0])
# This will fail sometimes (e.g. for starting a new session)
except:
pass
self.images = images
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
#self.window.set_icon(gtk.gdk.pixbuf_new_from_file('guiase.png'))
self.window.set_position(gtk.WIN_POS_CENTER)
#self.window.connect("destroy", lambda w: gtk.main_quit())
self.window.connect('delete_event', self.exit)
vbox = gtk.VBox()
self.window.add(vbox)
if gtk.pygtk_version < (2, 12):
self.set_tip = gtk.Tooltips().set_tip
actions = gtk.ActionGroup("Actions")
actions.add_actions([
('FileMenu', None, _('_File')),
('EditMenu', None, _('_Edit')),
('ViewMenu', None, _('_View')),
('ToolsMenu', None, _('_Tools')),
# TRANSLATORS: Set up (i.e. build) surfaces, nanoparticles, ...
('SetupMenu', None, _('_Setup')),
('CalculateMenu', None, _('_Calculate')),
('HelpMenu', None, _('_Help')),
('Open', gtk.STOCK_OPEN, _('_Open'), '<control>O',
_('Create a new file'),
self.open),
('New', gtk.STOCK_NEW, _('_New'), '<control>N',
_('New ase.gui window'),
lambda widget: os.system('ase-gui &')),
('Save', gtk.STOCK_SAVE, _('_Save'), '<control>S',
_('Save current file'),
self.save),
('Quit', gtk.STOCK_QUIT, _('_Quit'), '<control>Q',
_('Quit'),
self.exit),
('SelectAll', None, _('Select _all'), None,
'',
self.select_all),
('Invert', None, _('_Invert selection'), None,
'',
self.invert_selection),
('SelectConstrained', None, _('Select _constrained atoms'), None,
'',
self.select_constrained_atoms),
('SelectImmobile', None, _('Select _immobile atoms'), '<control>I',
'',
self.select_immobile_atoms),
('Copy', None, _('_Copy'), '<control>C',
_('Copy current selection and its orientation to clipboard'),
self.copy_atoms),
('Paste', None, _('_Paste'), '<control>V',
_('Insert current clipboard selection'),
self.paste_atoms),
('Modify', None, _('_Modify'), '<control>Y',
_('Change tags, moments and atom types of the selected atoms'),
self.modify_atoms),
('AddAtoms', None, _('_Add atoms'), '<control>A',
_('Insert or import atoms and molecules'),
self.add_atoms),
('DeleteAtoms', None, _('_Delete selected atoms'), 'BackSpace',
_('Delete the selected atoms'),
self.delete_selected_atoms),
('First', gtk.STOCK_GOTO_FIRST, _('_First image'), 'Home',
'',
self.step),
('Previous', gtk.STOCK_GO_BACK, _('_Previous image'), 'Page_Up',
'',
self.step),
('Next', gtk.STOCK_GO_FORWARD, _('_Next image'), 'Page_Down',
'',
self.step),
('Last', gtk.STOCK_GOTO_LAST, _('_Last image'), 'End',
'',
self.step),
('ShowLabels', None, _('Show _Labels')),
('HideAtoms', None, _('Hide selected atoms'), None,
'',
self.hide_selected),
('ShowAtoms', None, _('Show selected atoms'), None,
'',
self.show_selected),
('QuickInfo', None, _('Quick Info ...'), None,
'',
self.quick_info_window),
('Repeat', None, _('Repeat ...'), None,
'',
self.repeat_window),
('Rotate', None, _('Rotate ...'), None,
'',
self.rotate_window),
('Colors', None, _('Colors ...'), None, '',
self.colors_window),
# TRANSLATORS: verb
('Focus', gtk.STOCK_ZOOM_FIT, _('Focus'), 'F',
'',
self.focus),
('ZoomIn', gtk.STOCK_ZOOM_IN, _('Zoom in'), 'plus',
'',
self.zoom),
('ZoomOut', gtk.STOCK_ZOOM_OUT, _('Zoom out'), 'minus',
'',
self.zoom),
('ChangeView', None, _('Change View')),
('ResetView', None, _('Reset View'), 'equal',
'',
self.reset_view),
('xyPlane', None, _('\'xy\' Plane'), 'z', '', self.set_view),
('yzPlane', None, _('\'yz\' Plane'), 'x', '', self.set_view),
('zxPlane', None, _('\'zx\' Plane'), 'y', '', self.set_view),
('yxPlane', None, _('\'yx\' Plane'), '<alt>z', '', self.set_view),
('zyPlane', None, _('\'zy\' Plane'), '<alt>x', '', self.set_view),
('xzPlane', None, _('\'xz\' Plane'), '<alt>y', '', self.set_view),
('a2a3Plane', None, _('\'a2 a3\' Plane'), '1', '', self.set_view),
('a3a1Plane', None, _('\'a3 a1\' Plane'), '2', '', self.set_view),
('a1a2Plane', None, _('\'a1 a2\' Plane'), '3', '', self.set_view),
('a3a2Plane', None, _('\'a3 a2\' Plane'), '<alt>1', '', self.set_view),
('a1a3Plane', None, _('\'a1 a3\' Plane'), '<alt>2', '', self.set_view),
('a2a1Plane', None, _('\'a2 a1\' Plane'), '<alt>3', '', self.set_view),
('Settings', gtk.STOCK_PREFERENCES, _('Settings ...'), None,
'',
self.settings),
('VMD', None, _('VMD'), None,
'',
self.external_viewer),
('RasMol', None, _('RasMol'), None,
'',
self.external_viewer),
('XMakeMol', None, _('xmakemol'), None,
'',
self.external_viewer),
('Avogadro', None, _('avogadro'), None,
'',
self.external_viewer),
('Graphs', None, _('Graphs ...'), None,
'',
self.plot_graphs),
('Movie', None, _('Movie ...'), None,
'',
self.movie),
('EModify', None, _('Expert mode ...'), '<control>E',
'',
self.execute),
('Constraints', None, _('Constraints ...'), None,
'',
self.constraints_window),
('RenderScene', None, _('Render scene ...'), None,
'',
self.render_window),
('DFT', None, _('DFT ...'), None,
'',
self.dft_window),
('NEB', None, _('NE_B'), None,
'',
self.NEB),
('BulkModulus', None, _('B_ulk Modulus'), None,
'',
self.bulk_modulus),
('Bulk', None, _('_Bulk Crystal'), None,
_("Create a bulk crystal with arbitrary orientation"),
self.bulk_window),
('Surface', None, _('_Surface slab'), None,
_("Create the most common surfaces"),
self.surface_window),
('Nanoparticle', None, _('_Nanoparticle'), None,
_("Create a crystalline nanoparticle"),
self.nanoparticle_window),
('Nanotube', None, _('Nano_tube'), None,
_("Create a nanotube"),
self.nanotube_window),
('Graphene', None, _('Graphene'), None,
_("Create a graphene sheet or nanoribbon"),
self.graphene_window),
('SetCalculator', None, _('Set _Calculator'), None,
_("Set a calculator used in all calculation modules"),
self.calculator_window),
('EnergyForces', None, _('_Energy and Forces'), None,
_("Calculate energy and forces"),
self.energy_window),
('Minimize', None, _('Energy _Minimization'), None,
_("Minimize the energy"),
self.energy_minimize_window),
('Scaling', None, _('Scale system'), None,
_("Deform system by scaling it"),
self.scaling_window),
('About', None, _('_About'), None,
None,
self.about),
('Webpage', gtk.STOCK_HELP, _('Webpage ...'), None, None, webpage),
('Debug', None, _('Debug ...'), None, None, self.debug)])
actions.add_toggle_actions([
('ShowUnitCell', None, _('Show _unit cell'), '<control>U',
'Bold',
self.toggle_show_unit_cell,
show_unit_cell > 0),
('ShowAxes', None, _('Show _axes'), None,
'Bold',
self.toggle_show_axes,
True),
('ShowBonds', None, _('Show _bonds'), '<control>B',
'Bold',
self.toggle_show_bonds,
show_bonds),
('ShowVelocities', None, _('Show _velocities'),
'<control>G', 'Bold',
self.toggle_show_velocities,
False),
('ShowForces', None, _('Show _forces'), '<control>F',
'Bold',
self.toggle_show_forces,
False),
('MoveAtoms', None, _('_Move atoms'), '<control>M',
'Bold',
self.toggle_move_mode,
False),
('RotateAtoms', None, _('_Rotate atoms'), '<control>R',
'Bold',
self.toggle_rotate_mode,
False),
('OrientAtoms', None, _('Orien_t atoms'), '<control>T',
'Bold',
self.toggle_orient_mode,
False)
])
actions.add_radio_actions((
('NoLabel', None, _('_None'), None, None, 0),
('AtomIndex', None, _('Atom _Index'), None, None, 1),
('MagMom', None, _('_Magnetic Moments'), None, None, 2),
('Element', None, _('_Element Symbol'), None, None, 3)),
0, self.show_labels)
self.ui = ui = gtk.UIManager()
ui.insert_action_group(actions, 0)
self.window.add_accel_group(ui.get_accel_group())
try:
mergeid = ui.add_ui_from_string(ui_info)
except gobject.GError, msg:
print _('building menus failed: %s') % msg
vbox.pack_start(ui.get_widget('/MenuBar'), False, False, 0)
View.__init__(self, vbox, rotations)
Status.__init__(self, vbox)
vbox.show()
#self.window.set_events(gtk.gdk.BUTTON_PRESS_MASK)
self.window.connect('key-press-event', self.scroll)
self.window.connect('scroll_event', self.scroll_event)
self.window.show()
self.graphs = [] # List of open pylab windows
self.graph_wref = [] # List of weakrefs to Graph objects
self.movie_window = None
self.vulnerable_windows = []
self.simulation = {} # Used by modules on Calculate menu.
self.module_state = {} # Used by modules to store their state.
def run(self, expr=None):
self.set_colors()
self.set_coordinates(self.images.nimages - 1, focus=True)
if self.images.nimages > 1:
self.movie()
if expr is None and not np.isnan(self.images.E[0]):
expr = self.config['gui_graphs_string']
if expr is not None and expr != '' and self.images.nimages > 1:
self.plot_graphs(expr=expr)
gtk.main()
def step(self, action):
d = {'First': -10000000,
'Previous': -1,
'Next': 1,
'Last': 10000000}[action.get_name()]
i = max(0, min(self.images.nimages - 1, self.frame + d))
self.set_frame(i)
if self.movie_window is not None:
self.movie_window.frame_number.value = i
def _do_zoom(self, x):
"""Utility method for zooming"""
self.scale *= x
self.draw()
def zoom(self, action):
"""Zoom in/out on keypress or clicking menu item"""
x = {'ZoomIn': 1.2, 'ZoomOut':1 /1.2}[action.get_name()]
self._do_zoom(x)
def scroll_event(self, window, event):
"""Zoom in/out when using mouse wheel"""
SHIFT = event.state == gtk.gdk.SHIFT_MASK
x = 1.0
if event.direction == gtk.gdk.SCROLL_UP:
x = 1.0 + (1-SHIFT)*0.2 + SHIFT*0.01
elif event.direction == gtk.gdk.SCROLL_DOWN:
x = 1.0 / (1.0 + (1-SHIFT)*0.2 + SHIFT*0.01)
self._do_zoom(x)
    def settings(self, menuitem):
        # Open the settings window for this GUI instance.
        Settings(self)
    def scroll(self, window, event):
        """Keyboard handler: arrow/keypad keys pan, zoom, move, rotate
        or orient atoms, depending on the active Tools-menu mode.

        CTRL and SHIFT modify directions and step sizes.
        """
        from copy import copy
        CTRL = event.state == gtk.gdk.CONTROL_MASK
        SHIFT = event.state == gtk.gdk.SHIFT_MASK
        # Map the pressed key to either ('zoom', factor, 0) or a
        # (dx, dy, dz) step; None for keys we do not handle.
        dxdydz = {gtk.keysyms.KP_Add: ('zoom', 1.0 + (1-SHIFT)*0.2 + SHIFT*0.01, 0),
                  gtk.keysyms.KP_Subtract: ('zoom', 1 / (1.0 + (1-SHIFT)*0.2 + SHIFT*0.01), 0),
                  gtk.keysyms.Up: ( 0, +1 - CTRL, +CTRL),
                  gtk.keysyms.Down: ( 0, -1 + CTRL, -CTRL),
                  gtk.keysyms.Right: (+1, 0, 0),
                  gtk.keysyms.Left: (-1, 0, 0)}.get(event.keyval, None)
        try:
            inch = chr(event.keyval)
        except:
            inch = None
        sel = []
        # Which tool mode is currently toggled on in the Tools menu?
        atom_move = self.ui.get_widget('/MenuBar/ToolsMenu/MoveAtoms'
                                       ).get_active()
        atom_rotate = self.ui.get_widget('/MenuBar/ToolsMenu/RotateAtoms'
                                         ).get_active()
        atom_orient = self.ui.get_widget('/MenuBar/ToolsMenu/OrientAtoms'
                                         ).get_active()
        if dxdydz is None:
            return
        dx, dy, dz = dxdydz
        if dx == 'zoom':
            self._do_zoom(dy)
            return
        d = self.scale * 0.1
        tvec = np.array([dx, dy, dz])
        # Step direction expressed in the rotated (view) coordinate system.
        dir_vec = np.dot(self.axes, tvec)
        if (atom_move):
            # Translate the atoms marked for moving, in every frame.
            rotmat = self.axes
            s = 0.1
            if SHIFT:
                s = 0.01
            add = s * dir_vec
            for i in range(len(self.R)):
                if self.atoms_to_rotate_0[i]:
                    self.R[i] += add
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] += add
        elif atom_rotate:
            from rot_tools import rotate_about_vec, \
                rotate_vec
            sel = self.images.selected
            if sum(sel) == 0:
                sel = self.atoms_to_rotate_0
            nsel = sum(sel)
            # this is the first one to get instatiated
            if nsel != 2:
                self.rot_vec = dir_vec
            change = False
            z_axis = np.dot(self.axes, np.array([0, 0, 1]))
            # First invocation in this mode: remember the rotation state.
            if self.atoms_to_rotate == None:
                change = True
                self.z_axis_old = z_axis.copy()
                self.dx_change = [0, 0]
                self.atoms_to_rotate = self.atoms_to_rotate_0.copy()
                self.atoms_selected = sel.copy()
                self.rot_vec = dir_vec
            if nsel != 2 or sum(self.atoms_to_rotate) == 2:
                self.dx_change = [0, 0]
            # A changed selection invalidates the cached rotation axis.
            for i in range(len(sel)):
                if sel[i] != self.atoms_selected[i]:
                    change = True
            cz = [dx, dy+dz]
            if cz[0] or cz[1]:
                change = False
                if not(cz[0] * (self.dx_change[1])):
                    change = True
                for i in range(2):
                    if cz[i] and self.dx_change[i]:
                        self.rot_vec = self.rot_vec * cz[i] * self.dx_change[i]
                        if cz[1]:
                            change = False
            # A rotated view also invalidates the cached axis.
            if np.prod(self.z_axis_old != z_axis):
                change = True
            self.z_axis_old = z_axis.copy()
            self.dx_change = copy(cz)
            dihedral_rotation = len(self.images.selected_ordered) == 4
            if change:
                self.atoms_selected = sel.copy()
                # Exactly two selected atoms: rotate about the axis they span.
                if nsel == 2 and sum(self.atoms_to_rotate) != 2:
                    asel = []
                    for i, j in enumerate(sel):
                        if j:
                            asel.append(i)
                    a1, a2 = asel
                    rvx = self.images.P[self.frame][a1] - \
                          self.images.P[self.frame][a2]
                    rvy = np.cross(rvx,
                                   np.dot(self.axes,
                                          np.array([0, 0, 1])))
                    self.rot_vec = rvx * dx + rvy * (dy + dz)
                self.dx_change = [dx, dy+dz]
            # dihedral rotation?
            if dihedral_rotation:
                sel = self.images.selected_ordered
                self.rot_vec = (dx+dy+dz)*(self.R[sel[2]]-self.R[sel[1]])
            # Rotation centre: dihedral pivot, or centroid of the selection.
            rot_cen = np.array([0.0, 0.0, 0.0])
            if dihedral_rotation:
                sel = self.images.selected_ordered
                rot_cen = self.R[sel[1]].copy()
            elif nsel:
                for i, b in enumerate( sel):
                    if b:
                        rot_cen += self.R[i]
                rot_cen /= float(nsel)
            # 5 degrees per keypress, 1 degree with SHIFT (in radians).
            degrees = 5 * (1 - SHIFT) + SHIFT
            degrees = abs(sum(dxdydz)) * 3.1415 / 360.0 * degrees
            rotmat = rotate_about_vec(self.rot_vec, degrees)
            # now rotate the atoms that are to be rotated
            for i in range(len(self.R)):
                if self.atoms_to_rotate[i]:
                    self.R[i] -= rot_cen
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] -= rot_cen
                    self.R[i] = rotate_vec(rotmat, self.R[i])
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] = rotate_vec(rotmat, self.images.P[jx][i])
                    self.R[i] += rot_cen
                    for jx in range(self.images.nimages):
                        self.images.P[jx][i] += rot_cen
        elif atom_orient:
            # Re-orient the whole view so orient_normal points along the key direction.
            to_vec = np.array([dx, dy, dz])
            from rot_tools import rotate_vec_into_newvec
            rot_mat = rotate_vec_into_newvec(self.orient_normal, to_vec)
            self.axes = rot_mat
            self.set_coordinates()
        else:
            # No tool mode active: pan the view.
            self.center -= (dx * 0.1 * self.axes[:, 0] -
                            dy * 0.1 * self.axes[:, 1])
        self.draw()
    def copy_atoms(self, widget):
        """Copies selected atoms to a clipboard.

        The selection is rotated into the current view frame, tagged
        with a reference position (the atom lowest along z) and stored
        on the X clipboard as a pickled Atoms object for paste_atoms().
        """
        clip = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
        if self.images.selected.any():
            atoms = self.images.get_atoms(self.frame)
            lena = len(atoms)
            # Remove unselected atoms back-to-front so that the indices
            # of atoms not yet visited remain valid.
            for i in range(len(atoms)):
                li = lena-1-i
                if not self.images.selected[li]:
                    del(atoms[li])
            # Express positions in the current viewing coordinates.
            for i in atoms:
                i.position = np.dot(self.axes.T,i.position)
            # Anchor for 'origin' paste mode: lowest atom along z.
            ref = atoms[0].position
            for i in atoms:
                if i.position[2] < ref[2]:
                    ref = i.position
            atoms.reference_position = ref
            clip.set_text(pickle.dumps(atoms, 0))
def paste_atoms(self, widget):
"Inserts clipboard selection into the current frame using the add_atoms window."
clip = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
try:
atoms = pickle.loads(clip.wait_for_text())
self.add_atoms(widget, data='Paste', paste=atoms)
except:
pass
    def add_atoms(self, widget, data=None, paste=None):
        """
        Presents a dialogbox to the user, that allows him to add atoms/molecule to the current slab
        or to paste the clipboard.

        The molecule/atom is rotated using the current rotation of the coordinate system.

        The molecule/atom can be added at a specified position - if the keyword auto+Z is used,
        the COM of the selected atoms will be used as COM for the moleculed. The COM is furthermore
        translated Z ang towards the user.

        If no molecules are selected, the COM of all the atoms will be used for the x-y components of the
        active coordinate system, while the z-direction will be chosen from the nearest atom position
        along this direction.

        Note: If this option is used, all frames except the active one are deleted.
        """
        # This one callback serves several roles, selected by *data*:
        # 'load' (pick a file), 'OK' (commit), 'Cancel', None/'Paste'
        # (build the dialog).  Buttons re-enter with the matching value.
        if data == 'load':
            chooser = gtk.FileChooserDialog(
                _('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
                (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                 gtk.STOCK_OPEN, gtk.RESPONSE_OK))
            chooser.set_filename(_("<<filename>>"))
            ok = chooser.run()
            if ok == gtk.RESPONSE_OK:
                filename = chooser.get_filename()
                chooser.destroy()
            else:
                chooser.destroy()
                return
        if data == 'OK' or data == 'load':
            import ase
            if data == 'load':
                molecule = filename
            else:
                molecule = self.add_entries[1].get_text()
            tag = self.add_entries[2].get_text()
            mom = self.add_entries[3].get_text()
            pos = self.add_entries[4].get_text().lower()
            if paste is not None:
                a = paste.copy()
            else:
                a = None
            # Resolve the user's text as, in order: a chemical symbol, a
            # molecule-database name, or a file to read.  On total
            # failure the entry is marked with '?' and we bail out.
            if a is None:
                try:
                    a = ase.Atoms([ase.Atom(molecule)])
                except:
                    try:
                        import ase.data.molecules
                        a = ase.data.molecules.molecule(molecule)
                    except:
                        try:
                            a = ase.io.read(molecule, -1)
                        except:
                            self.add_entries[1].set_text('?' + molecule)
                            return ()
            directions = np.transpose(self.axes)
            if a != None:
                for i in a:
                    try:
                        i.set('tag',int(tag))
                    except:
                        self.add_entries[2].set_text('?' + tag)
                        return ()
                    try:
                        i.magmom = float(mom)
                    except:
                        self.add_entries[3].set_text('?' + mom)
                        return ()
                if self.origin_radio.get_active() and paste:
                    a.translate(-paste.reference_position)
                # apply the current rotation matrix to A
                for i in a:
                    i.position = np.dot(self.axes, i.position)
                # find the extent of the molecule in the local coordinate system
                if self.centre_radio.get_active():
                    a_cen_pos = np.array([0.0, 0.0, 0.0])
                    m_cen_pos = 0.0
                    for i in a.positions:
                        a_cen_pos[0] += np.dot(directions[0], i)
                        a_cen_pos[1] += np.dot(directions[1], i)
                        a_cen_pos[2] += np.dot(directions[2], i)
                        m_cen_pos = max(np.dot(-directions[2], i), m_cen_pos)
                    a_cen_pos[0] /= len(a.positions)
                    a_cen_pos[1] /= len(a.positions)
                    a_cen_pos[2] /= len(a.positions)
                    a_cen_pos[2] -= m_cen_pos
                else:
                    a_cen_pos = np.array([0.0, 0.0, 0.0])
                # now find the position
                cen_pos = np.array([0.0, 0.0, 0.0])
                if sum(self.images.selected) > 0:
                    # Centre on the selected atoms.
                    for i in range(len(self.R)):
                        if self.images.selected[i]:
                            cen_pos += self.R[i]
                    cen_pos /= sum(self.images.selected)
                elif len(self.R) > 0:
                    # No selection: x-y centre of all atoms, z of the
                    # atom nearest to the viewer along the view axis.
                    px = 0.0
                    py = 0.0
                    pz = -1e6
                    for i in range(len(self.R)):
                        px += np.dot(directions[0], self.R[i])
                        py += np.dot(directions[1], self.R[i])
                        pz = max(np.dot(directions[2], self.R[i]), pz)
                    px = (px/float(len(self.R)))
                    py = (py/float(len(self.R)))
                    cen_pos = directions[0] * px + \
                              directions[1] * py + \
                              directions[2] * pz
                # SECURITY NOTE: the position string is eval()'d below;
                # this dialog accepts arbitrary Python expressions.
                if 'auto' in pos:
                    pos = pos.replace('auto', '')
                    import re
                    pos = re.sub('\s', '', pos)
                    if '(' in pos:
                        sign = eval('%s1' % pos[0])
                        a_cen_pos -= sign * np.array(eval(pos[1:]), float)
                    else:
                        a_cen_pos -= float(pos) * directions[2]
                else:
                    cen_pos = np.array(eval(pos))
                for i in a:
                    i.position += cen_pos - a_cen_pos
                # and them to the molecule
                atoms = self.images.get_atoms(self.frame)
                atoms = atoms + a
                self.new_atoms(atoms, init_magmom=True)
                # and finally select the new molecule for easy moving and rotation
                for i in range(len(a)):
                    self.images.selected[len(atoms) - i - 1] = True
                self.draw()
            self.add_entries[0].destroy()
        if data == 'Cancel':
            self.add_entries[0].destroy()
        if data == None or data == 'Paste':
            # Build the Add/Paste dialog itself.
            from ase.gui.widgets import pack
            molecule = ''
            tag = '0'
            mom = '0'
            pos = 'auto+1'
            self.add_entries = []
            window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.add_entries.append(window)
            window.set_title(_('Add atoms'))
            if data == 'Paste':
                molecule = paste.get_chemical_symbols(True)
                window.set_title(_('Paste'))
            vbox = gtk.VBox(False, 0)
            window.add(vbox)
            vbox.show()
            packed = False
            for i, j in [[_('Insert atom or molecule'), molecule],
                         [_('Tag'), tag],
                         [_('Moment'), mom],
                         [_('Position'), pos]]:
                label = gtk.Label(i)
                if not packed:
                    vbox.pack_start(label, True, True, 0)
                else:
                    packed = True
                vbox.add(label)
                label.show()
                entry = gtk.Entry()
                entry.set_text(j)
                self.add_entries.append(entry)
                entry.set_max_length(50)
                entry.show()
                vbox.add(entry)
            pack(vbox,[gtk.Label('atom/molecule reference:')])
            self.centre_radio = gtk.RadioButton(None, "centre ")
            self.origin_radio = gtk.RadioButton(self.centre_radio, "origin")
            pack(vbox,[self.centre_radio, self.origin_radio])
            if data == 'Paste':
                self.origin_radio.set_active(True)
                self.add_entries[1].set_sensitive(False)
            if data == None:
                button = gtk.Button(_('_Load molecule'))
                button.connect('clicked', self.add_atoms, 'load')
                button.show()
                vbox.add(button)
            button = gtk.Button(_('_OK'))
            button.connect('clicked', self.add_atoms, 'OK', paste)
            button.show()
            vbox.add(button)
            button = gtk.Button(_('_Cancel'))
            button.connect('clicked', self.add_atoms, 'Cancel')
            button.show()
            vbox.add(button)
            window.show()
    def modify_atoms(self, widget, data=None):
        """
        Presents a dialog box where the user is able to change the atomic type, the magnetic
        moment and tags of the selected atoms. An item marked with X will not be changed.
        """
        # Dual-role callback: data None builds the dialog; data
        # 'OK'/'Cancel' is the dialog button re-entering.
        if data:
            if data == 'OK':
                import ase
                symbol = self.add_entries[1].get_text()
                tag = self.add_entries[2].get_text()
                mom = self.add_entries[3].get_text()
                a = None
                # Validate the symbol by trying to build an Atoms object.
                if symbol != 'X':
                    try:
                        a = ase.Atoms([ase.Atom(symbol)])
                    except:
                        self.add_entries[1].set_text('?' + symbol)
                        return ()
                y = self.images.selected.copy()
                atoms = self.images.get_atoms(self.frame)
                # Apply each non-'X' field to every selected atom.
                for i in range(len(atoms)):
                    if self.images.selected[i]:
                        if a:
                            atoms[i].symbol = symbol
                        try:
                            if tag != 'X':
                                atoms[i].tag = int(tag)
                        except:
                            self.add_entries[2].set_text('?' + tag)
                            return ()
                        try:
                            if mom != 'X':
                                atoms[i].magmom = float(mom)
                        except:
                            self.add_entries[3].set_text('?' + mom)
                            return ()
                self.new_atoms(atoms, init_magmom=True)
                # Updates atomic labels by bouncing the radio action
                # through 0 and back to its current value.
                cv = self.ui.get_action_groups()[0].\
                    get_action("NoLabel").get_current_value()
                self.ui.get_action_groups()[0].\
                    get_action("NoLabel").set_current_value(0)
                self.ui.get_action_groups()[0].\
                    get_action("NoLabel").set_current_value(cv)
                # Restore the selection saved before new_atoms() reset it.
                self.images.selected = y
                self.draw()
            self.add_entries[0].destroy()
        if data == None and sum(self.images.selected):
            atoms = self.images.get_atoms(self.frame)
            s_tag = ''
            s_mom = ''
            s_symbol = ''
            # Get the tags, moments and symbols of the selected atoms;
            # mixed values collapse to the 'X' (leave unchanged) marker.
            for i in range(len(atoms)):
                if self.images.selected[i]:
                    if not(s_tag):
                        s_tag = str(atoms[i].tag)
                    elif s_tag != str(atoms[i].tag):
                        s_tag = 'X'
                    if not(s_mom):
                        s_mom = ("%2.2f" % (atoms[i].magmom))
                    elif s_mom != ("%2.2f" % (atoms[i].magmom)):
                        s_mom = 'X'
                    if not(s_symbol):
                        s_symbol = str(atoms[i].symbol)
                    elif s_symbol != str(atoms[i].symbol):
                        s_symbol = 'X'
            self.add_entries = []
            window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.add_entries.append(window)
            window.set_title(_('Modify'))
            vbox = gtk.VBox(False, 0)
            window.add(vbox)
            vbox.show()
            pack = False
            for i, j in [[_('Atom'), s_symbol],
                         [_('Tag'), s_tag],
                         [_('Moment'), s_mom]]:
                label = gtk.Label(i)
                if not pack:
                    vbox.pack_start(label, True, True, 0)
                else:
                    pack = True
                vbox.add(label)
                label.show()
                entry = gtk.Entry()
                entry.set_text(j)
                self.add_entries.append(entry)
                entry.set_max_length(50)
                entry.show()
                vbox.add(entry)
            button = gtk.Button(_('_OK'))
            button.connect('clicked', self.modify_atoms, 'OK')
            button.show()
            vbox.add(button)
            button = gtk.Button(_('_Cancel'))
            button.connect('clicked', self.modify_atoms, 'Cancel')
            button.show()
            vbox.add(button)
            window.show()
    def delete_selected_atoms(self, widget=None, data=None):
        """Delete the selected atoms after a confirmation dialog.

        First call (data None) pops the confirmation window; the dialog
        buttons re-enter with data 'OK' or 'Cancel'.
        """
        if data == 'OK':
            atoms = self.images.get_atoms(self.frame)
            lena = len(atoms)
            # Delete back-to-front so earlier indices stay valid.
            for i in range(len(atoms)):
                li = lena-1-i
                if self.images.selected[li]:
                    del(atoms[li])
            self.new_atoms(atoms)
            self.draw()
        if data:
            self.delete_window.destroy()
        if not(data) and sum(self.images.selected):
            nselected = sum(self.images.selected)
            self.delete_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
            self.delete_window.set_title(_('Confirmation'))
            self.delete_window.set_border_width(10)
            self.box1 = gtk.HBox(False, 0)
            self.delete_window.add(self.box1)
            self.button1 = gtk.Button(ngettext('Delete selected atom?',
                                               'Delete selected atoms?',
                                               nselected))
            self.button1.connect("clicked", self.delete_selected_atoms, "OK")
            self.box1.pack_start(self.button1, True, True, 0)
            self.button1.show()
            self.button2 = gtk.Button(_("Cancel"))
            self.button2.connect("clicked", self.delete_selected_atoms, "Cancel")
            self.box1.pack_start(self.button2, True, True, 0)
            self.button2.show()
            self.box1.show()
            self.delete_window.show()
    def debug(self, x):
        # Open the interactive debugging window.
        from ase.gui.debug import Debug
        Debug(self)
    def execute(self, widget=None):
        # Open the Python command-execution window.
        from ase.gui.execute import Execute
        Execute(self)
    def constraints_window(self, widget=None):
        # Open the constraints editor.
        from ase.gui.constraints import Constraints
        Constraints(self)
    def dft_window(self, widget=None):
        # Open the DFT settings window.
        from ase.gui.dft import DFT
        DFT(self)
    def select_all(self, widget):
        # Mark every atom as selected and redraw.
        self.images.selected[:] = True
        self.draw()
    def invert_selection(self, widget):
        # Flip the selection state of every atom.
        self.images.selected[:] = ~self.images.selected
        self.draw()
    def select_constrained_atoms(self, widget):
        # Constrained atoms are exactly the non-dynamic ones.
        self.images.selected[:] = ~self.images.dynamic
        self.draw()
def select_immobile_atoms(self, widget):
if self.images.nimages > 1:
R0 = self.images.P[0]
for R in self.images.P[1:]:
self.images.selected[:] =~ (np.abs(R - R0) > 1.0e-10).any(1)
self.draw()
    def movie(self, widget=None):
        # Open the frame-playback window and remember it so step() can
        # keep its frame counter in sync.
        from ase.gui.movie import Movie
        self.movie_window = Movie(self)
    def plot_graphs(self, x=None, expr=None):
        """Open a Graphs window, optionally plotting *expr* right away.

        Only a weak reference is kept, so a closed Graphs window can be
        garbage collected (see plot_graphs_newatoms).
        """
        from ase.gui.graphs import Graphs
        g = Graphs(self)
        if expr is not None:
            g.plot(expr=expr)
        self.graph_wref.append(weakref.ref(g))
def plot_graphs_newatoms(self):
"Notify any Graph objects that they should make new plots."
new_wref = []
found = 0
for wref in self.graph_wref:
ref = wref()
if ref is not None:
ref.plot()
new_wref.append(wref) # Preserve weakrefs that still work.
found += 1
self.graph_wref = new_wref
return found
    def NEB(self, action):
        # Open the nudged-elastic-band analysis window on the images.
        from ase.gui.neb import NudgedElasticBand
        NudgedElasticBand(self.images)
    def bulk_modulus(self, action):
        # Open the bulk-modulus fit window on the images.
        from ase.gui.bulk_modulus import BulkModulus
        BulkModulus(self.images)
def open(self, button=None, filenames=None):
if filenames == None:
chooser = gtk.FileChooserDialog(
_('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
chooser.set_filename(_("<<filename>>"))
# Add a file type filter
name_to_suffix = {}
types = gtk.combo_box_new_text()
for name, suffix in [(_('Automatic'), None),
(_('Dacapo netCDF output file'),'dacapo'),
(_('Virtual Nano Lab file'),'vnl'),
(_('ASE pickle trajectory'),'traj'),
(_('ASE bundle trajectory'),'bundle'),
(_('GPAW text output'),'gpaw-text'),
(_('CUBE file'),'cube'),
(_('XCrySDen Structure File'),'xsf'),
(_('Dacapo text output'),'dacapo-text'),
(_('XYZ-file'),'xyz'),
(_('VASP POSCAR/CONTCAR file'),'vasp'),
(_('VASP OUTCAR file'),'vasp_out'),
(_('Protein Data Bank'),'pdb'),
(_('CIF-file'),'cif'),
(_('FHI-aims geometry file'),'aims'),
(_('FHI-aims output file'),'aims_out'),
(_('TURBOMOLE coord file'),'tmol'),
(_('exciting input'),'exi'),
(_('WIEN2k structure file'),'struct'),
(_('DftbPlus input file'),'dftb'),
(_('ETSF format'),'etsf.nc'),
(_('CASTEP geom file'),'cell'),
(_('CASTEP output file'),'castep'),
(_('CASTEP trajectory file'),'geom'),
(_('DFTBPlus GEN format'),'gen')
]:
types.append_text(name)
name_to_suffix[name] = suffix
types.set_active(0)
img_vbox = gtk.VBox()
pack(img_vbox, [gtk.Label(_('File type:')), types])
img_vbox.show()
chooser.set_extra_widget(img_vbox)
ok = chooser.run() == gtk.RESPONSE_OK
if ok:
filenames = [chooser.get_filename()]
filetype = types.get_active_text()
chooser.destroy()
if not ok:
return
n_current = self.images.nimages
self.reset_tools_modes()
self.images.read(filenames, slice(None), name_to_suffix[filetype])
self.set_colors()
self.set_coordinates(self.images.nimages - 1, focus=True)
def import_atoms (self, button=None, filenames=None):
if filenames == None:
chooser = gtk.FileChooserDialog(
_('Open ...'), None, gtk.FILE_CHOOSER_ACTION_OPEN,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
ok = chooser.run()
if ok == gtk.RESPONSE_OK:
filenames = [chooser.get_filename()]
chooser.destroy()
if not ok:
return
self.images.import_atoms(filenames, self.frame)
self.set_colors()
self.set_coordinates(self.images.nimages - 1, focus=True)
    def save(self, menuitem):
        # Open the save-file dialog.
        SaveWindow(self)
    def quick_info_window(self, menuitem):
        # Show a summary of the current configuration.
        QuickInfo(self)
    def bulk_window(self, menuitem):
        # Builder: bulk crystal.
        SetupBulkCrystal(self)
    def surface_window(self, menuitem):
        # Builder: surface slab.
        SetupSurfaceSlab(self)
    def nanoparticle_window(self, menuitem):
        # Builder: nanoparticle.
        SetupNanoparticle(self)
    def graphene_window(self, menuitem):
        # Builder: graphene sheet.
        SetupGraphene(self)
    def nanotube_window(self, menuitem):
        # Builder: nanotube.
        SetupNanotube(self)
    def calculator_window(self, menuitem):
        # Attach/configure a calculator.
        SetCalculator(self)
    def energy_window(self, menuitem):
        # Compute energy and forces with the attached calculator.
        EnergyForces(self)
    def energy_minimize_window(self, menuitem):
        # Structure relaxation dialog.
        Minimize(self)
    def scaling_window(self, menuitem):
        # Homogeneous deformation / scaling dialog.
        HomogeneousDeformation(self)
    def new_atoms(self, atoms, init_magmom=False):
        "Set a new atoms object."
        self.reset_tools_modes()
        # Remember the repeat setting: initialize() needs unrepeated
        # images, so drop to (1, 1, 1) and restore afterwards.
        rpt = getattr(self.images, 'repeat', None)
        self.images.repeat_images(np.ones(3, int))
        self.images.initialize([atoms], init_magmom=init_magmom)
        self.frame = 0 # Prevent crashes
        self.images.repeat_images(rpt)
        self.set_colors()
        self.set_coordinates(frame=0, focus=True)
        self.notify_vulnerable()
    def prepare_new_atoms(self):
        "Marks that the next call to append_atoms should clear the images."
        self.images.prepare_new_atoms()
    def append_atoms(self, atoms):
        "Append one image to the image list and focus on it."
        #self.notify_vulnerable() # Do this manually after last frame.
        frame = self.images.append_atoms(atoms)
        self.set_coordinates(frame=frame-1, focus=True)
def notify_vulnerable(self):
"""Notify windows that would break when new_atoms is called.
The notified windows may adapt to the new atoms. If that is not
possible, they should delete themselves.
"""
new_vul = [] # Keep weakrefs to objects that still exist.
for wref in self.vulnerable_windows:
ref = wref()
if ref is not None:
new_vul.append(wref)
ref.notify_atoms_changed()
self.vulnerable_windows = new_vul
    def register_vulnerable(self, obj):
        """Register windows that are vulnerable to changing the images.

        Some windows will break if the atoms (and in particular the
        number of images) are changed.  They can register themselves
        and be closed when that happens.
        """
        # A weakref is stored so registration never keeps a closed
        # window alive.
        self.vulnerable_windows.append(weakref.ref(obj))
    def exit(self, button, event=None):
        # Tear down the main window and leave the gtk main loop.
        # Returning True stops further handling of the delete event.
        self.window.destroy()
        gtk.main_quit()
        return True
    def xxx(self, x=None,
            message1=_('Not implemented!'),
            message2=_('do you really need it?')):
        # Placeholder handler for menu entries without an implementation.
        oops(message1, message2)
    def about(self, action):
        """Show the About dialog; falls back to xxx() on old pygtk."""
        try:
            dialog = gtk.AboutDialog()
            dialog.set_version(version)
            dialog.set_website(
                'https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html')
        except AttributeError:
            # gtk.AboutDialog is missing in very old pygtk releases.
            self.xxx()
        else:
            dialog.run()
            dialog.destroy()
    def webpage(widget):
        # Open the ase-gui documentation in the default browser.
        # NOTE(review): there is no `self` parameter -- as a bound
        # instance method the GUI instance lands in `widget`; confirm
        # how the menu action invokes this before changing the signature.
        import webbrowser
        webbrowser.open('https://wiki.fysik.dtu.dk/ase/ase/gui/gui.html')
|
[
"kermana1@vdiubuntu020.org.aalto.fi"
] |
kermana1@vdiubuntu020.org.aalto.fi
|
c455b8a4833c85112dc148d24cb008acdef40d49
|
1851a11bca1071296fd2ac19c074601669a4f07d
|
/qwarie_customization/models/events.py
|
4c0a629271d4b6b134899dfcf6550049da097990
|
[] |
no_license
|
devalex365/odoo-xelaved
|
78e874357971ac0c1d969d380e8daf4ab9ac068c
|
3d63437c131641ce05ba23797908a1771857a6c1
|
refs/heads/master
| 2021-04-30T00:16:27.692712
| 2018-03-06T17:40:27
| 2018-03-06T17:40:27
| 121,572,089
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,531
|
py
|
# -*- coding: utf-8 -*-
from openerp import _, fields, models, api
from datetime import datetime
import calendar
import uuid
import urlparse
import logging
_logger = logging.getLogger(__name__)
class event_event(models.Model):
_inherit = 'event.event'
@api.model
def _default_course_id(self):
events = self.env['event.event'].search([])
qwarie_course_id = max([event['qwarie_course_id'] for event in events] or [0, ])
return qwarie_course_id + 1
    @api.one
    @api.depends('exam_survey_id')
    def _get_exam_participants(self):
        # All survey.user_input records belonging to this event's exam
        # survey (searched model-wide, not just via user_input_ids).
        self.exam_survey_participants_ids = self.exam_survey_id.user_input_ids.search([
            ('event_id', '=', self.id), ('survey_id', '=', self.exam_survey_id.id)
        ])
    @api.one
    @api.depends('feedback_survey_id')
    def _get_feedback_participants(self):
        # Same as above, for the feedback survey.
        self.feedback_survey_participants_ids = self.feedback_survey_id.user_input_ids.search([
            ('event_id', '=', self.id), ('survey_id', '=', self.feedback_survey_id.id)
        ])
    @api.one
    @api.depends('certificate_id')
    def _get_certificate_participants(self):
        # Certificates mirror the exam: same survey, same participants.
        # NOTE(review): this compute also re-assigns certificate_id,
        # overwriting any manually chosen survey -- confirm intended.
        self.certificate_id = self.exam_survey_id
        self.certificate_participants_ids = self.exam_survey_participants_ids
    @api.one
    @api.depends('address_id')
    def _get_training_customer(self):
        # Customer = the venue's parent company, or the venue address
        # itself when it has no parent partner.
        self.customer_id = self.address_id.parent_id or self.address_id
trainer_id = fields.Many2one('res.users', string='Trainer', default=lambda self: self.env.user)
assistant_id = fields.Many2one('res.users', string='Assistant Trainer')
delegate_quota = fields.Char(string='Delegate Quota', track_visibility='onchange')
available_seats = fields.Char(string='Available Seats', track_visibility='onchange')
training_leader = fields.Char(string='Training Leader', track_visibility='onchange')
customer_id = fields.Many2one('res.partner', string='Customer', compute='_get_training_customer')
# custom Print fields
printing_company = fields.Char(string='Printing company (URL)', track_visibility='onchange')
ordered_date = fields.Date(string='Order date', track_visibility='onchange')
ordered_by = fields.Many2one('res.partner', string='Ordered by', track_visibility='onchange')
price = fields.Monetary(string='Order price', track_visibility='onchange')
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id', track_visibility='onchange')
order_number = fields.Char(string='Purchase order number', track_visibility='onchange')
order_copies_number = fields.Integer(string='Number of copies ordered', track_visibility='onchange')
billed_to = fields.Selection([
('0', 'Wess International'),
('1', 'Qwarie Ltd'),
('2', 'Qwarie EMEA')], string='Billed to', track_visibility='onchange')
proposed_delivery_date = fields.Date(string='Proposed delivery date', track_visibility='onchange')
delivered_to = fields.Many2one('res.partner', string='Delivery to', track_visibility='onchange')
delivery_confirmed = fields.Datetime(string='Delivery confirmation', track_visibility='onchange')
event_service = fields.Many2one('product.template', 'Product', track_visibility='onchange')
qwarie_course_id = fields.Integer(string='Qwarie Course ID', default=_default_course_id)
tracking_id = fields.Char(string='Tracking ID', track_visibility='onchange')
tracking_link = fields.Char(string='Tracking Link', track_visibility='onchange')
total_docs = fields.Integer(string='Total number of documents', track_visibility='onchange')
paper_size = fields.Char(string='Paper Size', track_visibility='onchange')
print_sides = fields.Selection([
('single', 'Single sided'),
('double', 'Double sided')], string='Printed Sides', track_visibility='onchange')
ink_colour = fields.Selection([
('black', 'Black & White'),
('colour', 'Colour'),
('both', ('Colour and Black & White'))], string='Ink colour', track_visibility='onchange')
paper_colour = fields.Char(string='Paper Colour', track_visibility='onchange')
paper_finish = fields.Char(string='Paper Finish', track_visibility='onchange')
paper_weight = fields.Char(string='Paper Weight', track_visibility='onchange')
binding_type = fields.Char(string='Binding type', track_visibility='onchange')
binding_color = fields.Char(string='Binding Colour', track_visibility='onchange')
binding_position = fields.Char(string='Binding Position', track_visibility='onchange')
hole_punching = fields.Char(string='Hole Punching', track_visibility='onchange')
folding = fields.Char(string='Folding', track_visibility='onchange')
protection = fields.Char(string='Protection', track_visibility='onchange')
cover = fields.Char(string='Cover', track_visibility='onchange')
print_material_url = fields.Char(string='Print material URL', track_visibility='onchange')
responsible_person = fields.Char(string='Responsible person', track_visibility='onchange')
responsible_email = fields.Char(string='Responsible email', track_visibility='onchange')
day_begin = fields.Char(string='Start Day', compute='get_day_begin')
day_end = fields.Char(string='End Day', compute='get_day_end')
month_begin = fields.Char(string='Training Month', compute='get_month_begin')
month_end = fields.Char(string='Training Month', compute='get_month_end')
year_begin = fields.Char(string='Training year', compute='get_year_begin')
year_end = fields.Char(string='Training year', compute='get_year_end')
month = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
duration = fields.Integer('Duration', required=True, compute='get_duration')
duration_h = fields.Integer('Course Duration', compute='_get_duration')
training_subject = fields.Char('Training Subject')
    @api.one
    @api.depends('date_begin', 'date_end')
    def _get_duration(self):
        # Course hours: assumes 7 teaching hours per day -- TODO confirm.
        self.duration_h = self.duration * 7
    @api.one
    @api.depends('date_begin', 'date_end')
    def get_duration(self):
        # Duration in whole days between the start and end dates.
        start_date = fields.Date.from_string(self.date_begin)
        end_date = fields.Date.from_string(self.date_end)
        if start_date and end_date:
            duration = end_date - start_date
            self.duration = duration.days
@api.model
def get_day_begin(self):
self.day_begin = self.date_begin.split(' ')[0]
self.day_begin = self.day_begin.split('-')[2]
return self.day_begin
@api.model
def get_day_end(self):
self.day_end = self.date_end.split(' ')[0]
self.day_end = self.day_end.split('-')[2]
return self.day_end
@api.model
def get_month_begin(self):
self.month_begin = self.date_begin.split(' ')[0]
self.month_begin = self.month_begin.split('-')[1]
self.month_begin = self.month[int(self.month_begin)-1]
return self.month_begin
@api.model
def get_month_end(self):
self.month_end = self.date_end.split(' ')[0]
self.month_end = self.month_end.split('-')[1]
self.month_end = self.month[int(self.month_end)-1]
return self.month_end
@api.model
def get_year_begin(self):
self.year_begin = self.date_begin.split(' ')[0]
self.year_begin = self.year_begin.split('-')[0]
return self.year_begin
@api.model
def get_year_end(self):
self.year_end = self.date_end.split(' ')[0]
self.year_end = self.year_end.split('-')[0]
return self.year_end
attachment_ids = fields.Many2many('ir.attachment', 'events_ir_attachments_rel', 'event_id', 'attachment_id', 'Attachments')
#custom CTU fields
site_survey = fields.Selection([
('not_required', 'Not Required'),
('ordered', 'Ordered'),
('performed', 'Performed')], string='Site Survey', track_visibility='onchange')
ctu_order = fields.Selection([
('ctu_not_required', 'CTU not required'),
('ctu_without_internet', 'CTU without internet'),
('ctu_with_internet', 'CTU with internet'),
('mi-fi_only', 'Mi-Fi only')], string='CTU Order', default='ctu_not_required', track_visibility='onchange')
ctu_number = fields.Selection([
('none', '0'), ('1', '1'), ('2', '2'),
('3', '3'), ('4', '4'), ('5', '5'),
('6', '6'), ('7', '7'), ('8', '8'),
('9', '9'), ('10', '10')], string='CTU Number', default="none", track_visibility='onchange')
ctu_status = fields.Selection([
('unknown', 'Not known'), ('progress', 'In progress'),
('ready', 'Ready to dispatch'), ('transit', 'In Transit'),
('delivered', 'Delivered')], default='unknown', string='Status', track_visibility='onchange')
#survey links
exam_survey_id = fields.Many2one('survey.survey', string='Training Exam', track_visibility='onchange')
exam_survey_participants_ids = fields.One2many('survey.user_input', string='Participants', compute='_get_exam_participants', readonly=False)
feedback_survey_id = fields.Many2one('survey.survey', string='Training Feedback', track_visibility='onchange')
feedback_survey_participants_ids = fields.One2many('survey.user_input', string='Participants', compute='_get_feedback_participants', readonly=False)
certificate_id = fields.Many2one('survey.survey', string='Training Certificate', track_visibility='onchange')
certificate_participants_ids = fields.One2many('survey.user_input', string='Participants', compute='_get_certificate_participants', readonly=False)
travel_ids = fields.One2many('event.travel', 'event_id', string='Travel Arrangements', track_visibility='onchange')
accommodation_ids = fields.One2many('event.accommodation', 'event_id', string='Accommodation', track_visibility='onchange')
note_ids = fields.One2many('event.notes', 'event_id', string='Notes', track_visibility='onchange')
    @api.model
    def _default_event_mail_ids(self):
        # Automatic registration mails are disabled for trainings; the
        # commented block below shows the subscription mail that would
        # otherwise be scheduled.
        return False
        # return [(0, 0, {
        #     'interval_unit': 'now',
        #     'interval_type': 'after_sub',
        #     'template_id': self.env.ref('qwarie_customization.training_subscription')
        # })]
@api.multi
@api.depends('name', 'date_begin', 'date_end')
def name_get(self):
result = []
for event in self:
date_begin = fields.Datetime.from_string(event.date_begin)
date_end = fields.Datetime.from_string(event.date_end)
dates = [fields.Date.to_string(fields.Datetime.context_timestamp(event, dt)) for dt in [date_begin, date_end] if dt]
dates = sorted(set(dates))
dates = [fields.Datetime.from_string(date).strftime('%a, %d %b %Y') for date in dates]
result.append((event.id, '{course} {dates}'.format(course=event.name, dates=' - '.join(dates))))
return result
    @api.model
    def create(self, vals):
        # On creation, remove the organizer from the follower list
        # (the base event module auto-subscribes them).
        res = super(event_event, self).create(vals)
        if res.organizer_id:
            res.message_unsubscribe([res.organizer_id.id])
        return res
@api.multi
def write(self, vals):
# exam and feedback ids are computed one2many fields
# they are not store(allows for a more dynamic domain)
# unlink operations must be done manually
if vals.get('feedback_survey_participants_ids'):
for survey in vals.get('feedback_survey_participants_ids'):
operation, input_id, boolVal = survey
if operation == 2: # unlink operation id
user_input = self.env['survey.user_input'].browse(input_id)
user_input.unlink()
if vals.get('exam_survey_participants_ids'):
for survey in vals.get('exam_survey_participants_ids'):
operation, input_id, boolVal = survey
if operation == 2: # unlink operation id
user_input = self.env['survey.user_input'].browse(input_id)
user_input.unlink()
if vals.get('certificate_participants_ids'):
for survey in vals.get('certificate_participants_ids'):
operation, input_id, boolVal = survey
if operation == 2: # unlink operation id
user_input = self.env['survey.user_input'].browse(input_id)
user_input.unlink()
# when changing the course exam
if vals.get('exam_survey_id'):
for delegate in self.registration_ids:
# remove delagetes from the previous survey
delegate_survey = self.env['survey.user_input'].search([
('survey_id', '=', self.exam_survey_id.id),
('event_id', '=', self.id),
('participant_name', '=', delegate.name),
('email', '=', delegate.email)])
if delegate_survey:
delegate_survey.unlink()
# create new entry for delegate to the new survey
token = uuid.uuid4().__str__()
self.env['survey.user_input'].create({
'survey_id': vals['exam_survey_id'],
'event_id': self.id,
'date_create': datetime.now(),
'type': 'link',
'state': 'new',
'token': token,
'participant_name': delegate.name,
'email': delegate.email
})
# when changing the course feedback
if vals.get('feedback_survey_id'):
for delegate in self.registration_ids:
# remove delagetes from the previous survey
delegate_survey = self.env['survey.user_input'].search([
('survey_id', '=', self.feedback_survey_id.id),
('event_id', '=', self.id),
('participant_name', '=', delegate.name),
('email', '=', delegate.email)])
if delegate_survey:
delegate_survey.unlink()
# create new entry for delegate to the new survey
token = uuid.uuid4().__str__()
self.env['survey.user_input'].create({
'survey_id': vals['feedback_survey_id'],
'event_id': self.id,
'date_create': datetime.now(),
'type': 'link',
'state': 'new',
'token': token,
'participant_name': delegate.name,
'email': delegate.email
})
# when changing the course feedback
if vals.get('certificate_id'):
for delegate in self.registration_ids:
# remove delagetes from the previous survey
delegate_survey = self.env['survey.user_input'].search([
('survey_id', '=', self.certificate_id.id),
('event_id', '=', self.id),
('participant_name', '=', delegate.name),
('email', '=', delegate.email)])
if delegate_survey:
delegate_survey.unlink()
# create new entry for delegate to the new survey
token = uuid.uuid4().__str__()
self.env['survey.user_input'].create({
'survey_id': vals['certificate_id'],
'event_id': self.id,
'date_create': datetime.now(),
'type': 'link',
'state': 'new',
'token': token,
'participant_name': delegate.name,
'email': delegate.email
})
res = super(event_event, self).write(vals)
return res
@api.one
def email_survey(self):
survey_type = self.env.context.get('survey_type')
if survey_type == 'exam':
survey_id = self.exam_survey_id.id
else:
survey_id = self.feedback_survey_id.id
# send survey via email to every enrolled delegate
for delegate in self.registration_ids:
delegate_survey = self.env['survey.user_input'].search([
('survey_id', '=', survey_id),
('event_id', '=', self.id),
('participant_name', '=', delegate.name),
('email', '=', delegate.email)], limit=1, order="id desc")
if delegate_survey and delegate_survey.token:
delegate_survey.email_survey()
@api.multi
def view_exam_results(self):
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'target': 'new',
'url': '{url}/{id}'.format(url=self.exam_survey_id.result_url, id=self.id)
}
@api.multi
def view_feedback_results(self):
self.ensure_one()
return {
'type': 'ir.actions.act_url',
'name': 'Course Feedback Results',
'target': 'new',
'url': '{url}/{id}'.format(url=self.feedback_survey_id.result_url, id=self.id)
}
class event_registration(models.Model):
    """Event attendee, kept in sync with the course's exam and feedback
    survey participant lists and with the mass-mailing subscription list
    (list_id 2)."""
    _inherit = 'event.registration'

    email = fields.Char(string='E-mail', readonly=False)
    name = fields.Char(string='Attendee Name', index=True)
    event_id = fields.Many2one(
        'event.event', string='Event', required=False,
        readonly=True, states={'draft': [('readonly', False)]})

    def _find_survey_inputs(self, survey, name, email):
        """Return the survey.user_input records of one delegate for *survey*
        on this registration's event."""
        return self.env['survey.user_input'].search([
            ('survey_id', '=', survey.id),
            ('event_id', '=', self.event_id.id),
            ('participant_name', '=', name),
            ('email', '=', email)])

    def _create_survey_input(self, survey_id, event_id, name, email):
        """Enrol a delegate into *survey_id* with a fresh access token."""
        self.env['survey.user_input'].create({
            'survey_id': survey_id,
            'event_id': event_id,
            'date_create': datetime.now(),
            'type': 'link',
            'state': 'new',
            'token': uuid.uuid4().__str__(),
            'participant_name': name,
            'email': email
        })

    @api.model
    def create(self, vals):
        """Create the registration and enrol the delegate into the event's
        exam and feedback surveys (when configured)."""
        # People paste names/addresses with stray whitespace; normalise first.
        if vals.get('email'):
            vals['email'] = vals['email'].strip()
        if vals.get('name'):
            vals['name'] = vals['name'].strip()
        res = super(event_registration, self).create(vals)
        if res.event_id.exam_survey_id:
            res._create_survey_input(res.event_id.exam_survey_id.id,
                                     res.event_id.id, res.name, res.email)
        if res.event_id.feedback_survey_id:
            res._create_survey_input(res.event_id.feedback_survey_id.id,
                                     res.event_id.id, res.name, res.email)
        return res

    @api.multi
    def unlink(self):
        """Delete registrations, cleaning up the delegates' survey entries
        and, when this was their last registration, their mailing-list
        subscription."""
        for delegate in self:
            # Remove the delegate from the exam and feedback surveys.
            # (Truthiness check used consistently; the original feedback
            # branch tested `.id`, which raises on multi-record results.)
            for survey in (delegate.event_id.exam_survey_id,
                           delegate.event_id.feedback_survey_id):
                if survey:
                    stale = delegate._find_survey_inputs(
                        survey, delegate.name, delegate.email)
                    if stale:
                        stale.unlink()
            registration = self.env['event.registration'].search(
                [('name', '=', delegate.name), ('email', '=', delegate.email)])
            name_parts = delegate.name.split(' ')
            first_name = name_parts[0]
            last_name = name_parts[-1] if len(name_parts) > 1 else False
            subscription = self.env['mail.mass_mailing.contact'].search([
                ('name', '=', first_name), ('last_name', '=', last_name),
                ('email', '=', delegate.email), ('list_id', '=', 2)])
            # Only drop the mailing-list subscription when this is the
            # delegate's last remaining registration.
            if len(subscription) > 0 and len(registration) == 1:
                subscription.unlink()
        return super(event_registration, self).unlink()

    @api.multi
    def write(self, vals):
        """Propagate name/e-mail changes to the delegate's survey entries
        and mailing-list subscription before writing."""
        if vals.get('name') or vals.get('email'):
            change = {}
            if vals.get('name'):
                change['participant_name'] = vals['name']
                old_parts = self.name.split(' ')
                old_first = old_parts[0]
                old_last = old_parts[-1] if len(old_parts) > 1 else False
                new_parts = vals.get('name').split(' ')
                # BUGFIX: the new first name must come from the *new* name
                # parts; the original code reused the old first name.
                new_first = new_parts[0]
                new_last = new_parts[-1] if len(new_parts) > 1 else False
                subscription = self.env['mail.mass_mailing.contact'].search([
                    ('name', '=', old_first), ('last_name', '=', old_last),
                    ('email', '=', self.email), ('list_id', '=', 2)])
                if subscription:
                    subscription.write({
                        'name': new_first,
                        'last_name': new_last,
                        'email': vals.get('email') or self.email,
                    })
            if vals.get('email'):
                change['email'] = vals['email']
            for survey in (self.event_id.exam_survey_id,
                           self.event_id.feedback_survey_id):
                if survey:
                    entries = self._find_survey_inputs(survey, self.name, self.email)
                    if entries:
                        entries.write(change)
        return super(event_registration, self).write(vals)
class event_travel(models.Model):
    # Travel arrangement attached to a training event; field changes are
    # tracked on the chatter via mail.thread.
    _name = 'event.travel'
    _description = 'Travel Arrangement'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    event_id = fields.Many2one('event.event', string='Event', ondelete='cascade', required=True)
    # Auto-generated in create() from the course name and dates.
    name = fields.Text(string='Name')
    travel_by = fields.Selection([
        ('air', 'Airplane'),
        ('rail', 'Rail'),
        ('car_private', 'Private Car'),
        ('car_rental', 'Rental Car')
    ], string='Travel form', track_visibility='onchange')
    travel_type = fields.Selection([('one_way', "One Way Trip"), ('round', 'Round Trip')], string='Status', track_visibility='onchange')
    company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
    travel_cost = fields.Monetary(string='Cost of travel', track_visibility='onchange')
    currency_id = fields.Many2one('res.currency', related='company_id.currency_id', required=True, store=True)
    # Outbound leg
    travel_departure_time = fields.Datetime(string='Departure time', track_visibility='onchange')
    travel_arrival_time = fields.Datetime(string='Arrival time', track_visibility='onchange')
    outbound_from = fields.Char(string='Leaving From', track_visibility='onchange')
    outbound_to = fields.Char(string='Going to', track_visibility='onchange')
    outbound_carrier = fields.Char(string='Carrier', track_visibility='onchange')
    outbound_flight_number = fields.Char(string='Flight number', track_visibility='onchange')
    outbound_last_checkin = fields.Datetime(string='Last check-in', track_visibility='onchange')
    # Inbound leg (round trips)
    inbound_travel_departure_time = fields.Datetime(string='Departure time', track_visibility='onchange')
    inbound_travel_arrival_time = fields.Datetime(string='Arrival time', track_visibility='onchange')
    inbound_from = fields.Char(string='Leaving From', track_visibility='onchange')
    inbound_to = fields.Char(string='Going to', track_visibility='onchange')
    inbound_carrier = fields.Char(string='Carrier', track_visibility='onchange')
    inbound_flight_number = fields.Char(string='Flight number', track_visibility='onchange')
    inbound_last_checkin = fields.Datetime(string='Last check-in', track_visibility='onchange')
    # Only relevant for travel_by == 'air'
    outbound_departure_airport_id = fields.Many2one('airport.airport', string='Departure airport')#, track_visibility='onchange')
    outbound_arrival_airport_id = fields.Many2one('airport.airport', string='Arrival airport')#, track_visibility='onchange')
    inbound_departure_airport_id = fields.Many2one('airport.airport', string='Departure airport')#, track_visibility='onchange')
    inbound_arrival_airport_id = fields.Many2one('airport.airport', string='Arrival airport')#, track_visibility='onchange')
    # Only relevant for travel_by == 'rail'
    outbound_rail_class = fields.Char(string='Class', track_visibility='onchange')
    inbound_rail_class = fields.Char(string='Class', track_visibility='onchange')
    rail_discount = fields.Char(string='Railcards Discount', track_visibility='onchange')
    # Only relevant for car travel
    car_type = fields.Char(string='Car type', track_visibility='onchange')
    car_company = fields.Char(string='Rental car company', track_visibility='onchange')
    travel_notes = fields.Text(string='Notes', track_visibility='onchange')

    @api.model
    def create(self, vals):
        # Generate a descriptive name from the course and its dates, post a
        # note on the event chatter, and copy the event's followers (with
        # their subscription subtypes) onto the new record.
        event = self.env['event.event'].browse(vals['event_id'])
        vals['name'] = '{course} ({date_start} - {date_end}) Travel Arrangement'.format(
            course=event.name,
            date_start=fields.Datetime.from_string(event.date_begin).strftime('%a, %d %b %Y'),
            date_end=fields.Datetime.from_string(event.date_end).strftime('%a, %d %b %Y'))
        res = super(event_travel, self).create(vals)
        event.message_post(type="comment", subtype='mail.mt_note', notify=True, body='Travel Arrangement added')
        for follower in event.message_follower_ids:
            res.message_subscribe(partner_ids=[follower.partner_id.id], subtype_ids=[subtype.id for subtype in follower.subtype_ids])
        return res
class event_accommodation(models.Model):
    # Accommodation booking attached to a training event; field changes are
    # tracked on the chatter via mail.thread.
    _name = 'event.accommodation'
    _description = 'Event Accommodation'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    event_id = fields.Many2one('event.event', string='Event', ondelete='cascade', required=True)
    # Auto-generated in create() from the course name and dates.
    name = fields.Text(string='Name')
    accommodation_type = fields.Selection([
        ('5star', '5 star Hotel'),
        ('4star', '4 star Hotel'),
        ('3star', '3 star Hotel'),
        ('2star', '2 star Hotel'),
        ('1star', '1 star Hotel'),
        ('airbnb', 'Airbnb')
    ], string='Accommodation type', track_visibility='onchange')
    accommodation_name = fields.Char(string='Property name', track_visibility='onchange')
    accommodation_check_in = fields.Datetime(string='Check In', track_visibility='onchange')
    accommodation_check_out = fields.Datetime(string='Check Out', track_visibility='onchange')
    accommodation_price = fields.Monetary(string='Price', track_visibility='onchange')
    accommodation_status = fields.Selection([('booked', "Only Booked"), ('paid', 'Paid')], string='Status', track_visibility='onchange')
    company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.user.company_id)
    currency_id = fields.Many2one('res.currency', related='company_id.currency_id', required=True, store=True)
    accommodation_notes = fields.Text(string='Notes', track_visibility='onchange')

    @api.model
    def create(self, vals):
        # Generate a descriptive name from the course and its dates, post a
        # note on the event chatter, and copy the event's followers (with
        # their subscription subtypes) onto the new record.
        event = self.env['event.event'].browse(vals['event_id'])
        vals['name'] = '{course} ({date_start} - {date_end}) Accommodation'.format(
            course=event.name,
            date_start=fields.Datetime.from_string(event.date_begin).strftime('%a, %d %b %Y'),
            date_end=fields.Datetime.from_string(event.date_end).strftime('%a, %d %b %Y'))
        res = super(event_accommodation, self).create(vals)
        event.message_post(type="comment", subtype='mail.mt_note', notify=True, body='Accommodation added')
        for follower in event.message_follower_ids:
            res.message_subscribe(partner_ids=[follower.partner_id.id], subtype_ids=[subtype.id for subtype in follower.subtype_ids])
        return res
class event_notes(models.Model):
    # Free-form note attached to a training event; changes are tracked on
    # the chatter via mail.thread.
    _name = 'event.notes'
    _description = 'Event Note'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    # Auto-generated in create() from the course name and dates.
    name = fields.Text(string='Name')
    event_id = fields.Many2one('event.event', string='Event', ondelete='cascade', required=True)
    description = fields.Text(string="Description", required=True, track_visibility='onchange')

    @api.model
    def create(self, vals):
        # Generate a descriptive name from the course and its dates, post a
        # note on the event chatter, and copy the event's followers (with
        # their subscription subtypes) onto the new record.
        event = self.env['event.event'].browse(vals['event_id'])
        vals['name'] = '{course} ({date_start} - {date_end}) Note'.format(
            course=event.name,
            date_start=fields.Datetime.from_string(event.date_begin).strftime('%a, %d %b %Y'),
            date_end=fields.Datetime.from_string(event.date_end).strftime('%a, %d %b %Y'))
        res = super(event_notes, self).create(vals)
        event.message_post(type="comment", subtype='mail.mt_note', notify=True, body='Note added')
        for follower in event.message_follower_ids:
            res.message_subscribe(partner_ids=[follower.partner_id.id], subtype_ids=[subtype.id for subtype in follower.subtype_ids])
        return res
class calendar_event(models.Model):
    # Link calendar entries back to the training event they belong to.
    _inherit = 'calendar.event'

    qw_event_id = fields.Many2one('event.event', string='Related Event', track_visibility='onchange')
|
[
"noreply@github.com"
] |
noreply@github.com
|
d668a2191518d04bcc9df704986600a8dfe6d936
|
d6088b0160d1c0fc70a9c8a865a0825141703ea9
|
/urls.py
|
197d17e671d1a43dcc2f6d02d5eed97cbeb64ec3
|
[] |
no_license
|
pattern/simple-django-application
|
1067cbc912e19d6e381882d5927b0d425c970239
|
32eb8f9f10c2cea21976dedf116efd72c16379d5
|
refs/heads/master
| 2020-05-16T02:05:52.367785
| 2011-08-11T19:14:04
| 2011-08-11T19:14:04
| 2,125,199
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
# URL configuration: maps the site root to the `home` view.
from django.conf.urls.defaults import patterns, include, url
# NOTE(review): "simple-django-application" contains hyphens, which are not
# legal in a Python module path, so this import cannot succeed as written.
# Confirm the real package name (likely underscores) before deploying.
from simple-django-application.views import *

# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    url(r'^$', home, name='home'),
    # url(r'^simple-django-application/', include('simple-django-application.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
[
"pickelman@gmail.com"
] |
pickelman@gmail.com
|
55b6530372d2b8c8b937644e163e34066b11837f
|
f7db1796a9430e4c23e3368be2af49cd533c14bd
|
/hidef/logger.py
|
4e229eae8ae32e57d6fdeab2d13535762a30b4f4
|
[
"BSD-3-Clause"
] |
permissive
|
fanzheng10/HiDeF
|
145640898d75a34bb6fae0b78987697a2ec258f7
|
bf2f1a67b21fdaa375a5c1d428004cd7c9686341
|
refs/heads/master
| 2023-05-14T18:24:51.041018
| 2023-05-09T15:09:59
| 2023-05-09T15:09:59
| 347,454,671
| 7
| 5
|
BSD-3-Clause
| 2023-05-09T15:10:00
| 2021-03-13T18:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,547
|
py
|
"""This module defines class that can be used a package wide logger."""
import sys
import math
import time
import os.path
import logging
import datetime
import logging.handlers
import numbers
__all__ = ['PackageLogger', 'LOGGING_LEVELS']

# Custom level between INFO (20) and WARNING (30) used for progress bars.
LOGGING_PROGRESS = logging.INFO + 5

# Map user-facing verbosity names to stdlib logging levels.
# 'none' maps to CRITICAL so that effectively nothing is printed.
LOGGING_LEVELS = {'debug': logging.DEBUG,
                  'info': logging.INFO,
                  'progress': LOGGING_PROGRESS,
                  'warning': logging.WARNING,
                  'error': logging.ERROR,
                  'critical': logging.CRITICAL,
                  'none': logging.CRITICAL}
# Reverse lookup: level number -> name. Since 'critical' and 'none' share
# a value, the later entry wins for that level.
LOGGING_INVERSE = {}
for key, value in LOGGING_LEVELS.items():  # PY3K: OK
    LOGGING_INVERSE[value] = key

# Shortcut used when timestamping logfile sessions.
now = datetime.datetime.now
class PackageLogger(object):

    """A class for package wide logging functionality.

    Wraps a stdlib :class:`logging.Logger` with a single ``sys.stderr``
    console handler, optional rotating logfiles, and in-place progress
    bars / timers driven through the custom LOGGING_PROGRESS level."""

    def __init__(self, name, **kwargs):
        """Start logger for the package. Returns a logger instance.

        :arg prefix: prefix to console log messages, default is ``'@> '``

        :arg console: log level for console (``sys.stderr``) messages,
            default is ``'debug'``

        :arg info: prefix to log messages at *info* level

        :arg warning: prefix to log messages at *warning* level, default is
            ``'WARNING '``

        :arg error: prefix to log messages at *error* level, default is
            ``'ERROR '``
        """

        self._level = logging.DEBUG
        self._logger = logger = logging.getLogger(name)
        logger.setLevel(self._level)

        # getLogger returns a shared instance, so drop any handlers left
        # over from a previous instantiation under the same name.
        for handler in logger.handlers:
            handler.close()
        logger.handlers = []

        console = logging.StreamHandler()
        console.setLevel(LOGGING_LEVELS[kwargs.get('console', 'debug')])
        logger.addHandler(console)
        # Assigning through the `prefix` property also installs the
        # console formatter.
        self.prefix = kwargs.get('prefix', '@> ')

        self._info = kwargs.get('info', '')
        self._warning = kwargs.get('warning', 'WARNING ')
        self._error = kwargs.get('error', 'ERROR ')

        # Progress-bar / timing bookkeeping.
        self._n = None
        self._last = None
        self._barlen = None
        self._prev = None
        self._line = None
        self._times = {}
        self._n_progress = 0

    # ====================
    # Attributes
    # ====================

    def _getverbosity(self):
        # handlers[0] is the console handler installed in __init__.
        return LOGGING_INVERSE.get(self._logger.handlers[0].level)

    def _setverbosity(self, level):

        lvl = LOGGING_LEVELS.get(str(level).lower(), None)
        if lvl is None:
            self.warn('{0} is not a valid log level.'.format(level))
        else:
            self._logger.handlers[0].level = lvl
            self._level = lvl

    verbosity = property(_getverbosity, _setverbosity, doc=
        """Verbosity *level* of the logger, default level is **debug**. Log
        messages are written to ``sys.stderr``. Following logging levers are
        recognized:

        ======== =============================================
        Level    Description
        ======== =============================================
        debug    Everything will be printed to the sys.stderr.
        info     Only brief information will be printed.
        warning  Only warning messages will be printed.
        none     Nothing will be printed.
        ======== =============================================""")

    def _getprefix(self):

        return self._prefix

    def _setprefix(self, prefix):

        self._prefix = str(prefix)
        prefix += '%(message)s'
        # The prefix is baked into the console handler's formatter.
        self._logger.handlers[0].setFormatter(logging.Formatter(prefix))

    prefix = property(_getprefix, _setprefix, doc='String prepended to console'
                                                  ' log messages.')

    # ====================
    # Logging methods
    # ====================

    def info(self, msg):
        """Log *msg* with severity 'INFO'."""

        self.clear()
        self._logger.info(msg)

    def critical(self, msg):
        """Log *msg* with severity 'CRITICAL'."""

        self.clear()
        self._logger.critical(msg)

    def debug(self, msg):
        """Log *msg* with severity 'DEBUG'."""

        self.clear()
        self._logger.debug(msg)

    def warning(self, msg):
        """Log *msg* with severity 'WARNING'."""

        self.clear()
        self._logger.warning(self._warning + msg)

    warn = warning

    def error(self, msg):
        """Log *msg* with severity 'ERROR' and terminate with status 2."""

        self.clear()
        self._logger.error(self._error + msg)
        self.exit(2)

    def write(self, line):
        """Write *line* into ``sys.stderr``."""

        self._line = str(line)
        # Only emit when the console verbosity is below WARNING.
        if self._level < logging.WARNING:
            sys.stderr.write(self._line)
            sys.stderr.flush()

    def clear(self):
        """Clear current line in ``sys.stderr``."""

        # During a progress bar (level == LOGGING_PROGRESS) the line is
        # left in place so update() can overwrite it.
        if self._level != LOGGING_PROGRESS:
            if self._line and self._level < logging.WARNING:
                sys.stderr.write('\r' + ' ' * (len(self._line)) + '\r')
                self._line = ''

    def exit(self, status=0):
        """Exit the interpreter."""

        sys.exit(status)

    # ====================
    # Handlers & logfiles
    # ====================

    def addHandler(self, hdlr):
        """Add the specified handler to this logger."""

        self._logger.addHandler(hdlr)

    def getHandlers(self):
        """Returns handlers."""

        return self._logger.handlers

    def delHandler(self, index):
        """Remove handler at given *index* from the logger instance."""

        self._logger.handlers.pop(index)

    def start(self, filename, **kwargs):
        """Start a logfile.  If *filename* does not have an extension.
        :file:`.log` will be appended to it.

        :arg filename: name of the logfile

        :arg mode: mode in which logfile will be opened, default is "w"

        :arg backupcount: number of existing *filename.log* files to
            backup, default is 1"""

        filename = str(filename)
        if os.path.splitext(filename)[1] == '':
            filename += '.log'
        rollover = False
        # if mode='a' is provided, rollover is not performed
        if os.path.isfile(filename) and kwargs.get('mode', None) != 'a':
            rollover = True
        logfile = logging.handlers.RotatingFileHandler(filename,
                    mode=kwargs.get('mode', 'a'), maxBytes=0,
                    backupCount=kwargs.get('backupcount', 1))
        logfile.setLevel(LOGGING_LEVELS[kwargs.get('loglevel', 'debug')])
        logfile.setFormatter(logging.Formatter('%(message)s'))
        self.info("Logging into file: {0}".format(filename))
        self._logger.addHandler(logfile)
        if rollover:
            # maxBytes=0 disables automatic rollover; it is forced here so
            # the previous logfile is preserved as a backup.
            logfile.doRollover()
        self.info("Logging started at {0}".format(str(now())))

    def close(self, filename):
        """Close logfile *filename*."""

        filename = str(filename)
        if os.path.splitext(filename)[1] == '':
            filename += '.log'
        for index, handler in enumerate(self.getHandlers()):
            if isinstance(handler, logging.handlers.RotatingFileHandler):
                if handler.stream.name in (filename,os.path.abspath(filename)):
                    self.info("Logging stopped at {0}".format(str(now())))
                    handler.close()
                    self.delHandler(index)
                    self.info("Closing logfile: {0}".format(filename))
                    return
        self.warning("Logfile '{0}' was not found.".format(filename))

    # ====================
    # Progress and timing
    # ====================

    def progress(self, msg, steps, label=None, **kwargs):
        """Instantiate a labeled process with message and number of steps."""

        assert isinstance(steps, numbers.Integral) and steps > 0, \
            'steps must be a positive integer'
        self._steps = steps
        self._last = 0
        self._times[label] = time.time()
        self._prev = (-1, 0)
        self._msg = msg
        self._line = ''

        # Temporarily raise the console verbosity to the 'progress' level;
        # the original verbosity is restored in finish().  Nested progress
        # bars are counted via _n_progress.
        if not hasattr(self, '_verb'):
            self._verb = self._getverbosity()
            self._setverbosity('progress')

        self._n_progress += 1

    def update(self, step, msg=None, label=None):
        """Update progress status to current line in the console."""

        assert isinstance(step, numbers.Integral), 'step must be a positive integer'
        if msg is not None:
            self._msg = msg
        n = self._steps
        i = step
        # Only redraw when the step advanced and console output is enabled.
        if self._level < logging.WARNING and n > 0 and i <= n and \
           i > self._last:
            start = self._times[label]
            self._last = i
            percent = 100 * i / n
            #if percent > 3:
            # Naive ETA: elapsed time scaled by remaining/completed steps.
            seconds = int(math.ceil((time.time()-start) * (n-i)/i))
            prev = (percent, seconds)
            #else:
                #prev = (percent, 0)
            #if self._prev == prev:
            #    return
            sys.stderr.write('\r' + ' ' * (len(self._line)) + '\r')
            #if percent > 3:
            line = self._prefix + self._msg + ' [%3d%%] %ds' % (percent, seconds)
            #else:
            #    line = self._prefix + self._msg + ' [%3d%%]' % percent
            sys.stderr.write(line)
            sys.stderr.flush()
            self._prev = prev
            self._line = line

    def finish(self):
        # Pop one level of (possibly nested) progress; once all bars are
        # done, restore the verbosity saved in progress().
        self._n_progress -= 1
        if self._n_progress < 0:
            self._n_progress = 0
        if self._n_progress == 0:
            if hasattr(self, '_verb'):
                self._setverbosity(self._verb)
                del self._verb
        self.clear()

    def sleep(self, seconds, msg=''):
        """Sleep for seconds while updating screen message every second.
        Message will start with ``'Waiting for Xs '`` followed by *msg*."""

        msg = str(msg)
        for second in range(int(seconds), 0, -1):
            self.write('Waiting for {0}s {1}'.format(second, msg))
            time.sleep(1)
            self.clear()

    def timeit(self, label=None):
        """Start timing a process.  Use :meth:`timing` and :meth:`report` to
        learn and report timing, respectively."""

        self._times[label] = time.time()

    def timing(self, label=None):
        """Returns timing for a labeled or default (**None**) process."""

        # NOTE(review): if *label* was never started, .get(label, 0) makes
        # this return the seconds-since-epoch value — confirm callers always
        # call timeit()/progress() first.
        return time.time() - self._times.get(label, 0)

    def report(self, msg='Completed in %.2fs.', label=None):
        """Write *msg* with timing information for a labeled or default process
        at *debug* logging level."""

        # NOTE(review): unlike timing(), this raises KeyError for an
        # unknown label.
        self.debug(msg % (time.time() - self._times[label]))
|
[
"shz66@pitt.edu"
] |
shz66@pitt.edu
|
ce9aad8e1b77a986dc7ea57c08f77bee0d66e97c
|
600191b4a3106c27f9c73bb5cd7df5c1e3d377f4
|
/Webcam-Face-Detect/webcam.py
|
98a736c64ae4944d1aadf8e87d2f1d444d2434f7
|
[] |
no_license
|
migueleci/Voice-Recognition
|
79c8ea3f3df3a38167e6d42d48756caa24dfa924
|
af21ee1e6346830ea416399d7624a84a6262dee9
|
refs/heads/master
| 2021-01-10T06:31:05.035623
| 2015-10-08T04:26:57
| 2015-10-08T04:26:57
| 43,508,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,237
|
py
|
# Face detection/recognition demo for legacy OpenCV 2.4.x under Python 2
# (cv2.cv and createLBPHFaceRecognizer do not exist in OpenCV 3+).
import cv2
import sys
import os
import numpy as np
from PIL import Image

#cascPath = sys.argv[1]
# NOTE(review): machine-specific absolute path — parameterize before reuse.
cascPath = '/home/lenovo/Documentos/Webcam-Face-Detect/haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)

video_capture = cv2.VideoCapture(0)

# Only the LBPH recognizer is actually trained/used below.
recognizer = cv2.createLBPHFaceRecognizer()
recognizer1 = cv2.createEigenFaceRecognizer()
#recognizer2 = cv2.createFisherFaceRecognizer()
def get_images_and_labels2(path):
    """Detect faces in every .jpg under *path*; return (images, labels).

    File names are expected to look like "<number>.jpg"; each detected face
    gets the label number*10 + its index within the image.  Every cropped
    face is also written to disk under an incrementing name from 2001 up.
    """
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
    images = []
    labels = []
    count = 2000
    for image_path in image_paths:
        counter = 0
        # Grayscale copy via PIL used for the cropped samples; a second
        # grayscale via cv2 is used for detection.
        image_pil = Image.open(image_path).convert('L')
        image2 = cv2.imread(image_path)
        image = np.array(image_pil, 'uint8')
        gray = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        name=int(os.path.split(image_path)[1].split(".")[0])
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(60, 60),
            flags = cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            counter +=1
            count += 1
            labels.append(name*10+counter)
            # Persist the cropped face for later inspection/training.
            cv2.cv.SaveImage('{0}.jpg'.format(count), cv2.cv.fromarray(image[y: y + h, x: x + w]))
            print (str(name*10+counter))
            cv2.waitKey(150)
    return images, labels
def get_images_and_labels(path):
    """Simpler variant of get_images_and_labels2: detect faces in every
    .jpg under *path* and return (images, labels) without saving crops."""
    # Append all the absolute image paths in a list image_paths
    # We will not read the image with the .sad extension in the training set
    # Rather, we will use them to test our accuracy of the training
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
    # images will contains face images
    images = []
    # labels will contains the label that is assigned to the image
    labels = []
    for image_path in image_paths:
        counter = 0
        # Read the image and convert to grayscale
        image_pil = Image.open(image_path).convert('L')
        # Convert the image format into numpy array
        image = np.array(image_pil, 'uint8')
        # Get the label of the image (numeric file-name prefix)
        nbr = int(os.path.split(image_path)[1].split(".")[0])
        # Detect the face in the image
        faces = faceCascade.detectMultiScale(image)
        # If face is detected, append the face to images and the label to labels
        for (x, y, w, h) in faces:
            images.append(image[y: y + h, x: x + w])
            counter+=1
            labels.append(nbr*10+counter)
            cv2.imshow("Adding faces to traning set...", image[y: y + h, x: x + w])
            cv2.waitKey(50)
    # return the images list and labels list
    return images, labels
def recognize(path):
    """Run the trained LBPH recognizer on every .jpg under *path* and
    print (predicted label, confidence, actual label) for each face.

    Python 2 print statements; requires recognizer to be trained first.
    """
    image_paths = [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
    for image_path in image_paths:
        predict_image_pil = Image.open(image_path).convert('L')
        predict_image = np.array(predict_image_pil, 'uint8')
        faces = faceCascade.detectMultiScale(predict_image)
        for (x, y, w, h) in faces:
            # Ground-truth label comes from the numeric file-name prefix.
            nbr_actual = os.path.split(image_path)[1].split(".")[0]
            nbr_predicted, conf = recognizer.predict(predict_image[y: y + h, x: x + w])
            print nbr_predicted, conf, nbr_actual
            #nbr_predicted, conf = recognizer1.predict(predict_image[y: y + h, x: x + w])
            #print nbr_predicted, conf, nbr_actual
            #nbr_predicted, conf = recognizer2.predict(predict_image[y: y + h, x: x + w])
            #print nbr_predicted, conf, nbr_actual
            '''
            if nbr_actual == nbr_predicted:
                print "{} is Correctly Recognized with confidence {}".format(nbr_actual, conf)
            else:
                print "{} is Incorrectly Recognized as {}".format(nbr_actual, nbr_predicted)'''
            cv2.imshow("Recognizing Face", predict_image[y: y + h, x: x + w])
            cv2.waitKey(1000)
def webcam():
    """Capture frames from the webcam until a face is recognized with
    confidence below 35 (lower = better for LBPH), or 'q' is pressed.

    Each candidate face crop is also saved to disk (names from 100 up).
    """
    found = False
    counter = 100
    counter2 = 9900
    while not found:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        #cv2.cv.SaveImage('{0}.jpg'.format(counter2), cv2.cv.fromarray(frame))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(60, 60),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            nbr_predicted, conf = recognizer.predict(gray[y: y + h, x: x + w])
            cv2.cv.SaveImage('{0}.jpg'.format(counter), cv2.cv.fromarray(gray[y: y + h, x: x + w]))
            counter += 1
            print nbr_predicted, conf
            # LBPH confidence is a distance: small values mean a match.
            if conf < 35:
                found = True
                cv2.imshow('Identified', gray[y: y + h, x: x + w])
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
def database():
    """Train the LBPH recognizer from the face images under DB/ and then
    start the webcam recognition loop."""
    # NOTE(review): machine-specific absolute paths — parameterize before reuse.
    pathDB = '/home/lenovo/Documentos/Webcam-Face-Detect/DB/'
    pathP = '/home/lenovo/Documentos/Webcam-Face-Detect/Photos/'
    # The folder yalefaces is in the same folder as this python script
    # Call the get_images_and_labels function and get the face images and the
    # corresponding labels
    #images, labels = get_images_and_labels(pathDB)
    images, labels = get_images_and_labels2(pathDB)
    cv2.destroyAllWindows()
    recognizer.train(images, np.array(labels))
    #recognizer1.train(images, np.array(labels))
    #recognizer2.train(images, np.array(labels))
    #recognize(pathP)
    webcam()

# Script entry point: train then recognize.
database()
|
[
"miguelromero@MacBook-Air-de-Miguel.local"
] |
miguelromero@MacBook-Air-de-Miguel.local
|
acd4fb11db9c4c18e77570e8ced8ad3bfa510852
|
b5eba8d20db69624298bffba1c0572553174a9d7
|
/kattis/SolvingCarrots.py
|
adc50c5d88d1fb394172f76acccf236c681d6fe6
|
[] |
no_license
|
kvntma/coding-practice
|
a7ef1bde165df8ec3932a2f863b37c8f44de87a4
|
9e568e479b6cac144b1c7c87b1232093f2209090
|
refs/heads/master
| 2021-07-21T21:14:18.783812
| 2020-10-24T18:30:57
| 2020-10-24T18:30:57
| 225,339,540
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
import sys

# Kattis "Solving for Carrots": the first input line holds two integers and
# the required answer is simply the second one.
first_line = sys.stdin.readline()
tokens = first_line.split()
answer = tokens[1]
print(answer)
|
[
"kvntma@gmail.com"
] |
kvntma@gmail.com
|
7db2d4f92504ff0979701ebbff1d9799fdd40f3a
|
d5919e45e00543ea6abf1a65647fe9a32af9dc1e
|
/processmysteps/default_config.py
|
957dd940b6d9322e2868adb91f570021fc197113
|
[
"MIT"
] |
permissive
|
ruipgil/ProcessMySteps
|
4f899c297f0ad09403f9bab3aa601b5275163039
|
39bf630895630f95129a455c8dd4d944f5ac1a06
|
refs/heads/master
| 2021-01-16T23:37:37.073841
| 2017-05-01T21:27:01
| 2017-05-01T21:27:01
| 55,251,842
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
"""
Base line settings
"""
CONFIG = {
'input_path': None,
'backup_path': None,
'output_path': None,
'life_path': None,
'life_all': None,
'db': {
'host': None,
'port': None,
'name': None,
'user': None,
'pass': None
},
'default_timezone': 1,
'life_annotations': 'all', # all (for stays + trips), stays, trips
'smoothing': {
'use': True,
'algorithm': 'inverse',
'noise': 1000
},
'segmentation': {
'use': True,
'epsilon': 0.01,
'min_time': 60
},
'simplification': {
'use': True,
'max_dist_error': 2.0,
'max_speed_error': 1.0,
'eps': 0.000015
},
'location': {
'use': True,
'max_distance': 20,
'min_samples': 2,
'limit': 5,
'use_google': True,
'google_key': '',
'use_foursquare': True,
'foursquare_client_id': '',
'foursquare_client_secret': ''
},
'transportation': {
'use': True,
'remove_stops': False,
'min_time': 60,
'classifier_path': None
},
'trip_learning': {
'use': True,
'epsilon': 0.0
},
'trip_name_format': '%Y-%m-%d'
}
|
[
"ruipgil@gmail.com"
] |
ruipgil@gmail.com
|
e32b8417c5e29a4c4747ff2195cf73af2b296957
|
b1a3a981f1d7255a583fd04a9ba979d24310ef86
|
/6th-sem/OS/project/short_term_scheduler3.py
|
31453e803d0f4e8749950b906787a6b274281489
|
[] |
no_license
|
asutosh97/college-labs
|
ee2fb48db79370d51ed73a9f9b44960b451a2268
|
7ea6a4fb65785ae6a15efb92e97a7baafb7ac722
|
refs/heads/master
| 2021-09-11T15:35:59.281738
| 2018-04-09T11:00:05
| 2018-04-09T11:00:05
| 99,686,272
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,916
|
py
|
#!/usr/bin/python3
'''
Program to simulate different scheduling alogrithms
'''
from queue import PriorityQueue
from functools import reduce
import sys, operator
def insert_to_ready_queue(ready_queue, criteria, process, priority_counter):
    """Enqueue *process* keyed by its *criteria* field.

    The round-robin priority stamp is bumped first and recorded on the
    process itself; the bumped value is returned so the caller can thread
    it through subsequent insertions.
    """
    next_stamp = priority_counter + 1
    process['rr_priority'] = next_stamp
    key = process[criteria]
    ready_queue.put((key, process))
    return next_stamp
def set_queue(ready_queue, criteria, processes, start_time, end_time, priority_counter):
    """Insert every process whose arrival time falls in [start_time, end_time].

    *processes* must already be sorted (stably) by arrival time so that ties
    are broken by the original ordering.  Returns the updated RR counter.
    """
    window = range(start_time, end_time + 1)
    for proc in processes:
        if proc['arrival_time'] in window:
            priority_counter = insert_to_ready_queue(ready_queue, criteria, proc, priority_counter)
    return priority_counter
def base_scheduler_step(processes, ready_queue, result, timer, time_quantum, is_preemptive, criteria, priority_counter, processes_left, gantt_chart):
    """Run the next process from *ready_queue* for one scheduling step.

    Preemptive mode runs it for at most *time_quantum* time units;
    non-preemptive mode runs it to completion.  Arrivals during the step
    are admitted to the ready queue.  Returns the updated
    (priority_counter, timer, processes_left, gantt_chart).
    """
    _, process = ready_queue.get()
    runtime = min(time_quantum, process['time_left']) if is_preemptive else process['time_left']
    process['time_left'] -= runtime
    old_timer = timer
    timer += runtime
    # Admit processes that arrived while this one was running.
    priority_counter = set_queue(ready_queue, criteria, processes, old_timer + 1, timer, priority_counter)
    gantt_chart.append({'id': process['id'], 'start_time': old_timer, 'end_time': timer})
    if process['time_left'] == 0:
        # Finished: record turnaround/waiting stats and retire the process.
        process['turn_around_time'] = timer - process['arrival_time']
        process['waiting_time'] = process['turn_around_time'] - process['burst_time']
        processes_left -= 1
        result.append(process)
    else:
        # Preempted: requeue with a fresh RR stamp.
        priority_counter = insert_to_ready_queue(ready_queue, criteria, process, priority_counter)
    return priority_counter, timer, processes_left, gantt_chart
def fcfs_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """First-Come-First-Served step: non-preemptive, queue keyed by arrival time."""
    return base_scheduler_step(processes, ready_queue, result, timer, 1, False, 'arrival_time', 0, processes_left, gantt_chart)
def sjf_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """Shortest-Job-First step: non-preemptive, queue keyed by total burst time."""
    return base_scheduler_step(processes, ready_queue, result, timer, 1, False, 'burst_time', 0, processes_left, gantt_chart)
def srtf_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """Shortest-Remaining-Time-First step: preemptive, keyed by remaining time."""
    return base_scheduler_step(processes, ready_queue, result, timer, 1, True, 'time_left', 0, processes_left, gantt_chart)
def rr_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """Round-Robin step: preemptive with *time_quantum*, FIFO via rr_priority stamps."""
    return base_scheduler_step(processes, ready_queue, result, timer, time_quantum, True, 'rr_priority', priority_counter, processes_left, gantt_chart)
def priority_non_preemptive_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """Priority step (non-preemptive): lower priority_value is popped first."""
    return base_scheduler_step(processes, ready_queue, result, timer, 1, False, 'priority_value', 0, processes_left, gantt_chart)
def priority_preemptive_step(processes, ready_queue, result, timer, processes_left, gantt_chart, time_quantum, priority_counter):
    """Priority step (preemptive, quantum 1): lower priority_value is popped first."""
    return base_scheduler_step(processes, ready_queue, result, timer, 1, True, 'priority_value', 0, processes_left, gantt_chart)
def scheduling_algo_mapper(scheduling_algo):
    """Map an algorithm name to its ready-queue criteria key and step function."""
    table = {
        'fcfs': {'criteria': 'arrival_time', 'step_function': fcfs_step},
        'sjf': {'criteria': 'burst_time', 'step_function': sjf_step},
        'srtf': {'criteria': 'burst_time', 'step_function': srtf_step},
        'rr': {'criteria': 'rr_priority', 'step_function': rr_step},
        'priority_non_preemptive': {'criteria': 'priority_value', 'step_function': priority_non_preemptive_step},
        'priority_preemptive': {'criteria': 'priority_value', 'step_function': priority_preemptive_step},
    }
    return table[scheduling_algo]
def MLFQ_base(n_queues, queue_details, processes, time0):
    """Core multi-level-feedback-queue simulator.

    queue_details[i] must provide 'scheduling_algo', a 'condition' predicate
    selecting which processes live at level i, and a 'time_quantum'.
    Returns (finished processes, gantt chart, final timer value).
    """
    # set processes of each level in the MLFQ
    processes_copy = processes[:]
    processes_set = []
    for i in range(n_queues - 1):
        processes_set.append(list(filter(queue_details[i]["condition"], processes_copy)))
        processes_copy = [process for process in processes_copy if process not in processes_set[i]]
    # Whatever no earlier level claimed falls through to the last queue.
    processes_set.append(processes_copy)
    result = []
    ready_queue = [PriorityQueue() for _ in range(n_queues)]
    priority_counter = 0
    gantt_chart = []
    timer = time0
    # setting up queues
    for idx in range(n_queues):
        processes_set[idx].sort(key = lambda process: process['arrival_time'])
        priority_counter = set_queue(ready_queue[idx], scheduling_algo_mapper(queue_details[idx]['scheduling_algo'])['criteria'], processes_set[idx], 0, timer, 0)
    processes_left = len(processes)
    while processes_left:
        entered_for = False
        # Scan levels top-down; the first non-empty queue gets the CPU.
        for idx in range(n_queues):
            if not ready_queue[idx].empty():
                entered_for = True
                priority_counter, timer_new, processes_left, gantt_chart = scheduling_algo_mapper(queue_details[idx]['scheduling_algo'])['step_function'](processes_set[idx], ready_queue[idx], result, timer, processes_left, gantt_chart, queue_details[idx]['time_quantum'], priority_counter)
                # Admit arrivals that occurred during this step to the other
                # levels (the step itself already admitted its own level's).
                for step_idx in range(1, n_queues):
                    next_idx = (idx + step_idx) % n_queues
                    priority_counter = set_queue(ready_queue[next_idx], scheduling_algo_mapper(queue_details[next_idx]['scheduling_algo'])['criteria'], processes_set[next_idx], timer + 1, timer_new, priority_counter)
                timer = timer_new
                break
        if not entered_for:
            # CPU idle: advance one tick and admit any new arrivals.
            timer += 1
            for idx in range(n_queues):
                priority_counter = set_queue(ready_queue[idx], scheduling_algo_mapper(queue_details[idx]['scheduling_algo'])['criteria'], processes_set[idx], timer, timer, priority_counter)
    return result, gantt_chart, timer
def base_scheduler(processes, scheduling_algo, time_quantum, time0):
    """Run a single-queue scheduler: an MLFQ with one level that accepts every process."""
    queue_detail = [{'scheduling_algo': scheduling_algo, 'condition': lambda x: True, 'time_quantum': time_quantum}]
    return MLFQ_base(1, queue_detail, processes, time0)
def fcfs(processes, time0):
    """First-Come-First-Served."""
    return base_scheduler(processes, 'fcfs', 1, time0)
def sjf(processes, time0):
    """Shortest Job First (non-preemptive)."""
    return base_scheduler(processes, 'sjf', 1, time0)
def srtf(processes, time0):
    """Shortest Remaining Time First (preemptive SJF)."""
    return base_scheduler(processes, 'srtf', 1, time0)
def rr(processes, time0):
    """Round Robin; prompts the user for the time quantum."""
    return base_scheduler(processes, 'rr', int(input('Enter time quantum : ')), time0)
def priority_non_preemptive(processes, time0):
    """Priority scheduling, non-preemptive (lower value runs first)."""
    return base_scheduler(processes, 'priority_non_preemptive', 1, time0)
def priority_preemptive(processes, time0):
    """Priority scheduling, preemptive (lower value runs first)."""
    return base_scheduler(processes, 'priority_preemptive', 1, time0)
def switcher(scheduling_algo, processes):
    """Dispatch a menu choice ('1'-'6') to the matching scheduler, starting at t=0."""
    return {
        '1': fcfs,
        '2': sjf,
        '3': srtf,
        '4': rr,
        '5': priority_non_preemptive,
        '6': priority_preemptive
    }[scheduling_algo](processes, 0)
def main():
    """Interactive driver: read processes from stdin, run the chosen
    scheduler, then print per-process stats and the Gantt chart."""
    processes = []
    for _ in range(int(input("Enter number of processes :- "))):
        print("")
        process = {}
        process['id'] = _
        process['arrival_time'] = int(input("Enter arrival time of p%d:- " % _))
        process['burst_time'] = int(input("Enter burst time of p%d:- " % _))
        process['priority_value'] = int(input("Enter priority value of p%d:- " % _))
        # Bookkeeping fields filled in by the scheduler.
        process['time_left'] = process['burst_time']
        process['waiting_time'] = 0
        process['turn_around_time'] = 0
        process['rr_priority'] = 0
        processes.append(process)
    print("Select your scheduling algorithm")
    print("1. FCFS")
    print("2. SJF")
    print("3. SRTF")
    print("4. RR")
    print("5. Priority non-preemptive")
    print("6. Priority preemptive")
    result, gantt_chart, _ = switcher(input(), processes)
    # Report processes in id order regardless of completion order.
    result = sorted(result, key=lambda process: process['id'])
    print("PID\tAT\tBT\tPV\tTAT\tWT")
    for process in result:
        print("%d\t%d\t%d\t%d\t%d\t%d\t" % (process['id'], process['arrival_time'], process['burst_time'], process['priority_value'], process['turn_around_time'], process['waiting_time']))
    print("")
    for process in gantt_chart:
        print(process)
if __name__ == "__main__":
    main()
|
[
"asutoshsahoo786@gmail.com"
] |
asutoshsahoo786@gmail.com
|
b2b2047436cb6ef21e42f5f64cafea79172a6929
|
f4099d24541d368f30b687cc5004c1b5f92770d6
|
/src/setup.py
|
cbbfcd700c425e504703b66e876524704df54a5b
|
[
"Apache-2.0"
] |
permissive
|
pilgrim2go/synapse-tools
|
2545bf5e32bc9ce4d7f5faf2cad76672faaf6a46
|
e493ead34741c3a9ae2caedd5c1de4aea7181a8b
|
refs/heads/master
| 2021-01-12T15:18:01.950776
| 2016-05-18T03:47:24
| 2016-05-18T03:47:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Packaging metadata for the synapse-tools distribution.
setup(
    name='synapse-tools',
    version='0.10.1',
    provides=['synapse_tools'],
    author='John Billings',
    author_email='billings@yelp.com',
    description='Synapse-related tools for use on Yelp machines',
    packages=find_packages(exclude=['tests']),
    setup_requires=['setuptools'],
    include_package_data=True,
    install_requires=[
        # paasta tools pins this so we really can't have anything higher
        # if paasta tools ever does a >= we can relax this constraint
        'argparse==1.2.1',
        'environment_tools>=1.1.0,<1.2.0',
        'plumbum>=1.6.0,<1.7.0',
        'psutil>=2.1.1,<2.2.0',
        'PyYAML>=3.11,<4.0.0',
        'pyroute2>=0.3.4,<0.4.0',
        'paasta-tools==0.16.10',
    ],
    # Command-line entry points installed into the environment's bin/.
    entry_points={
        'console_scripts': [
            'configure_synapse=synapse_tools.configure_synapse:main',
            'haproxy_synapse_reaper=synapse_tools.haproxy_synapse_reaper:main',
            'synapse_qdisc_tool=synapse_tools.haproxy.qdisc_tool:main',
        ],
    },
)
|
[
"krall@yelp.com"
] |
krall@yelp.com
|
b62b9d12528fab30ba13d52d4ab9d783c4f58689
|
e7c84801d7755806e58795d5fe51f7a924815ffc
|
/python-image-watermark/python-watermark-image.py
|
86d4a5401ae25cc33b68205ae57687d2b72853e3
|
[] |
no_license
|
c0c1/python-image
|
3454b37b3e0339fd3e204a38d7aa14c885e10e38
|
b785801589722571ac7ed8ad4428b4d04f518a2b
|
refs/heads/master
| 2023-06-04T23:36:17.974408
| 2021-06-21T12:38:23
| 2021-06-21T12:38:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
import os, sys
from PIL import Image, ImageDraw, ImageFont

# Watermark every image in the source folder with centered text and save the
# result (same file name) into the destination folder.
img_dir = "images/non-watermark/"
dirs = os.listdir( img_dir )

# Loop-invariant setup: the watermark text and font never change per image,
# so build them once instead of reloading the font on every iteration.
text = "{roytuts.com}"
font = ImageFont.truetype('arial.ttf', 30)

for img in dirs:
    if os.path.isfile(img_dir + img):
        # Create an Image Object from an Image
        im = Image.open(img_dir + img)
        # Image width and height
        width, height = im.size
        # Image name (basename only, reused for the output path)
        img_name = os.path.basename(img_dir + img)
        draw = ImageDraw.Draw(im)
        textwidth, textheight = draw.textsize(text, font)
        # Center the watermark in the image.
        x = (width - textwidth)/2
        y = (height - textheight)/2
        # Semi-transparent fill; the alpha only takes effect on RGBA images.
        draw.text((x, y), text, font=font, fill=(254, 130, 75, 15))
        # Save watermarked image
        im.save('images/watermark/' + img_name)
|
[
"email@email.com"
] |
email@email.com
|
6f1547fab3b6b91f274d8e7a04e2ac3e28693ae2
|
3b593b412c663a34784b1f60ad07cd2ee6ef87d1
|
/month01/python base/day12/code03.py
|
19ca59f6f051da2f348473bcdba1941fb51fd14e
|
[] |
no_license
|
ShijieLiu-PR/Python_Learning
|
88694bd44aeed4f8b022202c1065342bd17c26d2
|
ed01cc0956120ea287c51667604db97ff563c829
|
refs/heads/master
| 2023-05-22T16:35:24.252313
| 2021-06-16T10:56:21
| 2021-06-16T10:56:21
| 337,445,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
"""
运算符重载
"""
print("a" + "b")
class Vector:
    """A one-dimensional vector supporting ``vector + number``."""

    def __init__(self, x):
        self.x = x

    def __add__(self, other):
        # Return a brand-new Vector instead of mutating self, so that
        # ``v + n`` leaves the left operand untouched.
        return Vector(self.x + other)

    def __str__(self):
        return "Vector(%d)" % self.x
# Demo: addition yields a new object (different ids); print() uses __str__.
v01 = Vector(10)
v02 = v01 + 5
print(id(v01))
print(id(v02))
print(v01)
print(v02)
|
[
"shijie_liu@outlook.com"
] |
shijie_liu@outlook.com
|
5ce7593024740e2adbdcd509987557c207679cb2
|
edb527ecdc408ddbddd9750b0673997644d4011e
|
/evalml/utils/cli_utils.py
|
9416c1110f5422a5e89bfeb2fadc13176a045ae6
|
[
"BSD-3-Clause"
] |
permissive
|
ctwgL/evalml
|
5cf21d6f6ddcb729c0547fb21d1080eec4e3012e
|
b239dbc741bd77228921a677ab52a659a1b16876
|
refs/heads/main
| 2023-07-18T18:42:52.971123
| 2021-09-14T01:12:48
| 2021-09-14T01:12:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,293
|
py
|
import locale
import os
import platform
import struct
import sys
import pkg_resources
import psutil
from psutil._common import bytes2human
import evalml
from evalml.utils import get_logger
def print_info():
    """Log evalml's version and install directory, then system and dependency info.

    Returns:
        None
    """
    logger = get_logger(__name__)
    # Lazy %-args keep formatting out of the hot path; message text unchanged.
    logger.info("EvalML version: %s", evalml.__version__)
    logger.info("EvalML installation directory: %s", get_evalml_root())
    print_sys_info()
    print_deps()
def print_sys_info():
    """Log a formatted table of system statistics.

    Returns:
        None
    """
    logger = get_logger(__name__)
    logger.info("\nSYSTEM INFO")
    logger.info("-----------")
    for title, stat in get_sys_info():
        logger.info("%s: %s", title, stat)
def print_deps():
    """Log the name and version of every installed package.

    Returns:
        None
    """
    logger = get_logger(__name__)
    logger.info("\nINSTALLED VERSIONS")
    logger.info("------------------")
    for package, version in get_installed_packages().items():
        logger.info("%s: %s", package, version)
# Modified from here
# https://github.com/pandas-dev/pandas/blob/d9a037ec4ad0aab0f5bf2ad18a30554c38299e57/pandas/util/_print_versions.py#L11
def get_sys_info():
    """Return system information as a list of (title, value) tuples.

    Any KeyError/ValueError raised while probing the platform is swallowed
    and an empty list is returned (the stats are appended atomically, so a
    partial probe never leaks into the result).

    Returns:
        List of tuples about system stats.
    """
    info = []
    try:
        sysname, nodename, release, version, machine, processor = platform.uname()
        probed = [
            ("python", ".".join(map(str, sys.version_info))),
            ("python-bits", struct.calcsize("P") * 8),
            ("OS", sysname),
            ("OS-release", release),
            ("machine", machine),
            ("processor", processor),
            ("byteorder", sys.byteorder),
            ("LC_ALL", os.environ.get("LC_ALL", "None")),
            ("LANG", os.environ.get("LANG", "None")),
            ("LOCALE", ".".join(map(str, locale.getlocale()))),
            ("# of CPUS", "{}".format(psutil.cpu_count())),
            ("Available memory", bytes2human(psutil.virtual_memory().available)),
        ]
        info.extend(probed)
    except (KeyError, ValueError):
        pass
    return info
def get_installed_packages():
    """Get dictionary mapping installed package names to their versions.

    Returns:
        Dictionary mapping installed package names (lower-cased) to versions.
    """
    return {dist.project_name.lower(): dist.version for dist in pkg_resources.working_set}
def get_evalml_root():
    """Gets location where evalml is installed.

    Returns:
        Location where evalml is installed.
    """
    # The package root is wherever evalml/__init__.py lives.
    return os.path.dirname(evalml.__file__)
|
[
"noreply@github.com"
] |
noreply@github.com
|
a76fb96c1dee7dc23e37475cf091be28ee2f0534
|
be17b72807cb9b752dd36e7d1efc3f9bb058ed6a
|
/krom-oes/.bin/wlisten1
|
8f646d37e450e4c137a33bf82d852c6864480530
|
[
"MIT"
] |
permissive
|
lakshyabhaintwal/my-theme-collection
|
6835f5b75aa77a6e35aec79ceedf44a23a0c550d
|
73446015ef44ceb71cf6bdb8ba5f0b46071cf1a3
|
refs/heads/master
| 2022-01-08T19:30:06.349839
| 2019-06-17T06:06:34
| 2019-06-17T06:06:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
#!/usr/bin/python3
# Print the title of the currently focused X11 window each time an X event
# arrives.  Runs until interrupted.
import Xlib
import Xlib.display
disp = Xlib.display.Display()
root = disp.screen().root
# EWMH atoms: the root window's active-window property and the window title.
NET_WM_NAME = disp.intern_atom('_NET_WM_NAME')
NET_ACTIVE_WINDOW = disp.intern_atom('_NET_ACTIVE_WINDOW')
root.change_attributes(event_mask=Xlib.X.FocusChangeMask)
while True:
    try:
        # Resolve the active window, then subscribe to its property changes
        # so the blocking next_event() below wakes on title/focus updates.
        window_id = root.get_full_property(NET_ACTIVE_WINDOW, Xlib.X.AnyPropertyType).value[0]
        window = disp.create_resource_object('window', window_id)
        window.change_attributes(event_mask=Xlib.X.PropertyChangeMask)
        window_name = window.get_full_property(NET_WM_NAME, 0).value
    except Xlib.error.XError:
        # Window vanished between queries; report None and keep going.
        window_name = None
    print(window_name)
    event = disp.next_event()
|
[
"hasbeeazam@gmail.com"
] |
hasbeeazam@gmail.com
|
|
400cc46db271da23fe460f3d4047f75ec0036f29
|
59e85251c447135f8cfba9eba5b3d94a8bde08a0
|
/python/prod.py
|
de77ae71fa08dd36bb2318a052154fffc10923a3
|
[] |
no_license
|
sigfreid666/minisab
|
e7c1481b2add0d23ad7e6365cc5ce12079598fdb
|
baf815895db94bb65b8e6a11ad9dbfb39ed6aa88
|
refs/heads/master
| 2021-05-21T22:56:33.623358
| 2018-06-09T10:47:21
| 2018-06-09T10:47:21
| 252,843,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
# WSGI entry point: expose the Flask application object for the server.
from minisab import create_app
app = create_app()
|
[
"guillaume.frutoso@gmail.com"
] |
guillaume.frutoso@gmail.com
|
b37e7ecbf4467d9d193cddf3e733018b8e0a8497
|
9980933e0a1d753efda7aa5b0a90b1ac4082ad59
|
/semaphores/philosophers_2.py
|
0ffe6c052704a2efb9469921b7c5b6aee96838e3
|
[] |
no_license
|
oscaru/concurrencia
|
faf9ff4e3c4bd45196ac170da6441ce0779bec51
|
b66e455e838fc7f4e4922ee7263851dd6b89c821
|
refs/heads/master
| 2023-02-14T22:56:27.207040
| 2021-01-03T11:04:34
| 2021-01-03T11:04:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,923
|
py
|
#! /usr/bin/env python
import threading
import time
# Number of philosophers (and forks) at the table, and meals per philosopher.
PHILOSOPHERS = 5
EAT_COUNT = 100
# Philosopher states stored in Philosopher.status.
THINKING = 0
HUNGRY = 1
EATING = 2
class Philosopher(threading.Thread):
    """Dining philosopher implemented with one semaphore per seat.

    Class-level state is shared by every philosopher: ``status`` holds each
    seat's THINKING/HUNGRY/EATING state, ``sync`` holds a per-seat semaphore
    used to block a hungry philosopher until both neighbours are not eating,
    and ``mutex`` guards all reads/writes of ``status``.
    """
    mutex = threading.Lock()
    status = []
    sync = []
    count = 0
    def __init__(self):
        super(Philosopher, self).__init__()
        # Seat number assigned from the shared counter.
        self.id = Philosopher.count
        Philosopher.count += 1
        Philosopher.status.append(THINKING)
        # Semaphore starts at 0 so pick() blocks until canEat() releases it.
        Philosopher.sync.append(threading.Semaphore(0))
    def right(self, i):
        # Index of the right-hand neighbour (wraps around the table).
        return (i - 1) % PHILOSOPHERS
    def left(self, i):
        # Index of the left-hand neighbour (wraps around the table).
        return (i + 1) % PHILOSOPHERS
    def canEat(self, i):
        # Must be called with ``mutex`` held: promote seat *i* to EATING and
        # wake it iff it is hungry and neither neighbour is eating.
        if Philosopher.status[i] == HUNGRY and Philosopher.status[self.left(i)] != EATING and Philosopher.status[self.right(i)] != EATING:
            Philosopher.status[i] = EATING
            Philosopher.sync[i].release()
    def pick(self):
        # Announce hunger, maybe self-promote, then wait on our semaphore
        # (released either by our own canEat or a neighbour's release()).
        Philosopher.mutex.acquire()
        Philosopher.status[self.id] = HUNGRY
        self.canEat(self.id)
        Philosopher.mutex.release()
        Philosopher.sync[self.id].acquire()
    def release(self):
        # Put the forks down and give both neighbours a chance to eat.
        Philosopher.mutex.acquire()
        Philosopher.status[self.id] = THINKING
        self.canEat(self.left(self.id))
        self.canEat(self.right(self.id))
        Philosopher.mutex.release()
    def think(self):
        time.sleep(0.05)
    def eat(self):
        print("{} start eat".format(self.id))
        time.sleep(0.1)
        print("{} end eat".format(self.id))
    def run(self):
        # Thread body: alternate thinking and eating EAT_COUNT times.
        for i in range(EAT_COUNT):
            self.think()
            self.pick()
            self.eat()
            self.release()
def main():
    """Spawn one thread per philosopher and wait for all of them to finish."""
    philosophers = []
    for i in range(PHILOSOPHERS):
        philosophers.append(Philosopher())
    # Start all threads
    for p in philosophers:
        p.start()
    # Wait for all threads to complete
    for p in philosophers:
        p.join()
if __name__ == "__main__":
    main()
|
[
"gallir@gmail.com"
] |
gallir@gmail.com
|
dcd1061b722b024dfeb58696864a3cc6655054ba
|
6f96c987689db877240ce3c1fc32b9e32252bda3
|
/Project_5/douyu/douyu/settings.py
|
d7b68c12b1a9ae3c8b4196281f21e2489b3db715
|
[] |
no_license
|
Mathilda11/Scrapy_Project
|
65d7281bdb75283fccc3795638e0957595d926f9
|
653db6cdb763eadfd886ae757f2777afd7946372
|
refs/heads/master
| 2021-10-24T05:12:37.462691
| 2019-03-22T08:04:44
| 2019-03-22T08:04:44
| 110,244,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,231
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for douyu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'douyu'
SPIDER_MODULES = ['douyu.spiders']
NEWSPIDER_MODULE = 'douyu.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'douyu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# NOTE: impersonates the Douyu mobile app so the API endpoints respond.
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'DYZB/1 CFNetwork/808.2.16 Darwin/16.3.0'
#    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#    'Accept-Language': 'en',
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'douyu.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'douyu.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'douyu.pipelines.ImagesPipeline': 300,
}
# Local directory where the ImagesPipeline stores downloaded images.
IMAGES_STORE = "/home/python/spiderTest/douyu/Images"
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"yizzzhang11@163.com"
] |
yizzzhang11@163.com
|
2d24087778240384516917c28596440c2aed5e2b
|
8520c991dc543f5f4e1efe59ab401824173bb985
|
/332-reconstruct-itinerary/solution.py
|
9deb98ca04053efa355f326607f4c90351f51542
|
[] |
no_license
|
katryo/leetcode
|
d44f70f2853c4f5ea9a462d022feb0f5436c2236
|
0da45559271d3dba687858b8945b3e361ecc813c
|
refs/heads/master
| 2020-03-24T12:04:53.859047
| 2020-02-18T04:27:55
| 2020-02-18T04:27:55
| 142,703,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 924
|
py
|
from collections import defaultdict
import heapq
class Solution:
    """Reconstruct an itinerary (LeetCode 332) via Hierholzer's algorithm."""

    def findItinerary(self, tickets):
        # Adjacency: each departure keeps a min-heap of destinations so
        # ties are always resolved in lexical order.
        graph = defaultdict(list)
        for origin, destination in tickets:
            heapq.heappush(graph[origin], destination)

        itinerary = []

        def walk(airport):
            # Consume outgoing edges smallest-first, then prepend the
            # airport (post-order) — yields the Eulerian path from JFK.
            pending = graph[airport]
            while pending:
                walk(heapq.heappop(pending))
            itinerary.insert(0, airport)

        walk('JFK')
        return itinerary
# Smoke test: expected output is ['JFK', 'MUC', 'LHR', 'SFO', 'SJC'].
s = Solution()
print(s.findItinerary([["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]))
|
[
"katoryo55@gmail.com"
] |
katoryo55@gmail.com
|
24f7b3b1146c1b28f642b500a4ac01033669c4d6
|
78ad0b63571ea8d7dd56f6baf7d1dd8fd73f2380
|
/timeline/forms.py
|
a5790743df5a056511d91608ab9ca5a5f503e118
|
[] |
no_license
|
stoictyper/tenetblog
|
b6aacd31820880f52a1991ce7a0cfe6d88821fdd
|
8a35e95f26e932158802a0747c36f1bf51524d63
|
refs/heads/master
| 2022-12-12T05:03:06.816880
| 2020-09-16T09:14:47
| 2020-09-16T09:14:47
| 295,981,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 175
|
py
|
from django import forms
from .models import Timeline
class TimelineForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of a Timeline entry."""
    class Meta:
        model=Timeline
        fields=["title","content","aimage"]
|
[
"alioguzdogru@gmail.com"
] |
alioguzdogru@gmail.com
|
ad7953da5cd8892ecfea3d2bac68630280a51843
|
69198c187d7d3be82164acb1fa4dd5bad8b14a1f
|
/src/auto_test/units/tools.py
|
c0388d0418c33ec3f5c4116ce12c31c2229be92c
|
[] |
no_license
|
dahai33/my_python
|
64067633f99369fdffa680afff185f2fb579e4c8
|
56a49b79c61fce828225971a8e2855a95540f954
|
refs/heads/master
| 2020-03-21T09:11:46.640149
| 2018-08-06T16:29:55
| 2018-08-06T16:29:55
| 138,387,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
#!/usr/bin/python3
#Author:刘海涛
# --*-- coding: uft-8 --*--
# @Time : 17:38
import os
import src.auto_test.units.log as L
import yaml
class devices_id(object):
    """Thin wrapper around ``adb`` for device lookup and apk (un)installation."""
    def __init__(self, device_id=""):
        # Optional "-s <serial>" fragment used to target a specific device.
        if device_id == "":
            self.device_id = ""
        else:
            self.device_id = "-s %s" % device_id
    @staticmethod
    def get_device_id():
        """Return the connected device serial numbers, one per list entry."""
        device_list = os.popen("adb get-serialno").read().splitlines()
        return device_list
    @staticmethod
    def install_apk(apk_path):
        """Install the apk at *apk_path* and log success or failure.

        Fixes: the original built "adb install<path>" (missing space), and
        ``if os.popen(...)`` was always truthy so the failure branch was
        unreachable.  ``popen(...).close()`` returns None on a zero exit
        status, which is the actual success signal.
        """
        if os.popen("adb install " + apk_path).close() is None:
            L.Log.d(apk_path,"apk包安装成功")
        else:
            L.Log.e(apk_path,"apk安装失败")
    @staticmethod
    def remove_apk(apk_name):
        """Uninstall the package *apk_name* and log success or failure.

        Same two fixes as install_apk: missing space after "adb uninstall"
        and the always-truthy os.popen() check.
        """
        if os.popen("adb uninstall " + apk_name).close() is None:
            L.Log.d(apk_name,"apk包卸载成功")
        else:
            L.Log.e(apk_name,"apk包卸载失败")
if __name__=='__main__':
    # Ad-hoc manual test drive; requires adb on PATH and a connected device.
    devices_id.remove_apk("cdd d")
    devices_id.install_apk("d:/cont/cdent/com.tianxianyong.com")
    p=os.popen("adb shell pm list packages moffice_eng").read().splitlines()
    print(p)
|
[
"liuhaitaodahai@sina.com"
] |
liuhaitaodahai@sina.com
|
6520e18f02a020d437d9faab0c0f1894ec361f79
|
48997954c0e4af29dd8ae17cbfde6081fc225dcc
|
/PROJ_COD/Machine/kwon/vt_query.py
|
3fb2b3070a5e7e4c407c5c7aecb9c6d94ded43ae
|
[] |
no_license
|
annie522/kitri
|
7c7fd9e6ab015a94051b398704d39df09ba8966d
|
5ae2e8f5ee7e15cd1b0e828ee63e30a320746317
|
refs/heads/master
| 2021-01-22T10:57:01.778072
| 2017-07-26T00:40:25
| 2017-07-26T00:40:25
| 92,664,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,765
|
py
|
# Virustotal Module
#
# Below are a Virustotal.com's API options/tags (for k:v responses)
# response_code, verbose_msg
# resource, scan_id, md5, sha1, sha256
# scan_date
# scans
__author__ = 'Jacolon Walker'
import requests
import json
import hashlib
import sys
class Virustotal():
    """Minimal client for the virustotal.com v2 REST API.

    SECURITY NOTE(review): the API key is hard-coded below; it should be
    loaded from the environment or a config file rather than committed to
    source control.
    """
    def __init__(self):
        self.host = "www.virustotal.com"
        self.base = "https://www.virustotal.com/vtapi/v2/"
        self.apikey = "bbc972972f5db6166251b2856d0ac39a8d64da57f7458e3ceae0a8d5ea9be9a4"
    def md5(self ,filepath, blocksize=8192):
        """Return the hex MD5 digest of *filepath*, read in chunks.

        Returns None (after printing the error) when the file cannot be
        opened.
        """
        md5 = hashlib.md5()
        try:
            f = open(filepath, "rb")
        except IOError as e:
            print("file open error", e)
            return
        # Fix: close the handle when done (the original leaked it).
        with f:
            while True:
                buf = f.read(blocksize)
                if not buf:
                    break
                md5.update(buf)
        return md5.hexdigest()
    def rscReport(self, fileMd5):
        """Get the latest antivirus report for a file hash."""
        print("fileMd5 : {}".format(fileMd5))
        base = self.base + 'file/report'
        parameters = {"resource": fileMd5, "apikey": self.apikey}
        r = requests.post(base, data=parameters)
        resp = r.json()
        # Fix: removed debug leftover ``print(self.md5)``, which printed the
        # bound method object, not a digest.
        return parse_resp(resp)
    def urlReport(self, rsc, scan=0):
        """Get the latest URL scan report; scan=1 queues a scan if none exists."""
        base = self.base + 'url/report'
        parameters = {"resource": rsc, "scan": scan, "apikey": self.apikey}
        r = requests.post(base, data=parameters)
        return parse_resp(r.json())
    def ipReport(self, rsc):
        """Get the latest report for an IP address."""
        base = self.base + 'ip-address/report'
        parameters = {"ip": rsc, "apikey": self.apikey}
        r = requests.get(base, params=parameters)
        return parse_resp(r.json())
    def domainReport(self, rsc):
        """Get the latest report for a domain (docstring fixed: not an IP)."""
        base = self.base + 'domain/report'
        parameters = {"domain": rsc, "apikey": self.apikey}
        r = requests.get(base, params=parameters)
        return parse_resp(r.json())
    def scanURL(self, rsc):
        """Send RSC/URL for scanning; its encouraged to check for last scan
        using urlReport().  To submit a batch, rsc should be
        example.com\\nexample2.com."""
        base = self.base + 'url/scan'
        parameters = {"url": rsc, "apikey": self.apikey}
        r = requests.post(base, data=parameters)
        return parse_resp(r.json())
    def rscSubmit(self, rsc):
        """Submit potential malicious file to virustotal for analyzing."""
        base = self.base + 'file/scan'
        f = open(rsc, 'rb')
        parameters = {"apikey": self.apikey}
        # Fix: always close the uploaded file handle (the original leaked it).
        try:
            r = requests.post(base, data=parameters, files={'file': f})
        finally:
            f.close()
        return parse_resp(r.json())
    def rscRescan(self, rsc):
        """Rescan an already-uploaded resource without uploading it again."""
        base = self.base + 'file/rescan'
        parameters = {"resource": rsc, "apikey": self.apikey}
        r = requests.post(base, data=parameters)
        return parse_resp(r.json())
    def postComment(self, rsc, comment):
        """Post a comment to a file or URL, then print the report permalink.

        Fix: the original had Python-2-style ``print`` statements split
        across lines; under Python 3 they printed a blank line and silently
        discarded the message strings.
        """
        base = self.base + 'comments/put'
        parameters = {"resource": rsc, "comment": comment, "apikey": self.apikey}
        r = requests.post(base, data=parameters)
        results = parse_resp(r.json())
        if results['response_code'] == 0:
            print("Oh no something happen...cant post comment")
        else:
            print("Your comment was successfully posted")
        call = self.rscReport(rsc)
        for item in call:
            if item == "permalink":
                print("Report link:", call[item])
def parse_resp(resp):
    """Copy the decoded JSON response into a plain dict.

    Kept for API compatibility with the rest of the module; the manual
    key-by-key copy of the original is equivalent to dict(resp).
    """
    return dict(resp)
# main = Virustotal()
# var = main.md5('pika.exe')
# dic = main.rscReport(var)
# dic = (main.rscReport(var))
#
# if dic['positives'] >= 10:
# print("VIRUS!!!")
# else:
# print("NOMAL FILE")
# print ("total = " , dic['total'],"positives = " , dic['positives'], dic['scans']['Ikarus']['detected'], dic['scans']['Ikarus']['result'])
|
[
"zzabzz21@naver.com"
] |
zzabzz21@naver.com
|
23c1aba0a6984f43717a229dae9794d4c3027e3f
|
9666666561bbca04c70325de07335b70c00a631e
|
/RNN/LSTM.py
|
8345fa1e6c518f5eb04e588a3cb5372afe6aef1d
|
[] |
no_license
|
deeshantk/machine_learn
|
c0720d6541e90507321b44f08d501683ef222625
|
57ec7252cc93cb48feb901cb995d4d880d633f2c
|
refs/heads/master
| 2020-06-30T15:19:51.554546
| 2019-10-23T16:06:38
| 2019-10-23T16:06:38
| 200,869,773
| 2
| 0
| null | 2019-08-07T20:05:05
| 2019-08-06T14:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
from keras.models import Sequential
from keras.layers import LSTM
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
import keras
# Load the MNIST digit images (28x28 grayscale) and their integer labels.
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
# Scale pixel values along axis 1 so each row has unit norm.
X_train = keras.utils.normalize(X_train, axis=1)
X_test = keras.utils.normalize(X_test, axis=1)

model = Sequential()
#model.add(tf.keras.layers.Flatten())
# Treat each image as a sequence of 28 rows of 28 pixels; first LSTM
# returns the full sequence so a second LSTM can be stacked on top.
model.add(LSTM(128, input_shape=(X_train.shape[1:]),activation='relu', return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
# 10 output classes (digits 0-9), softmax probabilities.
model.add(Dense(10, activation='softmax'))

opt = Adam(lr=1e-3, decay=1e-5)
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(X_train, y_train, epochs=3, validation_data=(X_test, y_test))
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c37fc6576700baa64bac68db215c8f8fece2007
|
e00941773e95212a1f76010c3f7b3c7a46f0c9a1
|
/DjangoGo/settings.py
|
d85b6c6c33c24e6f70dd39b455b12b2ea96a0f91
|
[] |
no_license
|
a6a18-Old/django
|
4dc08c322f127d7b7ce68ef088eb64fd91c07429
|
70a7dfdb50a4c10f2e2f39ab47327fac458d700b
|
refs/heads/master
| 2022-07-17T05:22:08.235754
| 2020-05-13T18:12:49
| 2020-05-13T18:12:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
"""
Django settings for DjangoGo project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'zroj$q1k%+de=^7x(l!zr%o)^h@@c149y-in1=jow$6+@+**7&'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'DjangoGo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level template directory in addition to per-app dirs.
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'DjangoGo.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# Default development database: file-based SQLite next to the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
|
[
"a6a18@cycu.org.tw"
] |
a6a18@cycu.org.tw
|
ad2e14b44f001a96a156b1baca80fb89acb0ee56
|
7fb1e6e0d3c452de2cda8e2338343e9862c6c88a
|
/28082019/000952.py
|
c53d6b23139aa81b05f59be1cd02d8cd7c94c449
|
[] |
no_license
|
RobertoCruzF/Intensivo-Nivelacion
|
3a10afc61f2744e7fd0d6019f0c572fb8b5deec9
|
c7dd1a4aaf1c419f3edb35d30f200c1c0b6a26a9
|
refs/heads/master
| 2020-07-05T11:50:13.582245
| 2019-08-30T14:35:42
| 2019-08-30T14:35:42
| 202,641,096
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
import numpy as np

# The two tuples have different lengths (4 vs 3), so NumPy cannot build a
# regular 2-D array; modern NumPy (>= 1.24) raises ValueError for such
# ragged input unless dtype=object is requested explicitly.
b = np.array([(1, 5, 2, 3), (4, 5, 6)], dtype=object)
# Fix: the original used the Python-2 `print b` statement, which is a
# SyntaxError on Python 3.
print(b)  # prints the array b with the elements defined inside the list
|
[
"rcruz@miuandes.cl"
] |
rcruz@miuandes.cl
|
f58dc5c06357b947dad8b998e8070480de396f5d
|
d47b841f7e64d83cebbe63a25bac47adc495a760
|
/test/test_box_score_teams_overall.py
|
74d000b20e37922744080d08c234957c32e396ab
|
[] |
no_license
|
CiscoNeville/cfbd-python
|
810029240de30a2b7a205cbc3bb009599481206c
|
5775ff7ce7464e881f1940a7c0a534b0c26c1ce8
|
refs/heads/master
| 2023-09-04T18:27:23.773119
| 2021-11-19T01:49:07
| 2021-11-19T01:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,071
|
py
|
# coding: utf-8
"""
College Football Data API
This is an API for accessing all sorts of college football data. It currently has a wide array of data ranging from play by play to player statistics to game scores and more. # noqa: E501
OpenAPI spec version: 2.4.1
Contact: admin@collegefootballdata.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cfbd
from cfbd.models.box_score_teams_overall import BoxScoreTeamsOverall # noqa: E501
from cfbd.rest import ApiException
class TestBoxScoreTeamsOverall(unittest.TestCase):
    """Unit-test stubs for the BoxScoreTeamsOverall model."""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testBoxScoreTeamsOverall(self):
        """Placeholder test for BoxScoreTeamsOverall."""
        # FIXME: construct object with mandatory attributes with example values
        # model = cfbd.models.box_score_teams_overall.BoxScoreTeamsOverall()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"radjewwj@gmail.com"
] |
radjewwj@gmail.com
|
cc81969fe3c3463a9a336a1e77f56a7592cde567
|
b91bd5b0954776fd186bf064a87fb8f7ffa4a58a
|
/python2/flask/flask_fun/flask_table/server.py
|
5c146559aa55798c1023ee96a350f5061e5a2f4d
|
[] |
no_license
|
ronaldaguerrero/practice
|
ddf1f41b693110cebe4d52e29910909f3ba21115
|
38627fddd8f79e6fb50c05a0e4e8d27a92146e1b
|
refs/heads/master
| 2023-01-23T17:06:18.642983
| 2019-09-13T05:01:48
| 2019-09-13T05:01:48
| 186,157,588
| 0
| 0
| null | 2023-01-07T09:40:40
| 2019-05-11T16:40:12
|
Python
|
UTF-8
|
Python
| false
| false
| 564
|
py
|
# import things
from flask_table import Table, Col
# Declare your table
class ItemTable(Table):
    """HTML table declaration: one Col per rendered column."""
    # The Col argument is the column header text shown in the output.
    name = Col('Name')
    description = Col('Description')
# Get some objects
class Item(object):
    """Simple value object holding one table row's fields."""

    def __init__(self, name, description):
        # Store both display fields on the instance.
        self.name, self.description = name, description
# Build some sample rows for the table.
items = [Item('Name1', 'Description1'),
         Item('Name2', 'Description2'),
         Item('Name3', 'Description3')]
# Populate the table
table = ItemTable(items)
# Print the html
print(table.__html__())
# or just {{ table }} from within a Jinja template
|
[
"ronald.a.guerrero@gmail.com"
] |
ronald.a.guerrero@gmail.com
|
f364822bf049609b8285f53a3563caa9508f8624
|
47100b5e35f6f5ec40fd42f1681ec26b0ed89a30
|
/ajayidhikrullahscrumy/urls.py
|
2fc0ef441e0160da1d87758c843643082f50b9ad
|
[] |
no_license
|
ajayidhikrullah/linuxScrumyProject
|
d056b882f8ae8a3dc8c05252d44bfac08796659d
|
c9230b004d6816b347e6b5fdfb216f6e2586a3a0
|
refs/heads/master
| 2020-04-02T15:38:48.457952
| 2018-11-07T17:12:46
| 2018-11-07T17:12:46
| 154,576,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
from django.urls import path
from . import views
# from django.urls import *
urlpatterns = [
    # Landing page.
    path('', views.index, name='index'),
    path("scrumy_goals/", views.scrumy_goals), #/ajayidhikrullah/my_task/
    path("goal_status/", views.goal_status), #/ajayidhikrullah/goal_status/
    # URL extensions of the web pages, e.g. www.ajayi/sikiru/adekunle etc.
    #/ajayidhikrullahscrumy/move_goal/(int:goal_id)
    # Captures the goal's primary key from the URL as `goals_id`.
    path('move_goal/<int:goals_id>/', views.move_goal, name='move_goal'),
]
|
[
"ajayidhikrullah@gmail.com"
] |
ajayidhikrullah@gmail.com
|
2ee7655b0e28bb82c5b109d6432be9f8c3aff52b
|
32aced0459f10a165b51b16730b412b5f9b5293d
|
/August1st_Detect_Capital.py
|
2738d9060912a5a49f7a35eb4e067e6960c3f095
|
[] |
no_license
|
Wonjuny0804/leetcodechallenge
|
e08b741be4856f69cd4553b505c9da32b1fb9e4d
|
0f0ec19c3f48c001584e1e5bc92df51b15d8ba8a
|
refs/heads/master
| 2022-12-13T02:33:54.118285
| 2020-09-11T06:49:47
| 2020-09-11T06:49:47
| 284,678,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
"""
Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
"""
class Solution(object):
    """LeetCode 520 -- Detect Capital."""

    def detectCapitalUse(self, word):
        """Return True when capitals in `word` are used correctly.

        Correct usage (per the problem statement, letters only):
        all capitals ("USA"), no capitals ("leetcode"), or only the
        first letter capitalised ("Google").

        :type word: str
        :rtype: bool
        """
        # Zero- or one-letter words are trivially correct (the original
        # also returned True for them).  For longer words the deeply
        # nested branch cascade collapses to three built-in predicates;
        # str.istitle() covers the "Google" case.
        if len(word) < 2:
            return True
        return word.isupper() or word.islower() or word.istitle()
# Quick manual check: "THe Apple" misuses capitals, so this prints False.
txt = 'THe Apple'
A = Solution()
print(A.detectCapitalUse(txt))
|
[
"wonjun84@naver.com"
] |
wonjun84@naver.com
|
f20231cfc5c8195e5135526087d532d334a0c5fa
|
9907b3dd74d1aedbed5243105649f0acd8e965d8
|
/demo/pytorch_laguerre.py
|
0aded5c456579f8f7de77004c4e2c77956273df5
|
[
"MIT"
] |
permissive
|
shubhampachori12110095/OrthNet
|
68c7442c448acdca2b0f2fbef0709efec280be4c
|
74824c1858e14f023d3f0251910f223d6b8672ce
|
refs/heads/master
| 2021-01-25T13:12:07.142646
| 2018-02-28T15:18:38
| 2018-02-28T15:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
import sys
sys.path.append('../')
from orthnet.pytorch import laguerre_tensor, multi_dim_laguerre_tensor
import torch
from torch.autograd import Variable
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Maximum polynomial orders for the 1-D and 2-D demos.
order1 = 5
order2 = 3

# 100 evenly spaced sample points in [-1, 1], as column vectors.
x1_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x2_data = np.linspace(-1, 1, 100).reshape((-1, 1))
x1 = Variable(torch.Tensor(x1_data))
x2 = Variable(torch.Tensor(x2_data))

# Laguerre polynomial values up to order1 in one variable, and the
# multi-dimensional variant up to order2 in two variables.
y1 = laguerre_tensor(n = order1, x = x1)
y2 = multi_dim_laguerre_tensor(n = order2, var = [x1, x2])
z1 = y1.data.numpy()
z2 = y2.data.numpy()

# 2-D plot: one curve per polynomial order.
fig1 = plt.figure()
ax1 = fig1.gca()
for i in range(order1+1):
    ax1.plot(x1_data, z1[:, i], label = 'n = '+str(i))
ax1.legend()
ax1.grid(True)

# 3-D surface of one component of the two-variable expansion.
fig2 = plt.figure()
ax2 = fig2.gca(projection='3d')
x1_data, x2_data = np.meshgrid(x1_data, x2_data)
ax2.plot_surface(X = x1_data, Y = x2_data, Z = z2[:, -2])
plt.show()
|
[
"orcuslc@hotmail.com"
] |
orcuslc@hotmail.com
|
b3c410f4730e84fde0ef11b12ab700a67513a5a9
|
4c0fc477a3d961b35f26bbd5916ab327fa30b99e
|
/S1_ medium.py
|
5b148ceb45e027a77ac4db6bb9d81b2e9cae6212
|
[] |
no_license
|
sauravbasak/Codehall-Python
|
7be4f80a0e5dc46254d42018f35f03bc24088aa4
|
4859c95ead96ddb3bfc1aa79976121031659b94a
|
refs/heads/master
| 2022-12-02T07:19:27.810672
| 2020-08-21T17:59:29
| 2020-08-21T17:59:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
# input() always returns a string, so convert it to an int first.
circle_radius = int(input('input the radius of the circle'))

# area = pi * r ** 2, with 3.14 as a rough approximation of pi
# (** is Python's exponentiation operator).
circle_area = 3.14 * circle_radius ** 2
print('the area of the circle is about ' + str(circle_area))
|
[
"noreply@github.com"
] |
noreply@github.com
|
a07b1f8608ca572ae0d5ccebcce1e3ff744dab7b
|
9b5b9e0ac038cafadccb2474a13aab21c9ed6c50
|
/app/views.py
|
0e6b1d94db87fc65f299731d348e669009083669
|
[] |
no_license
|
rkmarvin/test_blog
|
8a95dd7a6a52508497281cf7d5f171bcc1b1974b
|
acdb2fa5d5c3d479b7e9e5162f361b194d13a6d8
|
refs/heads/master
| 2020-04-06T03:41:07.092706
| 2015-07-02T19:04:12
| 2015-07-02T19:04:12
| 37,518,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,723
|
py
|
from django.contrib.auth.models import User
from django.db.models import Q
from django.views.generic import ListView, CreateView, RedirectView, DetailView
from app.models import BlogRecord, Subscription, SoubcrBlorRecorStatus
class ListUserBlogRecordsView(ListView):
    """List only the blog records owned by the requesting user."""

    def get_queryset(self):
        # Restrict the queryset to records belonging to the current user.
        owner = self.request.user
        return BlogRecord.objects.filter(user=owner)
class UserBlogRecordDetailView(DetailView):
    """Detail page for a single BlogRecord (default template/context)."""
    model = BlogRecord
class CreateBlogRecordView(CreateView):
    """Create a BlogRecord owned by the logged-in user."""
    model = BlogRecord
    fields = ['title', 'text']
    success_url = "/app/blog/"

    def form_valid(self, form):
        # commit=False returns an unsaved instance so the owner can be
        # attached before the first save.
        blog_record = form.save(commit=False)
        blog_record.user = self.request.user
        blog_record.save()
        # NOTE(review): super().form_valid() saves the form again
        # (ModelFormMixin behavior) — confirm the double save is intended.
        return super(CreateBlogRecordView, self).form_valid(form)
class BlogersListView(ListView):
    """List every user except the one making the request."""

    template_name = "app/blogers_list.html"

    def get_queryset(self):
        # ~Q(id=...) excludes the current user from the listing.
        current_user_id = self.request.user.id
        return User.objects.filter(~Q(id=current_user_id))
class SubscribeView(RedirectView):
    """Add the user identified by `user_id` to the follower's masters."""
    permanent = False
    url = '/app/blog/blogers/'

    def get(self, request, *args, **kwargs):
        user_id = kwargs.get('user_id')
        if user_id:
            # get_or_create returns (object, created); [0] keeps the object.
            subscr = Subscription.objects.get_or_create(follower=request.user)[0]
            subscr.masters.add(User.objects.get(id=user_id))
            subscr.save()
        return super(SubscribeView, self).get(request, *args, **kwargs)
class UnSubscribeView(RedirectView):
    """Remove the user identified by `user_id` from the follower's masters."""
    permanent = False
    url = '/app/blog/blogers/'

    def get(self, request, *args, **kwargs):
        user_id = kwargs.get('user_id')
        if user_id:
            subscr = Subscription.objects.get(follower=request.user)
            subscr.masters.remove(User.objects.get(id=user_id))
            subscr.save()
            # NOTE(review): this deletes ALL of the user's read-marks,
            # not just those tied to the removed master — confirm intended.
            SoubcrBlorRecorStatus.objects.filter(user=request.user).delete()
        return super(UnSubscribeView, self).get(request, *args, **kwargs)
class NewsListView(ListView):
    """Feed of blog records from the users this user follows, newest first."""
    template_name = "app/news_list.html"

    def get_queryset(self):
        # Custom manager resolves the set of followed users ("masters").
        masters = Subscription.subsrc_objs.masters(self.request.user)
        return BlogRecord.objects.filter(user__in=masters).order_by('-created')
class SetReadedView(RedirectView):
    """Mark a blog record as read, then bounce back to the referring page."""
    permanent = False

    def get(self, request, *args, **kwargs):
        record_id = kwargs.get('record_id')
        if record_id:
            # get_or_create keeps the operation idempotent (no duplicates).
            SoubcrBlorRecorStatus.objects.get_or_create(user=request.user, record_id=record_id)
        return super(SetReadedView, self).get(request, *args, **kwargs)

    def get_redirect_url(self, *args, **kwargs):
        # Redirect to wherever the user came from; None if no Referer header.
        self.url = self.request.META.get("HTTP_REFERER")
        if self.url:
            return super(SetReadedView, self).get_redirect_url(*args, **kwargs)
        return None
|
[
"rouslan.korkmazov@gmail.com"
] |
rouslan.korkmazov@gmail.com
|
f05897aa9e15a83463b6a5b80f3d7b2481815759
|
502de531a391a617ac947e947661d2c36a2ff2ff
|
/setup.py
|
631ba9a3865ba25ce48db03448fb4b57baa31b10
|
[] |
no_license
|
ingfcetina/testing-arcpy
|
17026fdcf185b919da9da3c7ab36a5bffead0722
|
c7f3b74c37c8f8673100bd532039e7a3d318bdf9
|
refs/heads/master
| 2021-06-18T05:14:54.239298
| 2017-07-09T14:15:05
| 2017-07-09T14:15:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from setuptools import setup, find_packages

# Package metadata collected in one mapping, then handed to setup().
_metadata = dict(
    name='my_project',
    version='1.0.0',
    description='Sample project for arcpy applications',
    url='https://github.com/lobsteropteryx/arcpy-testing',
    author='Ian Firkin',
    author_email='ian.firkin@gmail.com',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    install_requires=['arcpy'],
    extras_require={
        'test': ['pytest', 'pytest-cov', 'pytest-watch', 'pylint']
    },
    entry_points={
        'console_scripts': []
    },
)

setup(**_metadata)
|
[
"ian.firkin@homeinsteadinc.com"
] |
ian.firkin@homeinsteadinc.com
|
51e6d0b64816e845f3804107099f83eb52511405
|
030cea4006a4ff559f23cb3b3c31cd038ed2e332
|
/week11/hh_back/api/migrations/0001_initial.py
|
ff433e7b38b000547c461e4b1354c718d2bfa422
|
[] |
no_license
|
ayananygmetova/Web-Dev-2020
|
f8834e0ee26f0f0f06d0e3a282c73b373954a430
|
957bca91554f015e9a3d13b4ec12e64de7ac633e
|
refs/heads/master
| 2023-01-22T16:49:39.857983
| 2020-03-31T10:09:54
| 2020-03-31T10:09:54
| 236,937,810
| 1
| 0
| null | 2023-01-07T16:34:35
| 2020-01-29T08:41:10
|
Python
|
UTF-8
|
Python
| false
| false
| 669
|
py
|
# Generated by Django 3.0.4 on 2020-03-31 07:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the Company table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                # Auto-incrementing primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=300)),
                ('description', models.TextField(default='')),
                ('city', models.CharField(max_length=200)),
                ('address', models.TextField(default='')),
            ],
        ),
    ]
|
[
"ayananyfmetova@gmail.com"
] |
ayananyfmetova@gmail.com
|
bf67822d3bde7b6f73418549a4686a1a0b14a6cb
|
dc93b13ac77ad13d7969f00af22670cbb4401082
|
/meiduo_mall/meiduo_mall/apps/oauth/serializer.py
|
ca590e8bd9a41283edd33a84eb6a01c9d9c8c40b
|
[] |
no_license
|
zhangbk920209/TaobaoMall
|
852877cac1167b37dadfbfcd415c161d52de3796
|
c4bbc938357b91daa76fbfece72bb99179993a5d
|
refs/heads/master
| 2020-04-02T15:53:09.563700
| 2018-11-08T09:06:27
| 2018-11-08T09:06:27
| 154,587,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,652
|
py
|
import base64
import os
from django_redis import get_redis_connection
from rest_framework import serializers
from oauth.models import OAuthQQUser
from oauth.utils import OAuthQQ
from users.models import User
class OAuthQQUserSerializer(serializers.ModelSerializer):
    """Serializer that binds a QQ openid to a local user account.

    Validates the SMS verification code and the encrypted openid, then
    either reuses an existing account (password-checked) or creates a
    new one, and finally issues a JWT for the bound user.
    """
    # Field labels are user-facing strings and are intentionally left as-is.
    mobile = serializers.RegexField(label='手机号', regex=r'1[3-9]\d{9}$', )
    sms_code = serializers.CharField(label='短信验证码', write_only=True)
    secret_openid = serializers.CharField(label='加密OpenID', write_only=True)
    token = serializers.CharField(label='JWTToken', read_only=True)

    class Meta:
        model = User
        fields = ('id', 'username', 'mobile', 'password', 'sms_code', 'secret_openid', 'token')
        extra_kwargs = {
            'password': {
                'max_length': 20,
                'min_length': 8,
                'write_only': True,
                'error_messages': {
                    'min_length': '仅允许8-20个字符的密码',
                    'max_length': '仅允许8-20个字符的密码',
                }
            },
            'username': {
                'read_only': True
            }
        }

    def validate(self, attrs):
        # The mobile number format is already validated by the RegexField.
        # SMS verification code check:
        sms_code = attrs['sms_code']
        mobile = attrs['mobile']
        redis_coon = get_redis_connection('verify_codes')
        real_sms_code = redis_coon.get('sms_%s' % mobile)
        # Missing from Redis means the code expired.
        if not real_sms_code:
            raise serializers.ValidationError('短信验证码已过期')
        # Does the submitted code match the stored one?
        if sms_code != real_sms_code.decode():
            raise serializers.ValidationError('短信验证码错误')
        scret_openid = attrs['secret_openid']
        # Verify the encrypted openid (i.e. the access_token).
        openid = OAuthQQ.check_save_user_token(scret_openid)
        if not openid:
            raise serializers.ValidationError('Openid已失效')
        attrs['openid'] = openid
        # If a user with this mobile already exists, check the password.
        try:
            user = User.objects.get(mobile=mobile)
        except User.DoesNotExist:
            user = None
        else:
            password = attrs['password']
            if not user.check_password(password):
                raise serializers.ValidationError('用户名密码错误')
        attrs['user'] = user
        return attrs

    def create(self, validated_data):
        user = validated_data['user']
        if not user:
            # No matching account: create one with a random username.
            username = base64.b64encode(os.urandom(9)).decode()
            mobile = validated_data['mobile']
            password = validated_data['password']
            user = User.objects.create_user(mobile=mobile, password=password, username=username)
        # Expose the bound user on the view object so the view can read it
        # directly via `self.user`.
        self.context['view'].user = user
        openid = validated_data['openid']
        OAuthQQUser.objects.create(openid=openid, user=user)

        from rest_framework_jwt.settings import api_settings
        # Function that builds the JWT payload.
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        # Function that encodes the payload into a JWT token.
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        # Build the payload from the user object.
        payload = jwt_payload_handler(user)
        # Encode the payload into the token string.
        token = jwt_encode_handler(payload)
        user.token = token
        return user
|
[
"zhangbk0209@163.com"
] |
zhangbk0209@163.com
|
4f75995da48db35b1989254d0fd3db739d8f7db4
|
1b49d99bf3b91ec45a57e4eabd3c868155fb932c
|
/code/model 1/utils.py
|
55a494916c8943cd8166c90f57f2b30f2048d458
|
[] |
no_license
|
Clement-Hardy/Image-captioning
|
3e365dbd1a20aedd608621c7d19594cb6e95fb26
|
86780413501ee64ffba849a15dc5d82b79e1fa17
|
refs/heads/master
| 2020-09-01T20:18:45.723144
| 2019-11-01T19:15:26
| 2019-11-01T19:15:26
| 219,047,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,752
|
py
|
from keras.applications.resnet50 import preprocess_input
import os
import cv2
import numpy as np
import warnings
from keras.preprocessing.text import Tokenizer
def create_dataset(path_legend, path_images, start_sentence, end_sentence):
    """Read an image-caption legend file and tokenise the captions.

    Each legend line is "<image_name>#<k>\t<caption>".  Captions whose
    image file is missing from `path_images` are skipped with a warning.
    Each kept caption is wrapped as "<start> caption <end>".

    Returns (image_names, legends_as_token_ids, max_caption_length,
    vocabulary_size, word_index).

    Fixes: the legend file handle was never closed; the warning message
    was garbled ("The image doesn't {} doesn't exist").
    """
    # `with` guarantees the legend file is closed.
    with open(path_legend, 'r') as legend_file:
        legends = legend_file.readlines()

    list_names_images, list_legends = [], []
    token = Tokenizer()
    for legend in legends:
        name_image, sentence = legend.split('\t')
        name_image = name_image.split("#")[0]
        sentence = sentence.split('\n')[0]
        if os.path.exists(os.path.join(path_images, name_image)):
            sentence = start_sentence + " " + sentence + " " + end_sentence
            list_names_images.append(name_image)
            list_legends.append(sentence)
        else:
            warnings.warn("The image {} doesn't exist; its legends were not added to the dataset.".format(name_image))

    token.fit_on_texts(list_legends)
    list_legends_number = token.texts_to_sequences(list_legends)
    max_length_legend = np.max([len(sentence) for sentence in list_legends_number])
    return list_names_images, list_legends_number, max_length_legend, len(token.word_index)+1, token.word_index
def list_to_sentence(list_word, end_sentence):
    """Join words with single spaces, stopping before the first
    end-of-sentence token (the first word is always kept)."""
    kept = [list_word[0]]
    for word in list_word[1:]:
        if word == end_sentence:
            break
        kept.append(word)
    return ' '.join(kept)
def build_dict_image_legend(names_images, legends):
    """Group legends by image name (names are assumed to be grouped
    consecutively, as produced by create_dataset).

    Bug fix: the original initialised the first image's list as empty and
    only appended from index 1 onwards, silently dropping the very first
    legend.
    """
    dict_image_legend = {}
    name = names_images[0]
    dict_image_legend[name] = [legends[0]]
    for i in range(1, len(names_images)):
        if name == names_images[i]:
            dict_image_legend[name].append(legends[i])
        else:
            name = names_images[i]
            dict_image_legend[name] = [legends[i]]
    return dict_image_legend
def build_number_dict(word_dict):
    """Invert a word->index mapping into an index->word mapping."""
    return {index: word for word, index in word_dict.items()}
def number_to_word(legends, number_dict):
    """Map every token id in every legend back to its word."""
    return [[number_dict[token_id] for token_id in legend]
            for legend in legends]
def load_image(path_images, name_image, model_cnn="resnet50"):
    """Load an image, resize it for the given CNN and preprocess it.

    Fix: an unrecognised `model_cnn` now raises ValueError immediately;
    the original left `input_shape` unbound and crashed later with
    UnboundLocalError.
    """
    if model_cnn == "resnet50":
        input_shape = (224, 224)
    elif model_cnn == "InceptionV3":
        input_shape = (299, 299)
    else:
        raise ValueError("Unsupported model_cnn: {!r}".format(model_cnn))
    dir_image = os.path.join(path_images, name_image)
    image = cv2.imread(dir_image)
    image = cv2.resize(image, input_shape)
    # NOTE(review): preprocess_input here is ResNet50's even when
    # model_cnn="InceptionV3" (see module imports) — confirm intended.
    return preprocess_input(image)
|
[
"vicclem5999@live.fr"
] |
vicclem5999@live.fr
|
2538034794d182aad149414e83c631415a4e7db3
|
7634530365c88007bee507b639360ab0aa403d3f
|
/Python/004Median_of_Two_Sorted_Arrays.py
|
3751860c6f224065a7d0eb3829eb1238dc8d0c38
|
[] |
no_license
|
viewer010/leetcode
|
7389fbe4579da643fd4603f08553512b1520712c
|
0db94ec34e84caa6d870ce933a198f9e31ffb183
|
refs/heads/master
| 2021-06-14T11:36:51.312447
| 2017-04-21T13:36:23
| 2017-04-21T13:36:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 579
|
py
|
#coding:utf-8
'''
python sort函数应该是O(nlogn)
'''
class Solution(object):
    """LeetCode 4 -- median of two sorted arrays.

    Simple O((m+n) log(m+n)) approach: concatenate and sort.
    """

    def findMedianSortedArrays(self, nums1, nums2):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        merged = nums1 + nums2
        merged.sort()
        # // keeps this working on Python 3; the original used the
        # Python-2 integer `/`, which yields a float index on Python 3.
        mid = len(merged) // 2
        if len(merged) % 2:
            return merged[mid]
        return (merged[mid - 1] + merged[mid]) / 2.0
if __name__ == '__main__':
    s = Solution()
    nums1 = [1, 2]
    nums2 = [3, 4]
    # print(...) works on both Python 2 and 3; the original Python-2-only
    # `print expr` statement is a SyntaxError on Python 3.
    print(s.findMedianSortedArrays(nums1, nums2))
|
[
"2638480742@qq.com"
] |
2638480742@qq.com
|
7b233741a8843a22b7e1fb668ee45d54bca9363e
|
6455e23a22dc13d1ca7ea5896904d7918c715054
|
/logprocessor.py
|
8c4016c76364dc9781c74e2e67154b8cd8c43910
|
[] |
no_license
|
Skidplays/LabLadder
|
6580a36f12d9907fc212bb46bfe1ac2ecf70ecef
|
64d78a82f1a4a079d51c246059e66d59c20aec1c
|
refs/heads/main
| 2023-07-15T04:39:56.425141
| 2021-08-21T05:43:27
| 2021-08-21T05:43:27
| 398,159,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,880
|
py
|
from datetime import datetime
# Voice lines marking the start of a lab run.
START_LINE = ["Izaro: Ascend with precision.",
              "Izaro: The Goddess is watching.",
              "Izaro: Justice will prevail."]

# Voice lines marking a completed run (final trial).
FINISH_LINE = ["Izaro: Triumphant at last!",
               "Izaro: You are free!",
               "Izaro: I die for the Empire!",
               "Izaro: The trap of tyranny is inescapable.",
               "Izaro: Delight in your gilded dungeon, ascendant.",
               "Izaro: Your destination is more dangerous than the journey, ascendant."]

# Voice lines marking the end of an intermediate aspirant trial.
SECTION_END_LINE = ["Izaro: By the Goddess! What ambition!",
                    "Izaro: Such resilience!",
                    "Izaro: You are inexhaustible!",
                    "Izaro: You were born for this!"]

# Voice lines heard when the player dies (run abandoned).
DEATH_LINE = ["Izaro: Apparently, this is the ending that you deserve.",
              "Izaro: For those who ascend too quickly, the fall is inevitable.",
              "Izaro: Justice is served."]

# Retained for backward compatibility with any external readers.
# Fix: process_log() no longer uses these module-level lists as working
# state — the original appended to them on every call, so a second call
# mixed in stale data from the first and returned wrong results.
lab_run_list = []
start_list = []
finish_list = []
section_end_list = []
death_list = []
today_log = []


def process_log(client_log):
    """Return the fastest completed lab-run duration (seconds) found in
    today's portion of `client_log`, or False when no run completed.

    A run counts as complete when a FINISH line follows a START line with
    the third aspirant trial reached (two SECTION_END lines in between).
    """
    current_date = datetime.now().date().strftime("%Y/%m/%d")  # Year/Month/Day
    todays_lines = [line for line in client_log.splitlines()
                    if current_date in line]

    def _matches(line, phrases):
        # True when the line contains any of Izaro's phrases.
        return any(phrase in line for phrase in phrases)

    all_phrases = START_LINE + FINISH_LINE + SECTION_END_LINE + DEATH_LINE
    # Timestamps lead each line, so lexicographic sort == chronological.
    events = sorted(line for line in todays_lines if _matches(line, all_phrases))

    run_durations = []
    start_time = None
    section = 0  # fix: initialised, so a stray SECTION_END cannot raise
    for line in events:
        stamp = line[0:19]  # "YYYY/MM/DD HH:MM:SS"
        if _matches(line, START_LINE):
            start_time = datetime.strptime(stamp, "%Y/%m/%d %H:%M:%S")
            section = 1
        elif _matches(line, SECTION_END_LINE):
            section += 1
        elif _matches(line, FINISH_LINE):
            # Only count a finish heard at the last aspirant trial.
            if section == 3 and start_time is not None:
                end_time = datetime.strptime(stamp, "%Y/%m/%d %H:%M:%S")
                run_durations.append((end_time - start_time).seconds)
        # DEATH_LINE lines end the attempt without recording a time.

    if run_durations:
        return min(run_durations)
    return False
|
[
"noreply@github.com"
] |
noreply@github.com
|
7897e7d4cadfa5c63f6555c720fe7a1d117dfa50
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/statsmodels/2017/12/markov_regression.py
|
ef11b49627e507701b1babda3e3d9963f998bb8c
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 16,426
|
py
|
"""
Markov switching regression models
Author: Chad Fulton
License: BSD-3
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.regime_switching import markov_switching
class MarkovRegression(markov_switching.MarkovSwitching):
r"""
First-order k-regime Markov switching regression model
Parameters
----------
endog : array_like
The endogenous variable.
k_regimes : integer
The number of regimes.
trend : {'nc', 'c', 't', 'ct'}
Whether or not to include a trend. To include an intercept, time trend,
or both, set `trend='c'`, `trend='t'`, or `trend='ct'`. For no trend,
set `trend='nc'`. Default is an intercept.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : integer, optional
The order of the model describes the dependence of the likelihood on
previous regimes. This depends on the model in question and should be
set appropriately by subclasses.
exog_tvtp : array_like, optional
Array of exogenous or lagged variables to use in calculating
time-varying transition probabilities (TVTP). TVTP is only used if this
variable is provided. If an intercept is desired, a column of ones must
be explicitly included in this array.
switching_trend : boolean or iterable, optional
If a boolean, sets whether or not all trend coefficients are
switching across regimes. If an iterable, should be of length equal
to the number of trend variables, where each element is
a boolean describing whether the corresponding coefficient is
switching. Default is True.
switching_exog : boolean or iterable, optional
If a boolean, sets whether or not all regression coefficients are
switching across regimes. If an iterable, should be of length equal
to the number of exogenous variables, where each element is
a boolean describing whether the corresponding coefficient is
switching. Default is True.
switching_variance : boolean, optional
Whether or not there is regime-specific heteroskedasticity, i.e.
whether or not the error term has a switching variance. Default is
False.
Notes
-----
This model is new and API stability is not guaranteed, although changes
will be made in a backwards compatible way if possible.
The model can be written as:
.. math::
y_t = a_{S_t} + x_t' \beta_{S_t} + \varepsilon_t \\
\varepsilon_t \sim N(0, \sigma_{S_t}^2)
i.e. the model is a dynamic linear regression where the coefficients and
the variance of the error term may be switching across regimes.
The `trend` is accommodated by prepending columns to the `exog` array. Thus
if `trend='c'`, the passed `exog` array should not already have a column of
ones.
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
"""
def __init__(self, endog, k_regimes, trend='c', exog=None, order=0,
             exog_tvtp=None, switching_trend=True, switching_exog=True,
             switching_variance=False, dates=None, freq=None,
             missing='none'):
    # Properties
    self.trend = trend
    self.switching_trend = switching_trend
    self.switching_exog = switching_exog
    self.switching_variance = switching_variance

    # Exogenous data: prepare_exog returns (k_exog, 2-d exog array)
    self.k_exog, exog = markov_switching.prepare_exog(exog)

    # Trend: implemented by prepending deterministic columns to `exog`
    # ('c' = constant, 't' = linear time trend, 'ct' = both).
    nobs = len(endog)
    self.k_trend = 0
    self._k_exog = self.k_exog
    trend_exog = None
    if trend == 'c':
        trend_exog = np.ones((nobs, 1))
        self.k_trend = 1
    elif trend == 't':
        trend_exog = (np.arange(nobs) + 1)[:, np.newaxis]
        self.k_trend = 1
    elif trend == 'ct':
        trend_exog = np.c_[np.ones((nobs, 1)),
                           (np.arange(nobs) + 1)[:, np.newaxis]]
        self.k_trend = 2
    if trend_exog is not None:
        exog = trend_exog if exog is None else np.c_[trend_exog, exog]
        # _k_exog counts trend columns plus regression columns together.
        self._k_exog += self.k_trend

    # Initialize the base model
    super(MarkovRegression, self).__init__(
        endog, k_regimes, order=order, exog_tvtp=exog_tvtp, exog=exog,
        dates=dates, freq=freq, missing=missing)

    # Switching options: normalize scalar booleans to per-coefficient
    # lists and validate iterable lengths.
    if self.switching_trend is True or self.switching_trend is False:
        self.switching_trend = [self.switching_trend] * self.k_trend
    elif not len(self.switching_trend) == self.k_trend:
        raise ValueError('Invalid iterable passed to `switching_trend`.')
    if self.switching_exog is True or self.switching_exog is False:
        self.switching_exog = [self.switching_exog] * self.k_exog
    elif not len(self.switching_exog) == self.k_exog:
        raise ValueError('Invalid iterable passed to `switching_exog`.')

    # Combined per-coefficient switching flags: trend first, then exog.
    self.switching_coeffs = (
        np.r_[self.switching_trend,
              self.switching_exog].astype(bool).tolist())

    # Parameters
    self.parameters['exog'] = self.switching_coeffs
    self.parameters['variance'] = [1] if self.switching_variance else [0]
    def predict_conditional(self, params):
        """
        In-sample prediction, conditional on the current regime

        Parameters
        ----------
        params : array_like
            Array of parameters at which to perform prediction.

        Returns
        -------
        predict : array_like
            Array of predictions conditional on current, and possibly past,
            regimes
        """
        params = np.array(params, ndmin=1)
        # Since in the base model the values are the same across columns, we
        # only compute a single column, and then expand it below.
        predict = np.zeros((self.k_regimes, self.nobs), dtype=params.dtype)
        for i in range(self.k_regimes):
            # Predict: X @ beta_i, where beta_i are regime i's coefficients
            # (some entries may be shared across regimes)
            if self._k_exog > 0:
                coeffs = params[self.parameters[i, 'exog']]
                predict[i] = np.dot(self.exog, coeffs)
        # Insert a singleton "previous regime" axis: (k_regimes, 1, nobs)
        return predict[:, None, :]
def _resid(self, params):
predict = np.repeat(self.predict_conditional(params),
self.k_regimes, axis=1)
return self.endog - predict
    def _conditional_likelihoods(self, params):
        """
        Compute likelihoods conditional on the current period's regime
        """
        # Get residuals; shaped (k_regimes, k_regimes, nobs) by _resid
        resid = self._resid(params)
        # Compute the conditional likelihoods
        variance = params[self.parameters['variance']].squeeze()
        if self.switching_variance:
            # Reshape so each regime's variance broadcasts over its own
            # residual slice
            variance = np.reshape(variance, (self.k_regimes, 1, 1))
        # Gaussian density evaluated at the residuals
        conditional_likelihoods = (
            np.exp(-0.5 * resid**2 / variance) / np.sqrt(2 * np.pi * variance))
        return conditional_likelihoods
@property
def _res_classes(self):
return {'fit': (MarkovRegressionResults,
MarkovRegressionResultsWrapper)}
def _em_iteration(self, params0):
"""
EM iteration
Notes
-----
This uses the inherited _em_iteration method for computing the
non-TVTP transition probabilities and then performs the EM step for
regression coefficients and variances.
"""
# Inherited parameters
result, params1 = super(MarkovRegression, self)._em_iteration(params0)
tmp = np.sqrt(result.smoothed_marginal_probabilities)
# Regression coefficients
coeffs = None
if self._k_exog > 0:
coeffs = self._em_exog(result, self.endog, self.exog,
self.parameters.switching['exog'], tmp)
for i in range(self.k_regimes):
params1[self.parameters[i, 'exog']] = coeffs[i]
# Variances
params1[self.parameters['variance']] = self._em_variance(
result, self.endog, self.exog, coeffs, tmp)
# params1[self.parameters['variance']] = 0.33282116
return result, params1
    def _em_exog(self, result, endog, exog, switching, tmp=None):
        """
        EM step for regression coefficients

        Non-switching coefficients are estimated by OLS on the full sample;
        switching coefficients by per-regime least squares with each
        observation weighted by the square root of its smoothed regime
        probability. `switching` is a boolean array marking which columns
        of `exog` switch by regime (it is negated with ``~`` below).
        """
        k_exog = exog.shape[1]
        coeffs = np.zeros((self.k_regimes, k_exog))
        # First, estimate non-switching coefficients
        if not np.all(switching):
            nonswitching_exog = exog[:, ~switching]
            nonswitching_coeffs = (
                np.dot(np.linalg.pinv(nonswitching_exog), endog))
            coeffs[:, ~switching] = nonswitching_coeffs
            # Partial out the non-switching component before the
            # per-regime step
            endog = endog - np.dot(nonswitching_exog, nonswitching_coeffs)
        # Next, get switching coefficients
        if np.any(switching):
            switching_exog = exog[:, switching]
            if tmp is None:
                tmp = np.sqrt(result.smoothed_marginal_probabilities)
            for i in range(self.k_regimes):
                # Weighted least squares for regime i via pseudo-inverse
                tmp_endog = tmp[i] * endog
                tmp_exog = tmp[i][:, np.newaxis] * switching_exog
                coeffs[i, switching] = (
                    np.dot(np.linalg.pinv(tmp_exog), tmp_endog))
        return coeffs
    def _em_variance(self, result, endog, exog, betas, tmp=None):
        """
        EM step for variances

        If the variance switches, each regime's variance is the
        smoothed-probability-weighted mean squared residual for that
        regime; otherwise a single pooled variance is accumulated over all
        regimes using square-root-probability-weighted residuals.
        """
        k_exog = 0 if exog is None else exog.shape[1]
        if self.switching_variance:
            variance = np.zeros(self.k_regimes)
            for i in range(self.k_regimes):
                if k_exog > 0:
                    resid = endog - np.dot(exog, betas[i])
                else:
                    resid = endog
                # Probability-weighted average of squared residuals
                variance[i] = (
                    np.sum(resid**2 *
                           result.smoothed_marginal_probabilities[i]) /
                    np.sum(result.smoothed_marginal_probabilities[i]))
        else:
            variance = 0
            if tmp is None:
                tmp = np.sqrt(result.smoothed_marginal_probabilities)
            for i in range(self.k_regimes):
                # Weighting by sqrt(prob) means the squared residual
                # carries the probability weight itself
                tmp_endog = tmp[i] * endog
                if k_exog > 0:
                    tmp_exog = tmp[i][:, np.newaxis] * exog
                    resid = tmp_endog - np.dot(tmp_exog, betas[i])
                else:
                    resid = tmp_endog
                variance += np.sum(resid**2)
            # Probabilities sum to one across regimes each period, so the
            # normalizer is simply the sample size
            variance /= self.nobs
        return variance
    @property
    def start_params(self):
        """
        (array) Starting parameters for maximum likelihood estimation.

        Notes
        -----
        These are not very sophisticated and / or good. We set equal transition
        probabilities and interpolate regression coefficients between zero and
        the OLS estimates, where the interpolation is based on the regime
        number. We rely heavily on the EM algorithm to quickly find much better
        starting parameters, which are then used by the typical scoring
        approach.
        """
        # Inherited parameters (transition probabilities)
        params = markov_switching.MarkovSwitching.start_params.fget(self)
        # Regression coefficients
        if self._k_exog > 0:
            # Pooled OLS fit used as an anchor for all regimes
            beta = np.dot(np.linalg.pinv(self.exog), self.endog)
            variance = np.var(self.endog - np.dot(self.exog, beta))
            if np.any(self.switching_coeffs):
                for i in range(self.k_regimes):
                    # Regime i gets beta scaled by i / k_regimes
                    params[self.parameters[i, 'exog']] = (
                        beta * (i / self.k_regimes))
            else:
                params[self.parameters['exog']] = beta
        else:
            variance = np.var(self.endog)
        # Variances
        if self.switching_variance:
            # Spread starting variances between variance/10 and variance
            params[self.parameters['variance']] = (
                np.linspace(variance / 10., variance, num=self.k_regimes))
        else:
            params[self.parameters['variance']] = variance
        return params
    @property
    def param_names(self):
        """
        (list of str) List of human readable parameter names (for parameters
        actually included in the model).
        """
        # Inherited parameters (transition probability names)
        param_names = np.array(
            markov_switching.MarkovSwitching.param_names.fget(self),
            dtype=object)
        # Regression coefficients
        if np.any(self.switching_coeffs):
            for i in range(self.k_regimes):
                # Switching names look like 'x1[0]' (regressor, regime)
                param_names[self.parameters[i, 'exog']] = [
                    '%s[%d]' % (exog_name, i) for exog_name in self.exog_names]
        else:
            param_names[self.parameters['exog']] = self.exog_names
        # Variances
        if self.switching_variance:
            for i in range(self.k_regimes):
                param_names[self.parameters[i, 'variance']] = 'sigma2[%d]' % i
        else:
            param_names[self.parameters['variance']] = 'sigma2'
        return param_names.tolist()
    def transform_params(self, unconstrained):
        """
        Transform unconstrained parameters used by the optimizer to constrained
        parameters used in likelihood evaluation

        Parameters
        ----------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer, to be
            transformed.

        Returns
        -------
        constrained : array_like
            Array of constrained parameters which may be used in likelihood
            evaluation.
        """
        # Inherited parameters (transition probabilities)
        constrained = super(MarkovRegression, self).transform_params(
            unconstrained)
        # Nothing to do for regression coefficients
        constrained[self.parameters['exog']] = (
            unconstrained[self.parameters['exog']])
        # Force variances to be positive (squaring is inverted by the
        # square root in untransform_params)
        constrained[self.parameters['variance']] = (
            unconstrained[self.parameters['variance']]**2)
        return constrained
    def untransform_params(self, constrained):
        """
        Transform constrained parameters used in likelihood evaluation
        to unconstrained parameters used by the optimizer

        Parameters
        ----------
        constrained : array_like
            Array of constrained parameters used in likelihood evaluation, to
            be transformed.

        Returns
        -------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer.
        """
        # Inherited parameters (transition probabilities)
        unconstrained = super(MarkovRegression, self).untransform_params(
            constrained)
        # Nothing to do for regression coefficients
        unconstrained[self.parameters['exog']] = (
            constrained[self.parameters['exog']])
        # Inverse of the squaring applied in transform_params
        unconstrained[self.parameters['variance']] = (
            constrained[self.parameters['variance']]**0.5)
        return unconstrained
class MarkovRegressionResults(markov_switching.MarkovSwitchingResults):
    r"""
    Class to hold results from fitting a Markov switching regression model

    Parameters
    ----------
    model : MarkovRegression instance
        The fitted model instance
    params : array
        Fitted parameters
    filter_results : HamiltonFilterResults or KimSmootherResults instance
        The underlying filter and, optionally, smoother output
    cov_type : string
        The type of covariance matrix estimator to use. Can be one of 'approx',
        'opg', 'robust', or 'none'.

    Attributes
    ----------
    model : Model instance
        A reference to the model that was fit.
    filter_results : HamiltonFilterResults or KimSmootherResults instance
        The underlying filter and, optionally, smoother output
    nobs : float
        The number of observations used to fit the model.
    params : array
        The parameters of the model.
    scale : float
        This is currently set to 1.0 and not used by the model or its results.
    """
    # All behavior is inherited from MarkovSwitchingResults; this subclass
    # exists so results carry the regression-specific type.
    pass
class MarkovRegressionResultsWrapper(
        markov_switching.MarkovSwitchingResultsWrapper):
    """Results wrapper for MarkovRegressionResults; all behavior inherited."""
    pass
wrap.populate_wrapper(MarkovRegressionResultsWrapper, MarkovRegressionResults)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
4da9236790e55b245e9e279660657cfffce90b75
|
f10042f062537becffa4de4286eac5945461bf10
|
/python-tests/img-test.py
|
4b4325e0314867c2e73a48053f6da632ae49533c
|
[] |
no_license
|
major-phyo-san/raw-codes
|
38bacba381f1d8f3e5e84c2b734a849b34806044
|
34c7b9acd7020a8363826f75d336bc09e413fc36
|
refs/heads/master
| 2022-03-09T07:56:21.794703
| 2022-02-25T04:45:20
| 2022-02-25T04:45:20
| 207,994,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import numpy as np
import cv2
import imutils
# Source photos to stitch into a panorama (overlapping views).
imagePaths = ["images/m2.JPG","images/m1.JPG","images/m3.JPG"]
images = []
i=1
# Load each image and display it briefly for visual confirmation.
for imagePath in imagePaths:
    images.append(cv2.imread(imagePath))
    cv2.imshow("image"+str(i),images[i-1])
    cv2.waitKey(0) & 0xFF
    cv2.destroyAllWindows()
    i = i+1
# Stitch the images; status 0 means success.
stitcher = cv2.createStitcher(try_use_gpu=False)
(status, stitched) = stitcher.stitch(images)
if status == 0:
    # Pad with a black border so the panorama outline forms a closed
    # contour for the cropping step below.
    stitched = cv2.copyMakeBorder(stitched, 10,10,10,10,
        cv2.BORDER_CONSTANT, (0,0,0))
    gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)[1]
    # Largest external contour = outline of the stitched (non-black) area.
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # Filled bounding-box mask of the panorama region.
    mask = np.zeros(thresh.shape, dtype="uint8")
    (x,y,w,h) = cv2.boundingRect(c)
    cv2.rectangle(mask,(x,y),(x+w,y+h),255,-1)
    # Erode the mask until it fits entirely inside the panorama, trimming
    # the irregular black edges produced by stitching.
    minRect = mask.copy()
    sub = mask.copy()
    while cv2.countNonZero(sub) > 0:
        minRect = cv2.erode(minRect, None)
        sub = cv2.subtract(minRect, thresh)
    # Bounding box of the eroded mask = maximal clean inner rectangle.
    cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    (x,y,w,h) = cv2.boundingRect(c)
    # Crop and display the final panorama.
    stitched = stitched[y:y + h, x:x + w]
    cv2.imshow("Pano Img", stitched)
    cv2.waitKey(0) & 0xFF
    cv2.destroyAllWindows()
|
[
"phyojupiter9@gmail.com"
] |
phyojupiter9@gmail.com
|
9a7b7ade617d5b7c6f80049ccb17d17674ca29ef
|
c662c18340914edbb099dd7412ea1de1dedff730
|
/setup.py
|
5ce9f1b7c35cc7879b9b45c8db3551ab550f49cb
|
[
"MIT"
] |
permissive
|
snaqvi1990/sircel
|
5e01efd2aada04c047730eedc58fc5fc63af8088
|
fa1cc4050af5406f0d6e5902700db7a03d842664
|
refs/heads/master
| 2020-12-02T18:03:18.819512
| 2017-05-09T18:57:58
| 2017-05-09T18:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,279
|
py
|
"""
"""
import shlex
import sys
import os
from setuptools import setup
# Collect custom command-line options before handing sys.argv to setuptools.
params = {}
args = shlex.split(' '.join(sys.argv))
if('--kallisto' in args):
    index = args.index('--kallisto')
    # Bug fix: the path argument lives at index + 1, so that is the bound
    # to check (the original `index < len(args)` was always true once the
    # flag itself had been found).
    assert(index + 1 < len(args)), \
        '--kallisto option requires a path'
    kallisto_path = args[index + 1]
    assert os.path.exists(kallisto_path), \
        'kallisto path is invalid.\n%s' % kallisto_path
    params['kallisto'] = kallisto_path
    # Remove our custom flag/value so setuptools does not see them.
    sys.argv.remove('--kallisto')
    sys.argv.remove(kallisto_path)
else:
    params['kallisto'] = None
if('--osx' in args):
    params['zcat'] = 'gzcat' #zcat function is broken on mac
    sys.argv.remove('--osx')
else:
    params['zcat'] = 'zcat'
setup(name='sircel',
    version='0.1',
    description='Identify and error correct barcodes for single-cell genomics',
    url='https://github.com/pachterlab/Sircel',
    author='Akshay Tambe',
    author_email='akshay.tambe@berkeley.edu',
    license='MIT',
    packages=['sircel'],
    py_modules=['numpy', 'scipy', 'sklearn', 'redis'])
"""
prepare params.json
"""
import json
# Record the absolute path of the main entry point alongside the options
# gathered above, for use at runtime.
current_path = os.path.dirname(os.path.abspath(__file__))
params['sircel'] = current_path + '/sircel/Sircel_master.py'
with open('./sircel/params.json', 'w') as writer:
    writer.write(json.dumps(params, indent = 3))
|
[
"akshay.tambe@berkeley.edu"
] |
akshay.tambe@berkeley.edu
|
d0eb44f47aea9e440d8ce9e2190b0d49f9f3822d
|
94b101b38acb682422b8e26ff09527e1102e6524
|
/project/users/views.py
|
4ae6702c4b12933ac5fa836b8207dbb98b6bbb8b
|
[] |
no_license
|
mjoze/Web-App
|
f0ff12118510cb5bfa6d4ff5541194b184848c41
|
8f5c237231d35d87a77cf9dffa7261c19f81dec7
|
refs/heads/master
| 2020-12-23T02:47:06.241269
| 2020-03-07T14:34:54
| 2020-03-07T14:34:54
| 237,010,747
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Create a new user account from the registration form.

    GET renders a blank form; POST validates and saves the new user,
    then redirects to the login page. An invalid POST re-renders the
    bound form with its errors.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Fixed: removed the unused `username` local and the pointless
            # f-prefix on a string with no placeholders.
            messages.success(
                request, 'Your account has been created! You are now able to log in.')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Show and update the logged-in user's account and profile.

    POST validates both the user form and the profile form together and
    only saves when both are valid; GET (or an invalid POST falling
    through) renders the forms bound to the current user.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # Fixed: removed the pointless f-prefix (no placeholders).
            messages.success(
                request, 'Your account has been updated')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'users/profile.html', context)
|
[
"mirek.jozefiak@gmail.com"
] |
mirek.jozefiak@gmail.com
|
78b373ee16f0efc70102408817bb21f313d8525e
|
fdcbf5b203f07cceefbb38a746f4a43b322e263e
|
/Python/findNeighbors_of_Nulls.py
|
52f8694848396c9527b570609bc2724e421599bd
|
[] |
no_license
|
Masoumeh/0390.IbnAhmadMuqaddasi.AhsanTaqasim
|
e7a3eddc895edb79f8d93c1bd0f09f130a761858
|
592720e5a154fcfe9cdab84b16eaf5574f30b806
|
refs/heads/master
| 2021-01-18T00:36:09.962622
| 2017-11-07T13:13:46
| 2017-11-07T13:13:46
| 45,922,253
| 0
| 0
| null | 2015-11-10T15:49:02
| 2015-11-10T15:49:02
| null |
UTF-8
|
Python
| false
| false
| 1,682
|
py
|
"""
To get some information from the route network graph, like how far are the first two neighbours (with coords) of a node (without coords)
"""
from networkx.readwrite import json_graph
import io, json, csv
import re
import networkx as nx
import sys
import operator
import compose_graphs as cg
def findNeighbors_of_Nulls(G, writer):
    """For every node lacking coordinates, find its first two direct
    neighbours that do have coordinates and write them to `writer`.

    Parameters
    ----------
    G : networkx graph whose nodes carry 'lat'/'lng' attributes, with the
        literal string "null" marking a missing coordinate.
    writer : csv.writer-like object; one row [node, [[neighbour, 1], ...]]
        is written per node that has exactly two coordinate-bearing
        neighbours.
    """
    # Removed: a commented-out alternative loader and an unused
    # `coord_neighbors` dict from the original.
    nulls = [n for n in G.nodes() if G.node[n]['lat'] == "null" and G.node[n]['lng'] == "null"]
    print(len(nulls))
    for node in nulls:
        length = nx.single_source_shortest_path_length(G, node)
        sorted_length = sorted(length.items(), key=operator.itemgetter(1))
        neighCoords = []
        # Exclude the first item of the list from the loop, which is the
        # node itself at distance zero, i.e. ('node', 0).
        for l in sorted_length[1:]:
            # Keep only direct neighbours (distance 1) that have coordinates
            if l[1] == 1 and G.node[l[0]]['lat'] != "null" and G.node[l[0]]['lng'] != "null":
                neighCoords.append([l[0], l[1]])
            # Limit the neighbours to two: we only need at least two
            # coordinate-bearing anchors per node
            if len(neighCoords) >= 2:
                break
        if len(neighCoords) == 2:
            writer.writerow([node, neighCoords])
|
[
"m.seydi@uni-leipzig.de"
] |
m.seydi@uni-leipzig.de
|
a6fccf16a0bd1c5d62dc11cf686acd57c59da533
|
f4352d9564709d2904041bd08a62ca3bc254bd94
|
/csv_data_reader.py
|
e4d69fd637aa09d46730a8dcf6fd77a004a08d57
|
[] |
no_license
|
akeele/TreidiSim
|
b93d1559fe62697b6e45969cea93c1f1751585c3
|
42c414bff18e999ce12bed466844c1a0fa18e76c
|
refs/heads/master
| 2022-11-05T07:26:58.534621
| 2019-10-14T10:45:27
| 2019-10-14T10:45:27
| 213,561,215
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
import pandas
import os
from asset_finder import AssetFinder
class NasdaqOMXCsvReader:
    """Reads daily DOHLCV bars for Nasdaq OMX assets from local CSV files."""

    NASDAQ_OMX_CSV_DIRECTORY = "data/nasdaq-omx-csv"
    DOHLCV_COLUMNS = ["Date", "Opening price", "High price", "Low price", "Closing price", "Total volume"]
    # Maps the raw Nasdaq OMX column headers to canonical bar names.
    DOHLCV_MAPPER = {"Date": "date",
                     "Opening price": "open",
                     "High price": "high",
                     "Low price": "low",
                     "Closing price": "close",
                     "Total volume": "volume"}

    def __init__(self, assets=None, all_assets=False):
        """Build the ticker -> csv-file mapping.

        With all_assets=True every file in the data directory is used;
        otherwise AssetFinder locates the files for the given `assets`.
        """
        self.all_assets = all_assets
        # Fixed: idiomatic truthiness test instead of `== True`.
        if self.all_assets:
            self.assets_csv_files = self._get_all_csv_files()
        else:
            self.assets = assets
            self.assets_csv_files = AssetFinder(self.assets).find_assets_csv_files(self.NASDAQ_OMX_CSV_DIRECTORY)

    def _get_all_csv_files(self):
        """Return {ticker: csv_path} for every file in the data directory."""
        csv_files = os.listdir(self.NASDAQ_OMX_CSV_DIRECTORY)
        # The ticker is the file name without its extension.
        tickers = [filename.split(".")[0] for filename in csv_files]
        csv_files = [os.path.join(self.NASDAQ_OMX_CSV_DIRECTORY, filename) for filename in csv_files]
        return dict(zip(tickers, csv_files))

    def _read_to_pandas_dataframe(self, csv_file):
        """Parse one Nasdaq OMX CSV export (';'-separated, ',' decimals)."""
        data = pandas.read_csv(csv_file, sep=';', header=1, decimal=',')
        # Drop last column, because it is empty (trailing ';' per row)
        data.drop(data.columns[len(data.columns)-1], axis=1, inplace=True)
        return data

    def _get_DOHLCV_bars(self, asset_dataframe):
        """Select only the date/open/high/low/close/volume columns."""
        return asset_dataframe[self.DOHLCV_COLUMNS]

    def get_assets_bars(self):
        """Return {ticker: DataFrame} of date-sorted DOHLCV bars."""
        assets_bars = {}
        for ticker, csv_file in self.assets_csv_files.items():
            asset_dataframe = self._read_to_pandas_dataframe(csv_file)
            asset_bars = self._get_DOHLCV_bars(asset_dataframe)
            # Rename the bars to be consistent with everything else
            asset_bars = asset_bars.rename(columns=self.DOHLCV_MAPPER)
            # Sort by ascending date
            asset_bars['date'] = pandas.to_datetime(asset_bars['date'])
            asset_bars = asset_bars.sort_values(by='date')
            assets_bars[ticker] = asset_bars
        return assets_bars
|
[
"aksu.suunta@gmail.com"
] |
aksu.suunta@gmail.com
|
1d8762c60b7af569450421e970799689990cf863
|
69a8a88c99f5c401b188ce7637174c19a3ed48d8
|
/0x0A-python-inheritance/10-square.py
|
9f90ed3be2ee071cbcc079312aa9f6543eda60d0
|
[] |
no_license
|
JDorangetree/holbertonschool-higher_level_programming
|
0546b25726052a8ce6468781f933eb28d1aee30d
|
f984f5047f690d352c7f203ef16aa7f0cc49afcd
|
refs/heads/master
| 2020-09-29T01:22:22.387395
| 2020-05-16T23:35:12
| 2020-05-16T23:35:12
| 226,912,872
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 495
|
py
|
#!/usr/bin/python3
""" Class that inherits from Rectangle """
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
    """Square modelled through the Rectangle parent class."""

    def __init__(self, size):
        """Validate *size* and initialise the underlying Rectangle."""
        self.integer_validator("size", size)
        self.__size = size
        super().__init__(size, size)

    def area(self):
        """Return the surface area of the square."""
        return self.__size ** 2
|
[
"julian.naranjo2014@gmail.com"
] |
julian.naranjo2014@gmail.com
|
9377eeb7e95dc44c45eccf96248f42fb4f744035
|
7f585fdad9cf95714e492b5a4328f9603cf38459
|
/src/decorator-sample.py
|
ff3665bd745d010906214afc3a68e6dda145e662
|
[
"MIT"
] |
permissive
|
skitazaki/python-school-ja
|
2a4da0b4eb130920333d4530e3223217f152ba6c
|
f8f2250d48d734588469806165d24c3e1a9e6765
|
refs/heads/develop
| 2021-08-10T11:41:39.342364
| 2020-03-28T08:38:08
| 2020-03-28T08:38:08
| 2,898,972
| 1
| 0
|
MIT
| 2021-06-10T22:38:31
| 2011-12-02T14:32:48
|
Python
|
UTF-8
|
Python
| false
| false
| 353
|
py
|
class A(object):
    """Dispatcher base: ``name`` greets, then calls the subclass hook."""

    @classmethod
    def name(cls, msg):
        """Print a generic greeting and forward *msg* to ``cls.hello``."""
        print("Greetings from classmethod.")
        cls.hello(msg)
class B(A):
    """Concrete greeter identifying itself as Class-B."""

    @staticmethod
    def hello(msg):
        """Print the Class-B greeting for *msg*."""
        print("Hello", msg, " from Class-B")
class C(A):
    """Concrete greeter identifying itself as Class-C."""

    @staticmethod
    def hello(msg):
        """Print the Class-C greeting for *msg*."""
        print("Hello", msg, " from Class-C")
# Exercise both subclasses through the shared classmethod dispatcher.
for greeter, person in ((B, "Alice"), (C, "Bob")):
    greeter.name(person)
|
[
"skitazaki@gmail.com"
] |
skitazaki@gmail.com
|
b09326d6b7ef11fa7afb566ee1ae1016ff118aae
|
e6ad1555cb6e2e8b1a3c40e7bf8985d5b62aca9a
|
/aritmetic_funct.py
|
d9aaa4ef53e69f1457f5f658ee3fc352cc152a09
|
[] |
no_license
|
denb11/def_add_HW
|
dc83f0c945ebe874f280bb5f0a3b5ccdf85b44be
|
41fb988e5483555cf515ade99a116ecf5a0c25a9
|
refs/heads/master
| 2022-11-27T19:05:51.754427
| 2020-07-30T19:27:42
| 2020-07-30T19:27:42
| 283,859,707
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
def add(a, b):
    """Print the sum of *a* and *b* when both are exactly ints; otherwise
    print an error message (Romanian: "the given values do not match the
    required type")."""
    # `type(x) is int` keeps the original strict behavior (bools and int
    # subclasses are rejected) while fixing the `==` type-comparison idiom.
    if type(a) is int and type(b) is int:
        print(a + b)
    else:
        print("valorile transmise nu corespund tipului")
add(10, 20)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3ab4c489a09acde84ec736017a06f464cf066447
|
027698c1805955cb7222f682a0b3939e0f8405a1
|
/functions/problem_set2-2.py
|
30cdc4ae143ca44118b3e1628f1daaf91339107a
|
[] |
no_license
|
mrbartrns/introducing-to-CS
|
e0c114ce175169d6750bdee9fd8ddf3ad264f18e
|
2fceef111ebed0ee0e8266997973dd410f45e7fa
|
refs/heads/master
| 2022-12-07T23:46:33.329046
| 2020-08-30T14:39:04
| 2020-08-30T14:39:04
| 280,989,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
def solve(balance, annualInterestRate):
    """Print the lowest fixed monthly payment (a multiple of 10) that
    clears `balance` within 12 months at the given annual interest rate.

    Brute force: simulate a year of payments; if a balance remains,
    restart from the original balance with a payment 10 higher.
    """
    minimumFixedMonthlyPayment = 10
    original = balance
    monthlyInterestRate = annualInterestRate / 12.0
    # Removed: a dead pre-loop computation of the unpaid balance and a
    # redundant manual `i += 1` that shadowed the for-loop counter.
    while balance > 0:
        for _ in range(12):
            monthlyUnpaidBalance = balance - minimumFixedMonthlyPayment
            balance = monthlyUnpaidBalance * (1 + monthlyInterestRate)
        if balance > 0:
            # Not paid off: reset and try the next payment level.
            balance = original
            minimumFixedMonthlyPayment += 10
    print("Lowest Payment:", minimumFixedMonthlyPayment)
solve(3330, 0.2)
|
[
"mrbartrns@naver.com"
] |
mrbartrns@naver.com
|
bbef2beee7c94d588e9831ccbb760157f2f2e422
|
6915d6a20d82ecf2a2a3d3cd84ca22dab2491004
|
/advtempproject/advtempproject/wsgi.py
|
507d246211545d55217dfb1767569eb090224823
|
[] |
no_license
|
iitian-gopu/django
|
bb4302d101f4434fb61ab374807e29699a432e42
|
31db982212bbb453cc4c56c7f5cfad9a00cd231d
|
refs/heads/master
| 2023-05-14T07:22:35.176477
| 2021-06-04T04:43:26
| 2021-06-04T04:43:26
| 366,114,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
"""
WSGI config for advtempproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "advtempproject.settings")
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
|
[
"gopalkrishujaiswal2030@gmail.com"
] |
gopalkrishujaiswal2030@gmail.com
|
7cde29cf536fc56e7ac966f0d5cb9dcfc8f92d08
|
64d887f9a43d627696443a0c82233297012cba23
|
/geturls.py
|
872dcfc67fcc115fc4199d659b7d758e4ffcabf4
|
[] |
no_license
|
MatthewSchwartz6/PythonScripts
|
7c09ecbd00a795f71dbdced7e76c93a18f511f75
|
1d78d5b21e5222d3e30e4bbcdfdf54489e3f1112
|
refs/heads/master
| 2021-04-25T14:03:52.398187
| 2018-01-27T00:58:05
| 2018-01-27T00:58:05
| 110,057,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#!/home/alive/projects/venv/geturls/bin/python
from sys import argv
from bs4 import BeautifulSoup
import requests
def geturls():
    """Fetch the URL given as argv[1] and print the href of every link."""
    url = argv[1]
    # Bug fix: the original passed the UA dict as the second positional
    # argument of requests.get, which is `params` (query string), not
    # headers. Send it as a header as intended.
    r = requests.get(url, headers={'User-agent': 'mybot'})
    soup = BeautifulSoup(r.text, 'html.parser')
    # Only anchors that actually carry an href attribute.
    for anchor in soup.find_all("a", {"href": True}):
        print(anchor["href"])
# Bug fix: the original compared the literal string '__name__' to
# '__main__', which is always False, so geturls() never ran.
if __name__ == '__main__':
    geturls()
|
[
"noreply@github.com"
] |
noreply@github.com
|
2f4164ef4372fc6478789fc37f7c1f66296b61a9
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/0/az1.py
|
2674ec878632dbc448cc05438068f00a33a83305
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
    # Python 2 source. Print the payload of an `aZ1` statement.
    # `lineRemaining` is the list of whitespace-split tokens after the
    # `aZ1` keyword; assumes the opening/closing quotes arrive as
    # standalone '"' tokens -- TODO confirm against sample programs.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Quotes with nothing between them: print a blank line.
            print
def main(fileName):
    # Interpret each line of the program file: lines beginning with the
    # keyword 'aZ1' are print statements; any other line aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'aZ1':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python az1.py <program-file>
    main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
102056145a28eec5b448b8975f4633f44a628b6a
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/synthetic/rttoaobj.py
|
f85a0bd999b0746da1b151ecd36cc2f7a907ac50
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,526
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtToAObj(Mo):
    """
    Mo doc not defined in techpub!!!

    Auto-generated relation MO: the local ("from") side of a relation
    targeting cobra.model.synthetic.SwCTestObj. Generated code -- do not
    edit by hand.
    """
    meta = TargetRelationMeta("cobra.model.synthetic.RtToAObj", "cobra.model.synthetic.SwCTestObj")
    meta.moClassName = "syntheticRtToAObj"
    meta.rnFormat = "rttoAObj"
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "Synthetic Sw C Test Object"
    meta.writeAccessMask = 0x0
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Classes under which this relation MO may appear
    meta.parentClasses.add("cobra.model.synthetic.IfcCTestObj")
    meta.parentClasses.add("cobra.model.synthetic.IfcTLTestObj")
    meta.parentClasses.add("cobra.model.synthetic.SwCTestObj")
    meta.parentClasses.add("cobra.model.synthetic.SwTLTestObj")
    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.rnPrefixes = [
        ('rttoAObj', False),
    ]
    # --- Property metadata (all implicit/admin, as generated) ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    prop = PropMeta("str", "tCl", "tCl", 20610, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4272
    prop.defaultValueStr = "syntheticSwCTestObj"
    prop._addConstant("syntheticSwCTestObj", None, 4272)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)
    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Relations carry no naming properties; the RN is the fixed
        # "rttoAObj" prefix declared in meta.rnPrefixes above.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
f166a652d5b47979391790f851fe8cc5e2d1d43d
|
84c0585118043fb0413bf3cc944d46f3d80acc1b
|
/account/models.py
|
2b8bc8d2006e88a0cb96fc7464ee030e7e79b7e4
|
[] |
no_license
|
CheStix/SocNet
|
77c75ec5f90c084e31204c89c45a1bcd17aef6d6
|
ff6f9299ddff6c0f790a622a428abb63cfda561f
|
refs/heads/master
| 2020-11-29T10:45:09.827530
| 2020-02-14T08:49:50
| 2020-02-14T08:49:50
| 230,094,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
class Profile(models.Model):
    """Extra per-user data, attached 1:1 to the auth user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    date_of_birth = models.DateField(blank=True, null=True)
    photo = models.ImageField(upload_to='users/%Y/%m/%d', blank=True, default='users/profile_default_photo.png')

    def __str__(self):
        """Human-readable label used in the admin and shell."""
        return 'Profile for user {}'.format(self.user.username)
class Contact(models.Model):
    """Directed follow relation: ``user_from`` follows ``user_to``."""
    user_from = models.ForeignKey('auth.User', related_name='rel_from_set', on_delete=models.CASCADE)
    user_to = models.ForeignKey('auth.User', related_name='rel_to_set', on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True, db_index=True)

    class Meta:
        # Bug fix: the original called ``sorted('-created',)``, which just
        # sorts the characters of the string and discards the result. The
        # intent was default ordering, newest first.
        ordering = ('-created',)

    def __str__(self):
        return f'{self.user_from} follows {self.user_to}'
# Add following field to User dynamically
# (monkey-patches the auth User model; symmetrical=False makes the relation
# directed: A following B does not imply B following A)
User.add_to_class('following',
                  models.ManyToManyField('self', through=Contact, related_name='followers', symmetrical=False))
|
[
"che.eldar@gmail.com"
] |
che.eldar@gmail.com
|
31eef008f4bdd94eca8da480dc892e5c688247db
|
83eeeaca79ed0c22a01cb9024182a34f49a59d44
|
/src/3Sum/solution.py
|
24be511f4a323e91d4b5bc7994d5f17720781071
|
[] |
no_license
|
ragingCow/leetcode
|
7ec3d14e0ea3e2ad14dcbed16bd34afe14e8d531
|
c5d4ae54560cf98e3b5361a1cbd59687a0831301
|
refs/heads/master
| 2020-05-18T17:45:42.073427
| 2015-10-19T13:05:19
| 2015-10-19T13:05:19
| 39,693,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
class Solution(object):
    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Sort, then for every pair (i, j) binary-search the suffix for the
        value completing a zero-sum triple; a set removes duplicates.
        Fixed for Python 3: ``range`` instead of ``xrange`` and floor
        division in the binary search (``/`` yields a float index on py3).
        """
        nums = sorted(nums)
        result = []
        for i in range(len(nums)):
            # Once the smallest remaining element is positive, no triple
            # can sum to zero.
            if nums[i] > 0:
                break
            for j in range(i + 1, len(nums)):
                ret = self.binarySearch(nums, j + 1, len(nums), 0 - nums[i] - nums[j])
                if ret >= 0:
                    result.append((nums[i], nums[j], 0 - nums[i] - nums[j]))
        return list(set(result))

    def binarySearch(self, nums, start, end, value):
        """Return an index of `value` within sorted nums[start:end], or -1."""
        left = start
        right = end - 1
        while left <= right:
            middle = (left + right) // 2
            if nums[middle] == value:
                return middle
            elif nums[middle] > value:
                right = middle - 1
            else:
                left = middle + 1
        return -1
|
[
"tianbing02@meituan.com"
] |
tianbing02@meituan.com
|
2d2c98d834f20d62c65fc0e05d928154cccd5843
|
0fdb38aee19ca7075af7fdc881234712fdfc50d4
|
/myMatplotlib_dataLabelInsidePieChart.py
|
7dcd1c3d04319ee7bee342c1b8c204d0ef211343
|
[] |
no_license
|
jrahman1988/PythonSandbox
|
3798e7a6fd213c0423da5ca61440caecb8021dd6
|
70c8a2eca50393c9219828049f48102cc7dd1b35
|
refs/heads/master
| 2023-04-10T21:02:59.608414
| 2021-04-21T06:11:55
| 2021-04-21T06:11:55
| 247,202,348
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
'''
Using Matplotlib module of Python, draw a customized (exploded, labelled)
pie chart.  (The original docstring said "scatter plot", which did not
match the code below.)
'''
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
# Creating dataset: one wedge per car brand, sized by `data`.
cars = ['AUDI', 'BMW', 'FORD',
        'TESLA', 'JAGUAR', 'MERCEDES']
data = [23, 17, 35, 29, 12, 41]
# Creating explode data: fractional offsets pulling selected wedges out.
explode = (0.1, 0.0, 0.2, 0.3, 0.0, 0.0)
# Creating color parameters (one per wedge, in order)
colors = ( "orange", "cyan", "brown",
          "grey", "indigo", "beige")
# Wedge properties (outline styling applied to every wedge)
wp = { 'linewidth' : 1, 'edgecolor' : "green" }
# Creating the autopct callback arguments (see func below)
def func(pct, allvalues):
    """Build the wedge annotation 'P%\\n(N g)' shown inside each pie slice.

    `pct` is the wedge percentage supplied by Matplotlib's autopct hook;
    `allvalues` is the full data series, used to recover the absolute amount.
    """
    total = np.sum(allvalues)
    absolute = int(pct / 100. * total)
    return "{:.1f}%\n({:d} g)".format(pct, absolute)
# Creating plot: ax.pie returns the wedge patches, label texts and the
# autopct texts; the lambda routes each percentage through func() above.
fig, ax = plt.subplots(figsize =(10, 7))
wedges, texts, autotexts = ax.pie(data,
                                  autopct = lambda pct: func(pct, data),
                                  explode = explode,
                                  labels = cars,
                                  shadow = True,
                                  colors = colors,
                                  startangle = 90,
                                  wedgeprops = wp,
                                  textprops = dict(color ="magenta"))
# Adding legend (anchored to the right of the axes)
ax.legend(wedges, cars,
          title ="Cars",
          loc ="center left",
          bbox_to_anchor =(1, 0, 0.5, 1))
# Style the in-wedge percentage labels produced by autopct.
plt.setp(autotexts, size = 8, weight ="bold")
ax.set_title("Customizing pie chart")
# show plot
plt.show()
|
[
"jamilur_rahman@yahoo.com"
] |
jamilur_rahman@yahoo.com
|
e461ffbb5c1ec33bb622eb073345d55ace5d9fc8
|
3e18d5f7d901a44e5c7c8e4961c3123e4b1addcc
|
/Scrapy/venv/lib/python3.7/sre_compile.py
|
2c93afc6b12a0c9e856469ce8d7bf5708dcc0aeb
|
[] |
no_license
|
arunabhthakur94/codesnippets
|
e2094bdb0b30eea836e18b5fb49db426a51d1296
|
c92444b51a920a32a4d90234e9b28cdd10ce8154
|
refs/heads/master
| 2020-12-04T09:40:54.337011
| 2020-01-11T16:45:44
| 2020-01-11T16:45:44
| 231,711,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 64
|
py
|
/home/arunabh/.pyenv/versions/3.7.0/lib/python3.7/sre_compile.py
|
[
"arunabh.thakur94@gmail.com"
] |
arunabh.thakur94@gmail.com
|
660e7e667701cae776fc58821918e1e9bd459b7a
|
cfd7eae9add9c9991d055514f3ee90a1914b8c5f
|
/main.py
|
3aaf48c5fcaf5681805c41fe948c93d7761fa8c0
|
[] |
no_license
|
KU-AI-Club/EXPO_2018
|
5f0234e5e67857d73bf31dd5f896963bd1433136
|
dd59bece17c78536ed881999a5f8d99d67f96d09
|
refs/heads/master
| 2021-01-25T10:39:06.547761
| 2018-03-03T18:32:18
| 2018-03-03T18:32:18
| 123,366,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,335
|
py
|
import gym
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tensorflow as tf
print('cool ~(<.<)~')
def pre_proc(I):
    """Downsample an Atari RGB frame into a flat 6400-long binary vector.

    Crops rows 35:195 (the playfield), keeps every 2nd pixel of channel 0
    (80x80), zeroes the two background colours (144, 109) and sets every
    remaining non-zero pixel to 1.

    NOTE(review): the intermediate assignments operate on views, so the
    caller's frame is modified in place before the final copy — confirm
    callers do not reuse the raw frame afterwards.
    """
    I = I[35:195]        # crop score bar and bottom border
    I = I[::2, ::2, 0]   # downsample by 2 and keep a single channel
    I[I == 144] = 0      # erase background colour 1
    I[I == 109] = 0      # erase background colour 2
    I[I != 0] = 1        # everything left is foreground
    # `float`, not `np.float`: the alias was removed in NumPy 1.24.
    return I.astype(float).ravel()
def append_to_file(obs, filename="test1.txt"):
    """Append the values of *obs* to *filename* as one space-separated line.

    :param obs: sequence of values; each is written via str().
    :param filename: target path (default kept at "test1.txt" for
        backward compatibility with existing callers).
    """
    # `with` guarantees the handle is closed; join avoids the manual
    # last-element special-casing of the original loop.
    with open(filename, "a") as myfile:
        myfile.write(" ".join(str(v) for v in obs))
        myfile.write("\n")
# Environment ids tried during development; env_name5 (Assault) is the one used.
env_name1 = 'Breakout-v0'
env_name2 = 'CartPole-v0'
env_name3 = 'Pong-v0'
env_name4 = 'Phoenix-v0'
env_name5 = 'Assault-v0'
env = gym.make(env_name5)
observation = env.reset()
# Preprocess the first frame so consecutive frames can be diffed below.
prev_img = pre_proc(observation)
img_shape = np.shape(prev_img)
# 80x80 placeholder; overwritten by the frame difference inside the loop.
img = [[0.0 for i in range(80)]for j in range(80)]
# Take 5 random actions; keep the difference of the last two processed frames.
for i in range(5):
    action = env.action_space.sample()
    obs, reward, done, info = env.step(action)
    cur_img = pre_proc(obs)
    img= np.subtract(cur_img, prev_img)
    prev_img = cur_img
# Display the final frame difference as an 80x80 grayscale image.
plt.imshow(img.reshape(80,80),cmap='gray',aspect='auto',animated=True)
plt.show()
#print(np.shape(img))
#img = img.reshape(8,10)
#print(np.shape(img))
'''
Step 1) Create Placeholders
Step 2) Create Variables
Step 3) Create Graph operations
Step 4) Create Loss Function
Step 5) Create Optimizer
Step 6) Initialize variables and create Session
Step 7) Evaluate the model
'''
# Hyper-parameters (NOTE: re-assigned with different values further down).
batch_size = 1
num_classes = 3
num_steps = 2000
num_layers = 1
nodes_per_lay = 10
epochs = 5
#open AI variables
num_inputs = 4
num_outputs = 1
step_limit = 500
avg_steps = []
def add_layers(nodes_per_lay,num_lay,lay_1):
    """Recursively stack `num_lay` additional ReLU layers on top of `lay_1`.

    Each recursion adds one fully-connected layer of `nodes_per_lay` units
    with uniformly-initialised weights and biases, and returns the output
    tensor of the deepest layer.
    """
    weights = tf.Variable(tf.random_uniform([nodes_per_lay, nodes_per_lay]))
    biases = tf.Variable(tf.random_uniform([nodes_per_lay]))
    activated = tf.nn.relu(tf.matmul(lay_1, weights) + biases)
    if num_lay == 0:
        return activated
    return add_layers(nodes_per_lay, num_lay - 1, activated)
batch_size = 100
num_classes = 10
num_steps = 2000
num_layers = 2
nodes_per_lay = 10
num_inputs = 4
epochs = 20
#Step 1) Create Placeholders
y_sen = tf.placeholder(tf.float32)
x = tf.placeholder(tf.float32,shape=[batch_size,num_inputs])
y_true = tf.placeholder(tf.float32,[batch_size,num_classes])
#Step 2) Create Variables
W_in = tf.Variable(tf.truncated_normal([num_inputs,nodes_per_lay],stddev=.1))
b_in = tf.Variable(tf.truncated_normal([nodes_per_lay],stddev=.1))
W_out = tf.Variable(tf.truncated_normal([nodes_per_lay,num_classes],stddev=.1))
b_out = tf.Variable(tf.truncated_normal([num_classes],stddev=.1))
#Step 3) Create Graph
y_in = tf.nn.relu(tf.matmul(x,W_in) + b_in)
y_hid = add_layers(nodes_per_lay,num_layers,y_in)
y = tf.matmul(y_hid,W_out) + b_out
#Step 4) Loss Function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y))
#Step 5) Create optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=.03).minimize(cost)
'''
#Step 6) Create Session
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for ep in range(epochs):
for steps in range(num_steps):
batch_x,batch_y = mnist.train.next_batch(100)
sess.run(optimizer,feed_dict={x:batch_x,y_true:batch_y})
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_true,1))
acc = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
print(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels}))
'''
|
[
"ravenengineerryan@gmail.com"
] |
ravenengineerryan@gmail.com
|
9e9b7c87c900888217a6f692daed3148757193f4
|
e9f2703cdc62322cb62bbfb40f1e2cbf1a9d4896
|
/otros/principiante.py
|
c7079d11799122997760619614389521798832e9
|
[] |
no_license
|
ileinOriana/automation
|
644ff627d271eee6567dfc77bb243a6a1b615140
|
644235a182c3d3f9493ef75303c0559dbc2063cf
|
refs/heads/master
| 2023-05-07T21:25:48.478488
| 2020-08-17T16:42:50
| 2020-08-17T16:42:50
| 275,272,036
| 0
| 0
| null | 2021-06-02T02:26:13
| 2020-06-27T00:29:32
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
import random
#Roll a five-sided die.
#If it is 1, you win a flower.
#If it is 2, you win a tattoo.
#If it is 3, you win a book.
#If it is 4, you win a trip.
#If it is 5, you win nothing.
def lanzar_dados():
    """Roll the five-sided die and return the result (1-5 inclusive)."""
    return random.randint(1, 5)
def descifrar_premio(num):
    """Print the prize matching die value *num*; 5 (or anything else) wins nothing."""
    premios = {
        1: 'ganaste una flor',
        2: 'ganaste un tatuaje',
        3: 'ganaste un libro',
        4: 'ganaste un viaje',
    }
    # Any value outside 1-4 falls through to the "no prize" message,
    # exactly like the original if/elif chain's else branch.
    print(premios.get(num, 'no ganaste nada'))
# Roll once, show the die value, then announce the matching prize.
dado = lanzar_dados()
print('tu dado es ', dado)
descifrar_premio(dado)
|
[
"ileinoriana@gmail.com"
] |
ileinoriana@gmail.com
|
89253de93c1f759ccafbc17016927757d6aa8dc8
|
6be61b1e55ef585ddd3bacbea70ed96b054cd9b7
|
/设计模式/结构类模式/代理模式.py
|
714f278ba9b883e39b41d7b5fbcc6a3c9d2884b5
|
[
"MIT"
] |
permissive
|
zchq88/mylearning
|
4a922c0337a840ea7cacdc3998907bfe4836ecd8
|
4b6bf343b40940f6c03321dcd7f075923853ce7c
|
refs/heads/master
| 2021-04-26T23:32:57.862368
| 2018-05-04T02:37:32
| 2018-05-04T02:37:32
| 124,012,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,836
|
py
|
# Proxy pattern: provide a surrogate for another object to control access to it.
class GamePlayer:
    """A game player that can log in, fight bosses and level up.

    Levelling up is guarded by a "forced proxy": ``upgrade`` only takes
    effect while ``_Prox`` is set by a proxy object (see GamePlayerProxy).
    """

    name = ""
    level = 0
    _Prox = None  # forced-proxy marker: set by a proxy while it delegates

    def __init__(self, _name):
        self.name = _name

    def login(self, user):
        # User-facing text intentionally kept in Chinese.
        print("登录名为" + user + "登录" + self.name)

    def killBoss(self):
        print(self.name + "打怪")

    def upgrade(self):
        # Only level up while a proxy has marked itself active; otherwise
        # tell the caller to use a proxy.
        if self.isProx():
            self.level += 1
            print(self.name + "升级" + str(self.level))
        else:
            print(self.name + "请使用代理升级")

    def isProx(self):
        # `is not None` replaces the original `not self._Prox == None`
        # (PEP 8: comparisons to None use identity, not equality).
        return self._Prox is not None
# Ordinary (static) proxy
class GamePlayerProxy(GamePlayer):
    """Stands in for a concrete GamePlayer and delegates every action to it."""

    _gamePlayer = None  # the real player being proxied

    def __init__(self, Object):
        self._gamePlayer = Object

    def login(self, user):
        self._gamePlayer.login(user)

    def killBoss(self):
        # Announce that the hired proxy is doing the fighting, then delegate.
        print("代练代理:", end='')
        self._gamePlayer.killBoss()

    def upgrade(self):
        target = self._gamePlayer
        target._Prox = self   # forced proxy: mark ourselves before delegating
        target.upgrade()
        target._Prox = None   # forced proxy: clear the mark afterwards
# Dynamic proxy: copies the target's public callables onto itself at
# construction time, wrapping selected methods with extra behaviour.
class DynamicProxy:
    # The wrapped (proxied) target object.
    _obj = None
    def __init__(self, Object):
        self._obj = Object
        # Walk every non-dunder attribute of the target and mirror it here;
        # killBoss/upgrade get wrapping closures, everything else is copied.
        for fun in dir(Object):
            if "__" not in fun:
                _attr = getattr(Object, fun)
                if fun == "killBoss":
                    def function():
                        print("代练动态代理:", end='')
                        getattr(self._obj, "killBoss")()
                    _newattr = function
                elif fun == "upgrade":
                    def function():
                        self._obj._Prox = self  # forced proxy: mark before delegating
                        getattr(self._obj, "upgrade")()
                        self._obj._Prox = None  # forced proxy: clear afterwards
                    _newattr = function
                else:
                    _newattr = _attr
                setattr(self, fun, _newattr)
if __name__ == "__main__":
    # Exercise each variant with the same script: login, fight, upgrade.
    def run(object, _name):
        object.login(_name)
        object.killBoss()
        object.upgrade()
        print("------------------" + str(id(object)))
    player = GamePlayer("玩家1")
    run(player, "A")
    playerproxy = GamePlayerProxy(player)
    run(playerproxy, "B")
    dynamicproxy = DynamicProxy(player)
    run(dynamicproxy, "C")
    run(player, "A")
# Point 1: virtual proxy — instantiate the real subject only when actually used.
# Point 2: business logic stays focused; pre/post processing lives in the proxy.
# Point 3: proxies make later extension easy.
# Point 4: dynamic proxies can wrap the same cross-cutting logic around different types (audit-style needs).
# Point 5: forced proxy — certain operations are constrained to go through the proxy.
# Point 6: chiefly a tool for aspect-oriented programming.
|
[
"zchq88@aliyun.com"
] |
zchq88@aliyun.com
|
78b7f6cd4a781062166ec98bef378eb05de88bdb
|
67bdc707db11c32a06344b180eb2ab0165161be7
|
/random_forest/timing.py
|
0e0bacddd9bb34f75211464560c835f1b3c90a5e
|
[] |
no_license
|
leoneong/fyp
|
1f754ef7d6639281cf591a5c3ebc60654ce884b2
|
84aaaf5b186f025fd9697823d4c8b42ffa62e200
|
refs/heads/master
| 2023-03-10T01:17:39.406475
| 2020-10-10T04:03:28
| 2020-10-10T04:03:28
| 258,401,222
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
import atexit
from _functools import reduce
# time.clock was removed in Python 3.8; fall back to perf_counter there.
try:
    from time import clock
except ImportError:
    from time import perf_counter as clock
def secondsToStr(t):
    """Format a duration of *t* seconds as 'H:MM:SS.mmm'."""
    remainder, ms = divmod(t * 1000, 1000)
    remainder, secs = divmod(remainder, 60)
    hours, mins = divmod(remainder, 60)
    # %d truncates the float components, matching the original reduce chain.
    return "%d:%02d:%02d.%03d" % (hours, mins, secs, ms)
line = "="*40
def log(s, elapsed=None):
    """Print a timestamped banner for message *s*.

    :param s: message to log.
    :param elapsed: optional pre-formatted duration string
        (see secondsToStr); printed as an "Elapsed time" line when given.
    """
    print(line)
    print(secondsToStr(clock()), '-', s)
    if elapsed:
        print("Elapsed time:", elapsed)
    print(line)
    # Was a bare `print` (a Python-2 leftover that is a no-op expression
    # in Python 3); print() restores the intended blank separator line.
    print()
def endlog():
    """Log the final banner with the total elapsed runtime (hooked via atexit)."""
    elapsed = clock() - start
    log("End Program", secondsToStr(elapsed))
def now():
    """Return the current clock reading formatted as 'H:MM:SS.mmm'."""
    timestamp = clock()
    return secondsToStr(timestamp)
start = clock()           # module-level start time, read again by endlog()
atexit.register(endlog)   # print the closing banner automatically at interpreter exit
log("Start Program")
|
[
"hippoleone@outlook.com"
] |
hippoleone@outlook.com
|
e3be99e1c6547843b797fea330aa576499260d31
|
99a4e7a4db3a3e062c0b08a5462749a28f3f7a39
|
/core/utils/make_joint_dataset.py
|
592af25331103bb288cfcb090d2dcd893614f3bb
|
[] |
no_license
|
B-Step62/pytorch-motiongan-open
|
f85c1481363230826e9094e1c323ad90f0922744
|
4aefe2c427b88f357e8894d309ff46602e109001
|
refs/heads/master
| 2021-03-20T23:22:49.591472
| 2020-03-15T10:34:54
| 2020-03-15T10:34:54
| 247,241,734
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
import os
import sys
import math
import subprocess
import cv2
from collections import OrderedDict
import numpy as np
import core.utils.bvh_to_joint as btoj
BVH_ROOT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'  # input tree of .bvh files
OUT = './data/bvh/Edi_Mocap_Data/Iwan_style_data'  # output tree (same as input here, so no copying happens)
def main():
    """Convert every .bvh file under BVH_ROOT into a .npy joint-data array.

    Walks BVH_ROOT recursively; when OUT differs from BVH_ROOT, each .bvh
    file is first mirrored into the matching sub-tree under OUT.  The
    skeleton layout is taken from the first file found and assumed to be
    shared by all files (TODO confirm with the data set).
    """
    # Local import: shutil was never imported at module level, so the copy
    # branch below raised NameError whenever BVH_ROOT != OUT.
    import shutil

    root_depth = BVH_ROOT.count(os.path.sep)
    bvh_paths = []
    out_dir = OUT
    for (root, dirs, files) in os.walk(BVH_ROOT):
        for origin_file in files:
            if not origin_file.endswith('.bvh'):
                continue
            # Output path is 'out' + ('origin_path' - 'root')
            if BVH_ROOT != OUT:
                post = root.split(os.path.sep)[root_depth:]
                out_dir = OUT + ''.join([os.path.sep + p for p in post])
                if not os.path.exists(out_dir):
                    os.makedirs(out_dir)
                # If saving to a different directory, copy the original bvh
                shutil.copy(os.path.join(root, origin_file), os.path.join(out_dir, origin_file))
                bvh_paths.append(os.path.join(out_dir, origin_file))
            else:
                bvh_paths.append(os.path.join(root, origin_file))

    # Fail with a clear message instead of an IndexError on bvh_paths[0].
    if not bvh_paths:
        raise FileNotFoundError('no .bvh files found under %s' % BVH_ROOT)

    skelton, non_end_bones, joints_to_index, permute_xyz_order = btoj.get_standard_format(bvh_paths[0])
    # Loop-invariant: the zero-length-bone table depends only on the skeleton,
    # so compute it once instead of once per file.
    _, non_zero_joint_to_index = btoj.cut_zero_length_bone(skelton, joints_to_index)
    for bvh_path in bvh_paths:
        format_data = btoj.create_data(bvh_path, skelton, joints_to_index)
        npy_path = os.path.splitext(bvh_path)[0] + '.npy'
        np.save(npy_path, format_data)
        print(npy_path, format_data.shape)
|
[
"bsatbeyp@gmail.com"
] |
bsatbeyp@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.