blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b645ed1a0ad19262304bef16a69381cbb05cbc2c
|
4a211e279ec89239033c5fe2d6d8d3e49b48d369
|
/salvo/src/lib/job_control_loader.py
|
d179d460ec8b996e850b26e0c4f04fbb774d9d79
|
[
"Apache-2.0"
] |
permissive
|
envoyproxy/envoy-perf
|
cfb1e8f7af806600f11ebc235c1a72939420b087
|
d131bc2f1a7f8ae4f640da30fd30c027735d9788
|
refs/heads/main
| 2023-08-31T14:02:50.891888
| 2023-08-24T16:19:26
| 2023-08-24T16:19:26
| 94,845,161
| 109
| 29
|
Apache-2.0
| 2023-08-24T16:19:28
| 2017-06-20T03:20:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,111
|
py
|
"""This object abstracts the loading of json strings into protobuf objects."""
import json
import logging
import yaml
from google.protobuf import json_format
import api.control_pb2 as proto_control
log = logging.getLogger(__name__)
def _load_json_doc(filename: str) -> proto_control.JobControl:
    """Load a disk file as JSON.

    This function reads the specified filename and parses the contents
    as JSON.

    Args:
        filename: The file whose contents are to be read as JSON data

    Returns:
        A JobControl object populated with the contents from the
        specified JSON file, or None if the file is missing or does not
        parse (the error is logged rather than raised).
    """
    contents = None
    # Include the filename so log output identifies which file was involved
    # (the previous messages carried a "(unknown)" placeholder instead).
    log.debug(f"Opening JSON file {filename}")
    try:
        with open(filename, 'r') as json_doc:
            contents = json_format.Parse(json_doc.read(), proto_control.JobControl())
    except FileNotFoundError as file_not_found:
        log.exception(f"Unable to load {filename}: {file_not_found}")
    except json_format.Error as json_parse_error:
        log.exception(f"Unable to parse JSON contents {filename}: {json_parse_error}")
    return contents
def _load_yaml_doc(filename: str) -> proto_control.JobControl:
    """Load a disk file as YAML.

    This function reads the specified filename and parses the contents
    as YAML, then round-trips the result through JSON so that protobuf's
    json_format can populate the JobControl message.

    Args:
        filename: The file whose contents are to be read as YAML data

    Returns:
        A JobControl object populated with the contents from the
        specified YAML file, or None if the file is missing or does not
        parse (the error is logged rather than raised).
    """
    # Include the filename so log output identifies which file was involved
    # (the previous messages carried a "(unknown)" placeholder instead).
    log.debug(f"Opening YAML file {filename}")
    contents = None
    try:
        with open(filename, 'r') as yaml_doc:
            contents = yaml.safe_load(yaml_doc.read())
            # protobuf has no YAML parser, so serialize the parsed YAML to a
            # JSON string and let json_format do the message population.
            contents = json_format.Parse(json.dumps(contents), proto_control.JobControl())
    except FileNotFoundError as file_not_found:
        log.exception(f"Unable to load {filename}: {file_not_found}")
    except json_format.Error as yaml_parse_error:
        log.exception(f"Unable to parse YAML contents {filename}: {yaml_parse_error}")
    return contents
def load_control_doc(filename: str) -> proto_control.JobControl:
    """Return a JobControl object from the identified filename.

    This function uses the extension of the specified file to read its
    contents as YAML or JSON.

    Args:
        filename: The file whose contents are to be read and parsed as
            a Job Control object.

    Returns:
        A JobControl object populated with the contents from the
        specified filename, or None if the file could not be loaded or
        parsed by either format.
    """
    contents = None
    # Try loading the contents based on the file extension
    if filename.endswith('.json'):
        log.debug(f"Loading JSON file {filename}")
        return _load_json_doc(filename)
    elif filename.endswith('.yaml'):
        log.debug(f"Loading YAML file {filename}")
        return _load_yaml_doc(filename)
    else:
        log.debug(f"Auto-detecting contents of {filename}")
        # Attempt to autodetect the contents.
        # NOTE(review): _load_json_doc/_load_yaml_doc already catch
        # json_format.Error internally and return None, so these except
        # clauses are defensive; the `if not contents` fallback is what
        # actually drives the JSON -> YAML retry.
        try:
            contents = _load_json_doc(filename)
        except json_format.Error:
            log.info(f"Parsing {filename} as JSON failed. Trying YAML")
        if not contents:
            try:
                contents = _load_yaml_doc(filename)
            except json_format.Error:
                log.info(f"Parsing {filename} as YAML failed.")
    return contents
|
[
"noreply@github.com"
] |
noreply@github.com
|
7477820069e7127b7679f7bebbb2f0d9efd1638d
|
3c5044c77a6c01e1a70b1722e8a860851056f28c
|
/16-1.py
|
97b5250c34e13ea99567b0f0574dcb5660117bae
|
[] |
no_license
|
MANAkudo/pyhton
|
2f8c10bbf0b98babb5fea2ecdc4c2c430668e6fd
|
90fa56fb44e7e02d05250543375a292dfef28eca
|
refs/heads/master
| 2023-08-03T19:57:48.436313
| 2021-09-22T01:50:32
| 2021-09-22T01:50:32
| 409,027,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 116
|
py
|
# Write the numbers 1 through 5, one per line, to the exercise output file.
with open("16_1_read.txt", 'w') as out_file:
    out_file.writelines(f"{number}\n" for number in range(1, 6))
|
[
"ykh2135248@stu.o-hara.ac.jp"
] |
ykh2135248@stu.o-hara.ac.jp
|
f64548cc59fb2b2294373d25879cdab04e508e9f
|
d121775327c0c2e1d7210eab0f52d1818c56aa0c
|
/Wikipedia_Scraper/venv/bin/wheel
|
12e896c57139377e445ecb2d018a31e72715bb96
|
[] |
no_license
|
shmoss/Python-Backend-TownSounds
|
f396d8fbd55b08730286109dc27c1e948a33c9c8
|
ba38bed2894ac45eb344c8fa2a23a49daa6fd3f0
|
refs/heads/master
| 2021-07-15T07:52:05.267561
| 2021-07-08T21:28:37
| 2021-07-08T21:28:37
| 180,048,120
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
#!/Users/starrmoss/PycharmProjects/hi/Wikipedia_Scraper/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim for the 'wheel' package inside this virtualenv
# (auto-generated style entry point; presumably created by pip/setuptools --
# TODO confirm before hand-editing).
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Strip an optional trailing '-script.py'/'-script.pyw' or '.exe' from
    # argv[0] so the program name wheel's CLI reports matches the plain
    # command name, then run the CLI and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"shmoss@wisc.edu"
] |
shmoss@wisc.edu
|
|
572850f5eb177b3a39baee3d35b40e3eda54643a
|
4e879398eaecdc19f056ee538d0732b2e92aa84f
|
/SistemaDiscusiones/urls.py
|
7c2bedce1ec87129887c060f542029d86a8b4848
|
[] |
no_license
|
acamposruiz/localdevask
|
9311566ab2526e2b6966374e43e7d198fe24045a
|
867cfafff33fc214d68c499bd7e97b4f77dcd3b0
|
refs/heads/master
| 2021-01-25T04:01:16.308722
| 2014-04-28T03:53:46
| 2014-04-28T03:53:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin

# Register each installed app's admin.py model admins.
admin.autodiscover()

# Root URLconf. Routes match top-down, so the r'^' catch-alls below delegate
# to app-level URLconfs in order: home first, then users, then social auth.
# NOTE(review): patterns() is the pre-Django-1.8 API; this file targets an
# old Django release -- confirm before upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'SistemaDiscusiones.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^', include('apps.home.urls', namespace="home")),
    url(r'^', include('apps.users.urls', namespace="users")),
    # PYTHON SOCIAL AUTH
    url('', include('social.apps.django_app.urls', namespace="social")),
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"acamposruiz@gmail.com"
] |
acamposruiz@gmail.com
|
e34b387068ca8ec0ce9a89b18f694f3e87b653fb
|
b220bd0c6c7fe6fcea00ac2ae5195c1887b8a37e
|
/database/dbconn.py
|
fa05f16ecbde4cfecc85dfd3b816446e8a13ae57
|
[] |
no_license
|
itwastheband/AO3rdr-backend
|
8f624ddeefbc09995f1784c3092fc1ebcbedbff7
|
19c1ed8ecdeea3250a958006d260207c582cb371
|
refs/heads/master
| 2022-06-17T03:47:45.741986
| 2020-05-03T23:34:14
| 2020-05-03T23:34:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,198
|
py
|
import os
import boto
from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey
from boto.dynamodb2.items import Item
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.table import Table
from boto.dynamodb2.exceptions import ItemNotFound
from decimal import Decimal
from flask import _app_ctx_stack
import time
class DBconn(object):
    """Wrapper around a boto2 DynamoDB-v2 connection to the 'ao3rdr-works'
    table, keyed by (user_id, work_id).

    NOTE(review): this is Python 2 era code (see dict.iteritems() in
    update_work); it will not run unmodified on Python 3.
    """
    def __init__(self):
        # Credentials come from the environment, never from code.
        aws_access_key_id = os.environ['S3_KEY'] # I AM OPS U NO GET MY KEYS
        aws_secret_access_key = os.environ['S3_SECRET'] # DIS IS MY JOB
        self._conn = DynamoDBConnection(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key)
        self.works_table = Table('ao3rdr-works', connection=self._conn)
        # Key fields that update_work must never overwrite.
        self.immutable_fields = ['work_id', 'user_id']
    def get_user(self, user_id):
        """Return [user_id] if the user's special 'settings' row exists,
        otherwise an empty list."""
        res = self.works_table.query_2(
            user_id__eq=user_id, work_id__eq='settings', attributes=['user_id'])
        out = []
        for entry in res:
            out.append(self.serialize(entry)['user_id'])
        return out
    def add_user(self, user_id):
        """ Adding a user adds a special "work" which is used to store a user's
        settings.
        """
        return self.works_table.put_item(data={
            'user_id': user_id,
            'work_id': 'settings',
            'created': time.time()
            })
    def update_work(self, user_id, work_id, data):
        """Merge *data* into an existing item, skipping the key fields, and
        stamp 'db_updated'. Raises if the item does not exist."""
        item = self.works_table.get_item(user_id=user_id, work_id=work_id)
        # update the item
        for key, value in data.iteritems():  # Python 2 dict API
            if key not in self.immutable_fields:
                item[key] = value
        item['db_updated'] = time.time()
        # partial_save only writes the modified attributes.
        item.partial_save()
    def create_work(self, user_id, work_id, data):
        """Insert a new work row, stamping 'created' if absent."""
        data['user_id'] = user_id
        data['work_id'] = work_id
        if 'created' not in data:
            data['created'] = time.time()
        self.works_table.put_item(data)
    def batch_update(self, data_list):
        """Write many items in one batched request."""
        with self.works_table.batch_write() as batch:
            for data in data_list:
                batch.put_item(data=data)
    def get_work(self, user_id, work_id):
        """Return the work as a plain dict, or {} if it does not exist."""
        try:
            res = self.works_table.get_item(user_id=user_id, work_id=work_id)
        except ItemNotFound:
            return {}
        return self.serialize(res)
    def get_all_works(self, user_id):
        """Yield every work belonging to *user_id* as plain dicts."""
        res = self.works_table.query_2(user_id__eq=user_id)
        for entry in res:
            yield self.serialize(entry)
    def close(self):
        # Close the underlying boto connection.
        self._conn.close()
    def serialize(self, item):
        """Convert a boto Item into plain Python types.

        Delegates to the module-level serialize() function; the method name
        shadows it in the class namespace, but inside this body the global
        function is what resolves.
        """
        out = serialize(dict(item))
        return out
def serialize(item):
    """Recursively convert a DynamoDB value into plain Python types.

    dicts are rebuilt with serialized values, sets and lists become lists
    of serialized elements, Decimals become floats, and every other value
    is returned unchanged.
    """
    if isinstance(item, dict):
        return {key: serialize(value) for key, value in item.items()}
    if isinstance(item, (set, list)):
        return [serialize(element) for element in item]
    if isinstance(item, Decimal):
        return float(item)
    return item
def get_db():
    """Return the DBconn bound to the current Flask application context,
    creating and caching it on first use."""
    ctx = _app_ctx_stack.top
    if not hasattr(ctx, 'db_conn'):
        ctx.db_conn = DBconn()
    return ctx.db_conn
'''
# Tips for working with DynamoDB
works_table = Table('ao3rdr-works', connection=conn)
# put_item has param overwrite=False
test_data = {
'user_id': 'testuser',
'work_id': '123456',
'rating': 5
}
works_table.put_item(test_data)
# When using get item, must use both primary and secondary keys
works_table.get_item(user_id='testuser', work_id='123456')
# To get by user, query is OK
res = works_table.query_2(user_id__eq='testuser')
for entry in res:
print entry
# entry useful fields: _data, keys(), and index like a dict, eg entry['work_id']
# Use the secondary index
res = works_table.query_2(rating__eq=5, index='rating-index')
for entry in res:
print entry['work_id']
# get_item(table_name, key, attributes_to_get=None, consistent_read=False, object_hook=None)
# put_item(table_name, item, expected=None, return_values=None, object_hook=None)
'''
|
[
"darthkrallt@gmail.com"
] |
darthkrallt@gmail.com
|
9cc95780a34d3bb2c8acb0cde93d72a744ba1ce1
|
5f596cf8fc95e72caa87fcd51aa2446f9e6fc0d4
|
/tasks.py
|
01c26b63c44c8cec31f1ad19c349b4ea31ffa67d
|
[
"MIT"
] |
permissive
|
jakobzeitler/causalinfo
|
265f34f79a13c6ee9ce1173aae202e960766327f
|
a8e6b6e9dae8dfd4d2e18010908c4905089538a1
|
refs/heads/master
| 2020-03-23T17:24:25.087306
| 2017-01-05T08:07:40
| 2017-01-05T08:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,699
|
py
|
# -*- coding: utf-8 -*-
from invoke import task, run
import os
import sys
@task
def test(cover=False):
    """Run tests (use --cover for coverage tests)"""
    # Choose the pytest invocation up front; pty=True keeps colored output.
    command = ('py.test --cov-report term-missing --cov=causalinfo tests'
               if cover else 'py.test -v')
    run(command, pty=True)
@task
def clean():
    """Clean all build and cruft files"""
    print("Removing python cruft ...")
    # Bytecode and editor backup files share the same find/rm shape.
    for pattern in ('*.pyc', '*.pyo', '*~'):
        run("find . -name '%s' -exec rm -f {} +" % pattern)
    run("find . -name '__pycache__' -exec rm -fr {} +")
    print("Removing build ...")
    run("rm -rf build")
    run("rm -rf dist")
    run("rm -rf *.egg-info")
    print("Removing IPython Notebook checkpoints...")
    # BUG FIX: Jupyter/IPython stores checkpoints in '.ipynb_checkpoints';
    # the previous pattern '__pynb_checkpoints__' never matched anything.
    run("find . -name '.ipynb_checkpoints' -exec rm -fr {} +")
    print("Removing generated html ...")
    run("rm -f README.html")
@task
def build():
    """Build the distribution"""
    # Build both distribution formats, hiding setup.py's verbose stdout.
    for dist_format in ('sdist', 'bdist_wheel'):
        print("Building %s ..." % dist_format)
        run('python setup.py %s' % dist_format, hide='out')
@task
def publish(release=False):
    """Publish to the cheeseshop."""
    # A real release targets PyPI proper; otherwise use the test index.
    # The assembled commands are identical to spelling out both branches.
    repo_flag = '' if release else '-r test '
    run('python setup.py %sregister' % repo_flag)
    run('twine upload %sdist/*.tar.gz' % repo_flag)
    run('twine upload %sdist/*.whl' % repo_flag)
@task
def readme(browse=True):
    """Render README.rst to README.html (requires docutils' rst2html.py);
    open the result unless --no-browse is given."""
    run('rst2html.py README.rst > README.html')
    if browse:
        # 'open' is the macOS launcher command.
        run('open README.html')
@task
def notebook():
    """Install the package in develop mode, launch an IPython notebook
    server from the notebooks/ folder, and uninstall the develop package
    when the server exits."""
    # Deferred imports: only needed when this task actually runs.
    from IPython.terminal.ipapp import launch_new_instance
    from socket import gethostname
    import warnings
    print('Installing in develop mode')
    run('python setup.py develop', hide='out')
    print('Changing to notebooks folder')
    here = os.path.dirname(__file__)
    os.chdir(os.path.join(here, 'notebooks'))
    # Save argv so it can be restored after launch_new_instance consumes it.
    old_argv = sys.argv[:]
    # Taken from here:
    # http://stackoverflow.com/questions/
    # 26338688/start-ipython-notebook-with-python-file
    try:
        warnings.filterwarnings("ignore", module = "zmq.*")
        # launch_new_instance reads its CLI options from sys.argv, so build
        # the desired command line in place.
        sys.argv = ['ipython', 'notebook']
        sys.argv.append("--IPKernelApp.pylab='inline'")
        sys.argv.append("--NotebookApp.ip=" + gethostname())
        sys.argv.append("--NotebookApp.open_browser=True")
        print('Invoking "' + ' '.join(sys.argv) + '"')
        launch_new_instance()
    finally:
        # Not sure this is strictly necessary...
        sys.argv = old_argv
        os.chdir(here)
        print('Removing development package...')
        run('python setup.py develop -u', hide='out')
|
[
"brett.calcott@gmail.com"
] |
brett.calcott@gmail.com
|
2a2f1f71c97c0e8e03c4f0bcc38faa88410be7f6
|
7e2aefac7b540f4d4bad0fa6dd94dbcdad34d6a3
|
/modular/app.py
|
f7b681f53f3b0400a266359154603d66d9fc1cbf
|
[] |
no_license
|
imajaydwivedi/Python-BootCamp
|
ade3a3557d51b7b25a7b2ba3b79952a622896b29
|
2f5e1629a160a33017c9ab548b9d7c88ad57c917
|
refs/heads/master
| 2023-06-29T04:48:20.744027
| 2023-06-13T15:11:36
| 2023-06-13T15:11:36
| 250,841,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
import services.directory as directory
if __name__ == "__main__":
directory.add({"name":"Krishna", "phone": 1234})
directory.add({"name":"Mohan", "phone": 2345})
directory.add({"name":"Koyya", "phone": 3456})
print(directory.list())
print(directory.count())
print(directory.find_by(1))
print(directory.search_by("Koyya"))
directory.remove_by(1)
print(directory.list())
|
[
"dwivedaj@arcesium.com"
] |
dwivedaj@arcesium.com
|
7a4a7b2829526271df0ee298213f785025e1cafc
|
a473fdce56e422137f0f14514081bf7c10e9aa90
|
/source/data_processing.py
|
f58589cc42ed19b0dfa0cd0c580dc9171fd81ec2
|
[] |
no_license
|
mrandic/Bike-Rental-Case
|
bc264d02d115db178ff35d67b0c98d4644dbf954
|
79d29a5a5a1a914e5936e688d2b1ed850373b301
|
refs/heads/main
| 2023-08-15T02:10:52.483478
| 2021-09-21T23:34:44
| 2021-09-21T23:34:44
| 409,000,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,767
|
py
|
import pandas as pd
import numpy as np
from dateutil.parser import parse
def processHubwayTripsData(hubway_trips_df):
    """
    Create initial features from hubway trips data

    Mutates and returns the given dataframe: parses 'start_date' and adds
    calendar/time-of-day columns derived from it.
    :param hubway_trips_df: Hubway trips data
    :return: Feature engineered dataframe
    """
    # Parse the raw date strings with dateutil; pandas then stores the column
    # as datetime64, so the lambdas below receive pandas Timestamps -- which
    # is why x.dayofweek works even though datetime.datetime has no such
    # attribute. NOTE(review): this relies on pandas' dtype inference;
    # confirm the behavior holds if pandas is up/downgraded.
    hubway_trips_df['start_date'] = hubway_trips_df['start_date'].apply(lambda x: parse(x))
    hubway_trips_df['year_start'] = hubway_trips_df['start_date'].apply(lambda x: x.year)
    hubway_trips_df['month_start'] = hubway_trips_df['start_date'].apply(lambda x: x.month)
    hubway_trips_df['weekday_start'] = hubway_trips_df['start_date'].apply(lambda x: x.dayofweek)
    hubway_trips_df['day_start'] = hubway_trips_df['start_date'].apply(lambda x: x.day)
    hubway_trips_df['hour_start'] = hubway_trips_df['start_date'].apply(lambda x: x.hour)
    # Rename so the trip's own status does not clash with the stations'
    # 'status' column after the later merges in createMasterDataSet.
    hubway_trips_df = hubway_trips_df.rename(columns={'status': 'trip_status'})
    return hubway_trips_df
def mapFrequentPostalCodeToGPSData():
    """
    Manually map approximate postal code GPS locations obtained from OpenStreetMap service

    The leading apostrophe in each zip code mirrors the formatting used in
    the Hubway trips data, so the later merge on 'zip_code' lines up.
    :return: Feature engineered dataframe
    """
    # Renamed from 'dict' so the builtin is no longer shadowed.
    zip_code_gps = {'zip_code': ["'02118", "'02139", "'02215", "'02116", "'02115", "'02138", "'02114", "'02143", "'02113", "'02134" ],
                    'zip_code_lat': [42.3407, 42.3643, 42.3476, 42.3514, 42.3480, 42.34733, 42.36033, 42.38371, 42.36285, 42.35595 ],
                    'zip_code_lng': [-71.0708, -71.1022, -71.1009, -71.0776, -71.0885, -71.16867, -71.06732, -71.10213, -71.05518, -71.13411 ]
                    }
    return pd.DataFrame(data=zip_code_gps)
def createMasterDataSet(hubway_trips_df, hubway_stations_df, weather_df, zip_code_gps_df):
    """
    Create master dataset from all available datasets
    :param hubway_trips_df: Hubway trips data
    :param hubway_stations_df: Hubway stations data
    :param weather_df: Weather data for Boston (additional added data source)
    :param zip_code_gps_df: ZIp code GPS locations (additional added data source)
    :return: Master dataset
    """
    trips = processHubwayTripsData(hubway_trips_df)
    # Attach start-station attributes, suffixing their columns with '_start'.
    start_station_columns = {'id': 'id_start', 'terminal': 'terminal_start',
                             'station': 'station_start', 'municipal': 'municipal_start',
                             'lat': 'lat_start', 'lng': 'lng_start', 'status': 'status_start'}
    merged = pd.merge(trips, hubway_stations_df, how='left',
                      left_on='strt_statn', right_on='id').rename(columns=start_station_columns)
    # Attach end-station attributes the same way, suffixed with '_end'.
    end_station_columns = {'id': 'id_end', 'terminal': 'terminal_end',
                           'station': 'station_end', 'municipal': 'municipal_end',
                           'lat': 'lat_end', 'lng': 'lng_end', 'status': 'status_end'}
    merged = pd.merge(merged, hubway_stations_df, how='left',
                      left_on='end_statn', right_on='id').rename(columns=end_station_columns)
    # Join daily weather on the trip's start date.
    merged = pd.merge(merged, weather_df, how='left',
                      left_on=['year_start', 'month_start', 'day_start'],
                      right_on=['Year', 'Month', 'Day'])
    # Join approximate GPS coordinates for the rider's reported zip code.
    merged = pd.merge(merged, zip_code_gps_df, how='left',
                      left_on=['zip_code'], right_on=['zip_code'])
    return merged
def importData():
    """
    Imports all datasets into working memory using pandas

    Paths are resolved relative to the current working directory.
    :return: Pandas dataframes for further analysis
    """
    stations = pd.read_csv('hubway_stations.csv', sep=',').sort_values(['station'], ascending=True)
    trips = pd.read_csv('hubway_trips.csv', sep=',')
    weather = pd.read_csv('boston_weather.csv', sep=',')
    # Zip-code coordinates are hard-coded rather than read from disk.
    return trips, stations, weather, mapFrequentPostalCodeToGPSData()
def _binned(series, bins, labels):
    """Cut *series* into labeled (left-exclusive, right-inclusive) ranges.

    Shared helper extracted because createFeatures repeated the same
    bins/names/pd.cut boilerplate seven times with reused variable names.
    """
    return pd.cut(series, bins, labels=labels)


def createFeatures(master_df):
    """
    Create initial set of features to be used in the project
    :param master_df: Master dataframe
    :return: Master dataframe
    """
    # flag whether user has started and finished bike ride on the same station
    master_df['same_st_flg'] = np.where(master_df['strt_statn'] == master_df['end_statn'], 1, 0)
    # age feature: only Registered users carry a birth date, so casual
    # subscribers end up with NaN here
    registered = master_df[(master_df['subsc_type'] == 'Registered')]
    master_df['age'] = registered['year_start'] - registered['birth_date']
    # Binned weather features
    master_df['Avg Visibility Range (mi)'] = _binned(
        master_df['Avg Visibility (mi)'],
        [0, 2, 4, 6, 8, np.inf], ['0-2', '2-4', '4-6', '6-8', '8+'])
    master_df['Avg Temp Range (F)'] = _binned(
        master_df['Avg Temp (F)'],
        [20, 40, 60, 80, np.inf], ['20-40', '40-60', '60-80', '80+'])
    master_df['Avg Humidity Range (%)'] = _binned(
        master_df['Avg Humidity (%)'],
        [20, 40, 60, 80, np.inf], ['20-40', '40-60', '60-80', '80+'])
    master_df['Avg Wind Range (mph)'] = _binned(
        master_df['Avg Wind (mph)'],
        [0, 5, 10, 15, np.inf], ['0-5', '5-10', '10-15', '15+'])
    master_df['Avg Dew Point Range (F)'] = _binned(
        master_df['Avg Dew Point (F)'],
        [0, 20, 40, 60, np.inf], ['0-20', '20-40', '40-60', '60+'])
    # Binned Age feature (Registered users only; others stay NaN)
    master_df['Age Range'] = _binned(
        master_df[(master_df['subsc_type'] == 'Registered')]['age'],
        [0, 20, 40, 60, np.inf], ['0-20', '20-40', '40-60', '60+'])
    # Per-bike usage stats: ride count and mean ride duration, busiest first
    bike_agg = master_df[['bike_nr', 'seq_id', 'duration']].groupby(by=['bike_nr']).agg(
        bike_use_cnt=('seq_id', 'count'),
        bike_ride_duration_avg=('duration', 'mean')).sort_values(
        ["bike_use_cnt"], ascending=False).reset_index()
    master_df = pd.merge(master_df, bike_agg, how='left', left_on=['bike_nr'], right_on=['bike_nr'])
    # Binned bike use frequency range
    master_df['Bike Use Range'] = _binned(
        master_df['bike_use_cnt'],
        [0, 500, 1000, 1500, np.inf], ['0-500', '500-1000', '1000-1500', '1500+'])
    # Binned bike time usage range
    master_df['Bike Avg Time Use Range'] = _binned(
        master_df['bike_ride_duration_avg'],
        [500, 1000, 1500, np.inf], ['500-1000', '1000-1500', '1500+'])
    # Clear dataset from outliers (non-positive durations or above 3000s)
    master_df = master_df[(master_df["duration"] > 0) & (master_df["duration"] <= 3000)]
    return master_df
def renameColumns(feature_set):
    """
    Rename columns to standardized style
    :param feature_set: Feature dataframe
    :return: Feature dataframe with renamed columns
    """
    # NOTE(review): 'sbowfall_in' looks like a typo for 'snowfall_in' but is
    # preserved because downstream code may already reference the misspelling.
    column_map = {
        'lat_start': 'latitude',
        'lng_start': 'longitude',
        'year_start': 'year',
        'month_start': 'month',
        'weekday_start': 'weekday',
        'day_start': 'day',
        'hour_start': 'hour',
        'municipal_start': 'staton_municipality',
        'status_start': 'station_status',
        'Bike Use Range': 'bike_freq_use_range',
        'Bike Avg Time Use Range': 'bike_avg_dur_range',
        'Avg Temp (F)': 'avg_tmp_f',
        'Avg Dew Point (F)': 'avg_dew_point_f',
        'Avg Humidity (%)': 'avg_humidity_pct',
        'Avg Sea Level Press (in)': 'avg_sea_level_press_in',
        'Avg Visibility (mi)': 'avg_visibility_mi',
        'Avg Wind (mph)': 'avg_wind_mph',
        'Snowfall (in)': 'sbowfall_in',
        'Precip (in)': 'precip_in',
        'Events': 'weather_event',
    }
    return feature_set.rename(columns=column_map)
def featureSubset(master_df):
    """
    Create initial feature subset
    The rest of the variables are excluded after being proven to provide
    weak influence on variable importance while building the model.
    :param master_df: Master dataframe
    :return: Master dataframe with filtered columns
    """
    selected_columns = [
        'municipal_start', 'lat_start', 'lng_start', 'status_start',
        'trip_status', 'year_start', 'month_start', 'weekday_start',
        'day_start', 'hour_start', 'subsc_type', 'zip_code', 'gender', 'age',
        'Bike Use Range', 'Bike Avg Time Use Range',
        'Avg Temp (F)', 'Avg Dew Point (F)', 'Avg Humidity (%)',
        'Avg Sea Level Press (in)', 'Avg Visibility (mi)', 'Avg Wind (mph)',
        'Snowfall (in)', 'Precip (in)', 'Events',
        'duration',
    ]
    return master_df[selected_columns]
def setFeatureCategoryType(feature_set):
    """
    Cast feature data type to a category type
    This is needed for proper One Hot Encoding process
    :param feature_set: Feature dataframe
    :return: Feature dataframe with column types set as categorized
    """
    categorical_columns = (
        'bike_freq_use_range', 'bike_avg_dur_range', 'staton_municipality',
        'station_status', 'trip_status', 'subsc_type', 'zip_code', 'gender',
        'weather_event',
    )
    for column in categorical_columns:
        feature_set[column] = feature_set[column].astype('category')
    return feature_set
|
[
"milos.randic@telenor.no"
] |
milos.randic@telenor.no
|
fe69d824ce277807f6d3e0d5eaaff8a66490ae4b
|
b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4
|
/otp/src/level/ModelEntity.py
|
5850215d12244dd9e104ca4eebaf6cf5fd012828
|
[] |
no_license
|
satire6/Anesidora
|
da3a44e2a49b85252b87b612b435fb4970469583
|
0e7bfc1fe29fd595df0b982e40f94c30befb1ec7
|
refs/heads/master
| 2022-12-16T20:05:13.167119
| 2020-09-11T16:58:04
| 2020-09-11T17:02:06
| 294,751,966
| 89
| 32
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,052
|
py
|
from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
import BasicEntities
class ModelEntity(BasicEntities.NodePathEntity):
    """Level entity that loads a static model and parents it to itself.

    NOTE(review): the source this was recovered from had its indentation
    stripped; the post-load steps below are nested under `if self.model`
    so a failed load is a no-op -- confirm against upstream if available.
    """
    # Maps the 'loadType' attribute value to the Panda3D loader call used.
    LoadFuncs = {
        'loadModelCopy': loader.loadModelCopy,
        'loadModel': loader.loadModel,
        'loadModelOnce': loader.loadModelOnce,
        }
    def __init__(self, level, entId):
        # TODO: fill in default values automatically for missing attribs
        self.collisionsOnly = False
        self.loadType = 'loadModelCopy'
        self.flattenType = 'light'
        self.goonHatType = 'none'
        self.entInitialized = False
        BasicEntities.NodePathEntity.__init__(self, level, entId)
        self.entInitialized = True
        self.model = None
        self.loadModel()
    def destroy(self):
        # Release the model node before tearing down the base entity.
        if self.model:
            self.model.removeNode()
            del self.model
        BasicEntities.NodePathEntity.destroy(self)
    def loadModel(self):
        # Drop any previously loaded model first so reloads don't leak nodes.
        if self.model:
            self.model.removeNode()
            self.model = None
        if self.modelPath is None:
            return
        self.model = ModelEntity.LoadFuncs[self.loadType](self.modelPath)
        if self.model:
            self.model.reparentTo(self)
            # hide/show as appropriate
            if self.collisionsOnly:
                if __dev__:
                    # In dev builds keep the geometry faintly visible for editing.
                    self.model.setTransparency(1)
                    self.model.setColorScale(1,1,1,.1)
                else:
                    self.model.hide()
            else:
                self.model.show()
            # HACK SDN: special code for moving crate wall collisions down
            if self.modelPath in ("phase_9/models/cogHQ/woodCrateB.bam",
                                  "phase_9/models/cogHQ/metal_crateB.bam",
                                  "phase_10/models/cashbotHQ/CBMetalCrate.bam",
                                  "phase_10/models/cogHQ/CBMetalCrate2.bam",
                                  "phase_10/models/cashbotHQ/CBWoodCrate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate.bam",
                                  "phase_11/models/lawbotHQ/LB_metal_crate2.bam",
                                  ):
                # get rid of any scales
                #self.model.flattenLight()
                # move walls down
                cNode = self.find("**/wall")
                cNode.setZ(cNode, -.75)
                # duplicate the floor and move it down to create a
                # catch effect for low-hopped toons
                colNode = self.find("**/collision")
                floor = colNode.find("**/floor")
                floor2 = floor.copyTo(colNode)
                floor2.setZ(floor2, -.75)
            # Disabled code kept for reference (bare string, never executed):
            """
            # incorporate the entity's overall scale
            self.model.setScale(self.getScale())
            self.setScale(1)
            self.model.flattenLight()
            """
            # BUG FIX: was `is not 'none'` -- identity comparison against a
            # string literal only works by CPython interning accident (and
            # raises SyntaxWarning on modern CPython); use != for equality.
            if self.goonHatType != 'none':
                self.goonType = {'hardhat':'pg','security':'sg'}[self.goonHatType]
                self.hat = self.model
                ### this was copied from Goon.createHead
                if self.goonType == "pg":
                    self.hat.find("**/security_hat").hide()
                elif self.goonType == "sg":
                    self.hat.find("**/hard_hat").hide()
                ###
                del self.hat
                del self.goonType
            # Flatten per the configured aggressiveness to cut scene-graph nodes.
            if self.flattenType == 'light':
                self.model.flattenLight()
            elif self.flattenType == 'medium':
                self.model.flattenMedium()
            elif self.flattenType == 'strong':
                self.model.flattenStrong()
    def setModelPath(self, path):
        # Attribute setter invoked by the level editor; reload immediately.
        self.modelPath = path
        self.loadModel()
    def setCollisionsOnly(self, collisionsOnly):
        self.collisionsOnly = collisionsOnly
        self.loadModel()
    def setGoonHatType(self, goonHatType):
        self.goonHatType = goonHatType
        self.loadModel()
|
[
"66761962+satire6@users.noreply.github.com"
] |
66761962+satire6@users.noreply.github.com
|
f7cfc720c7204254c708dca38c4f7baee6ae12b1
|
dd126d6b82eb47d90950a355d4948047ae119f9c
|
/fixture/db.py
|
92f71b61a9231f7c5f9209c0af7120114c85c768
|
[] |
no_license
|
Korinsky/Python4QA_B24
|
9382c178a7e564272e2628426946ae087ec4ccdc
|
0c5d5f812a6cb858a3bf59e45745a7fce206fd7e
|
refs/heads/main
| 2023-07-14T05:36:57.307809
| 2021-08-18T13:05:14
| 2021-08-18T13:05:14
| 377,419,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
import pymysql
from model.group import Group
from model.contact import Contact
class DbFixture:
    """Test fixture that reads groups and contacts directly from the
    application's MySQL database via pymysql.

    The connection uses autocommit so every read observes the latest
    committed state without an explicit commit/rollback cycle.
    """
    def __init__(self, host, name, user, password):
        self.host = host
        self.name = name
        self.user = user
        self.password = password
        self.connection = pymysql.connect(host=host, database=name, user=user, password=password, autocommit=True)
    def get_groups_list(self):
        """Return every row of group_list as a Group model object."""
        # Renamed from 'list' so the builtin is no longer shadowed.
        groups = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
            for row in cursor:
                (group_id, group_name, header, footer) = row
                groups.append(Group(id=str(group_id), name=group_name, header=header, footer=footer))
        finally:
            cursor.close()
        return groups
    def get_contacts_list(self):
        """Return all non-deprecated addressbook rows as Contact objects."""
        contacts = []
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, firstname, lastname, address, email, email2, email3, home, mobile, work, phone2 from addressbook where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (contact_id, firstname, lastname, address, email, email2, email3, homephone, mobilephone, workphone, secondaryphone) = row
                contacts.append(Contact(id=str(contact_id), firstname=firstname, lastname=lastname, address=address, email=email, email2=email2, email3=email3,
                                        homephone=homephone, mobilephone=mobilephone, workphone=workphone, secondaryphone=secondaryphone))
        finally:
            cursor.close()
        return contacts
    def destroy(self):
        """Close the underlying database connection."""
        self.connection.close()
    def get_contact_in_group(self):
        """Return a dict mapping contact id -> list of its group ids,
        considering only non-deprecated membership rows."""
        # Renamed from 'dict' so the builtin is no longer shadowed.
        groups_by_contact = {}
        cursor = self.connection.cursor()
        try:
            cursor.execute("select id, group_id from address_in_groups where deprecated='0000-00-00 00:00:00'")
            for row in cursor:
                (contact_id, group_id) = row
                # setdefault replaces the original manual if/else accumulation.
                groups_by_contact.setdefault(contact_id, []).append(group_id)
        finally:
            cursor.close()
        return groups_by_contact
|
[
"72462941+Korinsky@users.noreply.github.com"
] |
72462941+Korinsky@users.noreply.github.com
|
b14adaf5a89b66b23c4ea53b5a93cd242caca777
|
0f16edb46a48f9b5a125abb56fc0545ede1d65aa
|
/test_utilities/src/d1_test/mock_api/tests/test_get.py
|
d1eaef95d18355fd89576cc41c693343b6516ba0
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/d1_python
|
5e685f1af0c356190f2d6df45d1ac849e2f56972
|
d72a9461894d9be7d71178fb7310101b8ef9066a
|
refs/heads/master
| 2023-08-29T03:16:38.131760
| 2023-06-27T21:59:37
| 2023-06-27T21:59:37
| 60,103,877
| 15
| 12
|
Apache-2.0
| 2023-09-06T18:27:53
| 2016-05-31T16:01:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,721
|
py
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import responses
import d1_test.d1_test_case
import d1_test.mock_api.get
class TestMockGet(d1_test.d1_test_case.D1TestCase):
    """Tests for the mocked MNRead.get() endpoint.

    Each test registers the mock callback on the `responses` HTTP intercept
    layer before issuing client calls; mn_client_v1_v2 is a fixture supplied
    by the enclosing test framework.
    """
    @responses.activate
    def test_1000(self, mn_client_v1_v2):
        """mock_api.get() returns a Requests Response object."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        assert isinstance(mn_client_v1_v2.get("test_pid_1"), requests.Response)
    @responses.activate
    def test_1010(self, mn_client_v1_v2):
        """mock_api.get() returns the same content each time for a given PID."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        # Interleave two PIDs to confirm responses are keyed by PID, not by
        # call order.
        obj_1a_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2a_str = mn_client_v1_v2.get("test_pid_2").content
        obj_1b_str = mn_client_v1_v2.get("test_pid_1").content
        obj_2b_str = mn_client_v1_v2.get("test_pid_2").content
        assert obj_1a_str == obj_1b_str
        assert obj_2a_str == obj_2b_str
    @responses.activate
    def test_1020(self, mn_client_v1_v2):
        """mock_api.get(): Redirects."""
        d1_test.mock_api.get.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
        direct_sciobj_bytes = mn_client_v1_v2.get("test_pid_1").content
        # The special PID prefix instructs the mock to answer with a 303
        # redirect chain before serving the object.
        redirect_sciobj_bytes = mn_client_v1_v2.get(
            "<REDIRECT:303:3>test_pid_1"
        ).content
        assert direct_sciobj_bytes == redirect_sciobj_bytes
    # @responses.activate
    # def test_0012(self):
    #   """mock_api.get() returns 1024 bytes"""
    #   obj_str = self.client.get('test_pid_1').content
    #   self.assertEqual(len(obj_str), 1024)
    # @responses.activate
    # def test_0013(self):
    #   """mock_api.get(): Passing a trigger header triggers a DataONEException"""
    #   self.assertRaises(
    #     d1_common.types.exceptions.NotAuthorized, self.client.get, 'test_pid',
    #     vendorSpecific={'trigger': '401'}
    #   )
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
4748aa5750dba7b48af7c65f6b08a0be79ebbcb4
|
563c1d3093a047d7185c34557345eadf60d0dcd1
|
/reservoir-id/classifier_apply.py
|
b886278192ca1926597017c7d814da1eb2ac04a2
|
[
"GPL-3.0-only"
] |
permissive
|
kysolvik/reservoir-id
|
ea930cbd93199bf6f3bcda58fd5971d3402eb8bc
|
f3a25d0750d96f369a699547584d7db97b2cb43d
|
refs/heads/master
| 2021-01-19T03:30:41.006479
| 2018-01-17T14:38:42
| 2018-01-17T14:38:42
| 87,315,930
| 0
| 0
|
MIT
| 2018-01-07T16:53:26
| 2017-04-05T13:59:18
|
Python
|
UTF-8
|
Python
| false
| false
| 3,065
|
py
|
#!/usr/bin/env python
"""
Apply classifier exported by classifier_train.py
Inputs: Classifier pkl path, small area cutoff
Outputs: CSV with classified regions
Notes:
1. Make sure that all columns in the apply csv match the train_csv
2. exclude_att_patterns must match
@authors: Kylen Solvik
Date Create: 5/27/17
"""
# Load libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.externals import joblib
import xgboost as xgb
import numpy as np
import sys
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Apply Random Forest classifier to prop_csv.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
help='Path to attribute table (from build_att_table.py).',
type=str)
parser.add_argument('xgb_pkl',
help='Path to pkl with xgb model.',
type=str)
parser.add_argument('class_csv_out',
help='Path for output classified csv',
type=str)
parser.add_argument('--area_lowbound',
help='Lower area bound. Must match trained model. All regions <= in size will be ignored',
default=2,
type=int)
parser.add_argument('--path_prefix',
help='To be placed at beginnings of all other path args',
type=str,default='')
args = parser.parse_args()
def main():
# Set any attributes to exclude for this run
exclude_att_patterns = []
# Load dataset
dataset = pd.read_csv(args.path_prefix + args.prop_csv,header=0)
dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
# Exclude attributes matching user input patterns, or if they are all nans
exclude_atts = []
for pattern in exclude_att_patterns:
col_list = [col for col in dataset_acut.columns if pattern in col]
exclude_atts.extend(col_list)
for att in dataset.columns[1:]:
if sum(np.isfinite(dataset[att])) == 0:
exclude_atts.append(att)
for att in list(set(exclude_atts)):
del dataset_acut[att]
(ds_y,ds_x) = dataset_acut.shape
print(ds_y,ds_x)
# Convert dataset to array
array = dataset_acut.values
X = array[:,2:ds_x].astype(float)
Y = array[:,1].astype(int)
# Set nans to 0
X = np.nan_to_num(X)
# Export classifier trained on full data set
clf = joblib.load(args.path_prefix + args.xgb_pkl)
clf_pred = clf.predict(X)
dataset_out = dataset_acut
dataset_out["clf_pred"] = clf_pred
print(str(sum(clf_pred == 1)) + " classified as positive")
print(str(sum(clf_pred == 0)) + " classified as negative")
dataset_out.to_csv(args.path_prefix + args.class_csv_out,index=False)
if __name__ == '__main__':
main()
|
[
"kysolvik@gmail.com"
] |
kysolvik@gmail.com
|
0abd56daa2dfc8f450f36161ccbb0d4530572899
|
13d384f7eb991b7fe901468f1967f7b2952499a6
|
/day-23 turtle-crossing-start/car_manager.py
|
1f23d9f92ae79bb4124d80476d54b7f7eac0db84
|
[] |
no_license
|
miloscomplex/100_Days_of_Python
|
f31638fc5a3913dc32850b61c51d2cecac7cdbdf
|
6ac67472627867d8bf9cccb496e6395d979b8c89
|
refs/heads/main
| 2023-08-25T03:00:49.216040
| 2021-10-07T03:32:19
| 2021-10-07T03:32:19
| 395,512,768
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
import random
from turtle import Turtle
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 10
TOP_MAX = 250
BOTTOM_MAX = -250
LEFT_DISTANCE = -320
class CarManager(Turtle):
def __init__(self):
super().__init__()
self.all_cars = []
self.car_speed = STARTING_MOVE_DISTANCE
self.hideturtle()
def create_car(self):
new_car = Turtle("square")
new_car.color(random.choice(COLORS))
new_car.shapesize(stretch_wid=1, stretch_len=2)
new_car.penup()
random_y = random.randint(BOTTOM_MAX, TOP_MAX)
random_x = random.randint(300, 890)
new_car.goto(random_x, random_y)
self.all_cars.append(new_car)
def move_cars(self):
for car in self.all_cars:
car.backward(self.car_speed)
if car.xcor() < LEFT_DISTANCE:
random_y = random.randint(BOTTOM_MAX, TOP_MAX)
random_x = random.randint(300, 890)
car.goto(random_x, random_y)
def level_up(self):
self.car_speed += MOVE_INCREMENT
|
[
"hicallmesutton@gmail.com"
] |
hicallmesutton@gmail.com
|
9b2f6cdd33b203db499cf006e77db48474b4b153
|
2b240306722b3fba53caf25fc62fd599bb70f082
|
/lectures/cs532-s19/assignments/A6/toPush/Python/driver.py
|
d42194c508f4364eb0d9d53e7a3c25d83dddcea9
|
[] |
no_license
|
bayardd/anwala.github.io
|
cac62b5d13a3e57106aff60f846a2a322938ceaf
|
3d3b23f78813aff39760232f68d0b2043722a342
|
refs/heads/master
| 2020-04-20T04:09:07.304978
| 2019-04-30T17:27:35
| 2019-04-30T17:27:35
| 168,619,026
| 0
| 0
| null | 2019-02-01T00:38:51
| 2019-02-01T00:38:51
| null |
UTF-8
|
Python
| false
| false
| 5,014
|
py
|
import recommendations
allSimilar = []
file = open("data.txt", 'a')
newline = '\n'
tab = '\t'
file.write(f'First User Chosen: {tab} 368{newline}')
file.write(f'Second User Chosen: {tab} 81 {newline}')
file.write(f'Third User Chosen: {tab} 135 {newline}{newline}')
pref = recommendations.loadMovieLens()
# Get sorted list of user ratings
userRatings1 = (sorted(pref['368'].items(), key =
lambda kv:(kv[1], kv[0])))
userRatings2 = (sorted(pref['81'].items(), key =
lambda kv:(kv[1], kv[0])))
userRatings3 = (sorted(pref['135'].items(), key =
lambda kv:(kv[1], kv[0])))
# Get top 5 for each user
userRatings1.reverse()
userRatings2.reverse()
userRatings3.reverse()
# Formatted File output
file.write(f'First User Rating: {newline}')
file.write(f'ID 368 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
name = userRatings1[x][0]
rating = userRatings1[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}ID 368 Bottom 3 Rated Movies: {newline}')
userRatings1.reverse()
for x in range(0,3):
name = userRatings1[x][0]
rating = userRatings1[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Second User Rating: {newline}')
file.write(f'ID 81 Top 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
name = userRatings2[x][0]
rating = userRatings2[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings2.reverse()
file.write(f'{newline}ID 81 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
name = userRatings2[x][0]
rating = userRatings2[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}Third User Rating: {newline}')
file.write(f'ID 135 Top 3 Movies: {newline}{newline}')
for x in range(0,3):
name = userRatings3[x][0]
rating = userRatings3[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
userRatings3.reverse()
file.write(f'{newline}ID 135 Bottom 3 Rated Movies: {newline}{newline}')
for x in range(0,3):
name = userRatings3[x][0]
rating = userRatings3[x][1]
file.write(f'Name of Movie: {name} {tab} Rating: {rating} {newline}')
file.write(f'{newline}{newline}Substitute User ID: 368 {newline}{newline}')
# Find most correlated users
closest_5 = recommendations.topMatches(pref, '368')
# Find least correlated users
furthest_5 = recommendations.worstMatches(pref, '368')
# Output for least and most correlated users
file.write(f'Five other users with highest correlation: {newline}{newline}')
for x in closest_5:
correlationValue = round(x[0])
tempId = x[1]
file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
file.write(f'{newline}Five other users with lowest correlation: {newline}')
for y in furthest_5:
correlationValue = round(y[0])
tempId = y[1]
file.write(f'User ID:{tempId} {tab}Correlation Value: {correlationValue}{newline}')
recommendedMovies = recommendations.getRecommendations(pref, '368')
file.write(f'{newline}Computed Top 5 Movies to be Watched: {newline}')
for x in range(0,5):
rating = recommendedMovies[x][0]
name = recommendedMovies[x][1]
file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}Computed Bottom 5 Movies to be Watched: {newline}')
recommendedMovies.reverse()
for y in range(0,5):
rating = recommendedMovies[y][0]
name = recommendedMovies[y][1]
file.write(f'Name of Movie: {name}{tab} Calculated Rating: {rating}{newline}')
file.write(f'{newline}{newline}Favorite Movie: {tab} Jurassic Park (1993){newline}')
file.write(f'Least Favorite Movie: {tab} Children of the Corn: The Gathering (1996){newline}{newline}')
similarMovies = recommendations.calculateSimilarItems(pref)
notSimilarMovies = recommendations.calculateLeastSimilarItems(pref)
file.write(f'Top Recommended Movies to be Watched for Jurassic Park: {newline}')
# print(similarMovies['Jurassic Park (1993)'])
for x in similarMovies['Jurassic Park (1993)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Jurassic Park{newline}')
for x in notSimilarMovies['Jurassic Park (1993)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Top Recommended Movies to be Watched for Children of the Corn: {newline}')
for x in similarMovies['Children of the Corn: The Gathering (1996)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
file.write(f'{newline}Bottom Recommended Movies to be Watched for Children of the Corn{newline}')
for x in notSimilarMovies['Children of the Corn: The Gathering (1996)']:
name = x[1]
rating = x[0]
file.write(f'Name of Movie: {name}{tab} Calculated Correlation: {rating}{newline}')
|
[
"dbaya001@odu.edu"
] |
dbaya001@odu.edu
|
a4eb444e3bee4d492386c1d33f6ce720fe415054
|
c862c18ea1097ec54df04e09debae9e68d0c9897
|
/edit_note_dialog.py
|
38cc02deab7901e90daae048cc7d898d15833112
|
[] |
no_license
|
YoungTeurus/Organiser_Qt
|
605e8428e15f155c77edeb036d23133e22104365
|
499fcb9259f496adbecfc21730bdc9de33dc04dd
|
refs/heads/master
| 2021-02-05T16:30:57.451874
| 2020-03-01T17:43:14
| 2020-03-01T17:43:14
| 243,803,353
| 0
| 0
| null | 2020-03-01T17:43:16
| 2020-02-28T16:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,775
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Work\Organiser_Qt\edit_note_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(400, 278)
self.title_line = QtWidgets.QLineEdit(Dialog)
self.title_line.setGeometry(QtCore.QRect(120, 10, 261, 20))
self.title_line.setObjectName("title_line")
self.note_text = QtWidgets.QTextEdit(Dialog)
self.note_text.setGeometry(QtCore.QRect(10, 40, 371, 201))
self.note_text.setObjectName("note_text")
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 10, 91, 16))
self.label.setObjectName("label")
self.horizontalLayoutWidget = QtWidgets.QWidget(Dialog)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 243, 371, 31))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.save_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.save_button.setEnabled(False)
self.save_button.setCheckable(False)
self.save_button.setAutoRepeatDelay(298)
self.save_button.setObjectName("save_button")
self.horizontalLayout.addWidget(self.save_button)
self.delete_button = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.delete_button.setObjectName("delete_button")
self.horizontalLayout.addWidget(self.delete_button)
self.note_text.raise_()
self.title_line.raise_()
self.label.raise_()
self.horizontalLayoutWidget.raise_()
self.retranslateUi(Dialog)
self.save_button.clicked.connect(Dialog.save)
self.delete_button.clicked.connect(Dialog.delete)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "Название заметки"))
self.save_button.setText(_translate("Dialog", "Сохранить изменения"))
self.delete_button.setText(_translate("Dialog", "Удалить заметку"))
|
[
"ilya.elfimow@yandex.ru"
] |
ilya.elfimow@yandex.ru
|
28e33303b4a8e6d06e0a3ae120f751b62b91b62b
|
e6a3835a1d1f4d7f6318dfd7047c3b527e994537
|
/src/utils/utils.py
|
b353b1889ce8b210b94356a55dc40562aad8e40d
|
[] |
no_license
|
MMichels/DeepCars
|
9f8faec7b547c585888469202859d317e5d28526
|
327a604faa80d476cafb438b82af6537443670e0
|
refs/heads/master
| 2023-04-13T03:58:01.503567
| 2019-12-17T20:50:44
| 2019-12-17T20:50:44
| 228,690,108
| 0
| 0
| null | 2023-03-25T00:21:00
| 2019-12-17T19:48:14
|
Python
|
UTF-8
|
Python
| false
| false
| 471
|
py
|
import os
from pygame import image, error
from pygame.locals import RLEACCEL
def load_image(path, colorkey=None):
try:
img = image.load(path)
except error as message:
print('Não foi possivel abrir a imagem: ', path)
raise SystemExit(message)
img = img.convert_alpha()
if colorkey:
if colorkey == -1:
colorkey = img.get_at((0, 0))
img.set_colorkey(colorkey, RLEACCEL)
return img, img.get_rect()
|
[
"michels09@hotmail.com"
] |
michels09@hotmail.com
|
c43dee062a7499d04b64507171d861b11b09912e
|
df3c8c521a51f2b412118bd9d0e477da06a3b7cc
|
/build/view_environments/post_create_/create_post/create_post.py
|
2a6a13f8a1551a30e01dd4e643e8f14b345f9bfd
|
[] |
no_license
|
bharatmudragada/fb_post
|
c30b900731db5844df6b438e5d38a0dfb607412a
|
c5e7bb185a561bdcfcd7b2e30264554b07106044
|
refs/heads/master
| 2020-06-21T04:05:22.296755
| 2019-07-17T07:48:22
| 2019-07-17T07:48:22
| 197,339,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
from django_swagger_utils.drf_server.decorators.request_response import request_response
from django_swagger_utils.drf_server.default.parser_mapping import PARSER_MAPPING
from django_swagger_utils.drf_server.default.renderer_mapping import RENDERER_MAPPING
from fb_post.build.serializers.definitions.PostContent.PostContentSerializer import PostContentSerializer
from fb_post.build.serializers.definitions.PostId.PostIdSerializer import PostIdSerializer
options = {
'METHOD': 'POST',
'REQUEST_WRAPPING_REQUIRED': True,
'REQUEST_ENCRYPTION_REQUIRED': False,
'REQUEST_IS_PARTIAL': False,
'PARSER_CLASSES': [
PARSER_MAPPING["application/json"]
],
'RENDERER_CLASSES': [
RENDERER_MAPPING["application/json"]
],
'REQUEST_QUERY_PARAMS_SERIALIZER': None,
'REQUEST_HEADERS_SERIALIZER': None,
'REQUEST_SERIALIZER': PostContentSerializer,
'REQUEST_SERIALIZER_MANY_ITEMS': False,
'RESPONSE': {
'201' : {
'RESPONSE_SERIALIZER': PostIdSerializer,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
,
'400' : {
'RESPONSE_SERIALIZER': None,
'RESPONSE_SERIALIZER_MANY_ITEMS': False,
'HEADERS_SERIALIZER': None,
}
},
"SECURITY":{
"oauth" : [
"write"
]
}
}
app_name = "fb_post"
operation_id = "create_post"
group_name = ""
@request_response(options=options, app_name=app_name, operation_id=operation_id, group_name=group_name)
def create_post(request, *args, **kwargs):
args = (request,) + args
from django_swagger_utils.drf_server.wrappers.view_env_wrapper import view_env_wrapper
return view_env_wrapper(app_name, "create_post", group_name, *args, **kwargs)
|
[
"bharathmudragada123@gmail.com"
] |
bharathmudragada123@gmail.com
|
ddf50e75e79b2fdf8f47933f714c83b2eaa89e66
|
09d3b183035824f990946cdd8faa11e8bd729e6f
|
/geo-data/osmgeojson.py
|
cc3bfcb2ff891030f189c4724e3ddec70e74dbe7
|
[] |
no_license
|
srravya/data-greed
|
78d20066acef11c2a56f03fca18975227102832d
|
566d2c5ad521fd9ffd01df4fd77476bd3cc18c79
|
refs/heads/master
| 2021-01-11T09:27:46.965503
| 2016-06-22T17:11:28
| 2016-06-22T17:11:28
| 57,985,117
| 0
| 0
| null | 2016-06-08T05:19:22
| 2016-05-03T16:44:09
|
Python
|
UTF-8
|
Python
| false
| false
| 2,349
|
py
|
from geojson import Point
from geojson import Feature, FeatureCollection
from geojson import dump, load
from osmapi import OsmApi
import os
def degree_decimal(dms_list):
return dms_list[0] + (dms_list[1] / 60.0) + (dms_list[2] / 3600.0)
DATAFILE='libraries_new.geojson'
TESTFILE='libraries_test.geojson'
# Change the value to switch between test data and actual data
GEODATAFILE=DATAFILE
# COORD_SYSTEM='degree'
COORD_SYSTEM='decimal'
if COORD_SYSTEM == 'decimal':
lat = input('lat: ')
lon = input('lon: ')
elif COORD_SYSTEM == 'degree':
lat_dms = raw_input('deg,min,sec: ')
lon_dms = raw_input('deg,min,sec: ')
lat = degree_decimal([float(x.strip()) for x in lat_dms.split(',')])
lon = degree_decimal([float(y.strip()) for y in lon_dms.split(',')])
def prompt():
print("Select Option")
print("0. Exit")
print("1. Add a node")
print("2. Get node(s)")
def add_to_osm():
connection = OsmApi(passwordfile=u'', api=OSM_EP)
# GeoJSON point is (Easting, Northing) / (Long, Lat) order!
my_point = Point((lon,lat))
''' Properties: {
Name: Name of the library
Operator: Directorate of Public Libraries
Opening Hours: Open hours in OSM format
Address: Door number if available and street
'''
name = raw_input('Name: ')
timings = raw_input('Time: ')
street = raw_input('Street: ')
housenumber = raw_input('Door: ')
postcode = raw_input('PINCODE: ')
my_feature = Feature(geometry=my_point, properties={
'amenity':'library',
'name':name,
'operator':'Directorate of Public Libraries',
'opening_hours':timings,
'addr:country':'IN',
'addr:city':'Chennai',
'addr:street':street,
'addr:housenumber':housenumber,
'address:postcode':postcode,
'marker-color': '#00ff00',
'marker-symbol': 'library'
} )
if os.stat(GEODATAFILE).st_size == 0:
FILE_EMPTY = True
else:
FILE_EMPTY = False
if not FILE_EMPTY:
with open(GEODATAFILE,'r') as data:
current = load(data)
featureSet = current['features']
featureSet.append(my_feature)
print("Total libraries: %d" % len(featureSet))
libraries = FeatureCollection(featureSet)
else:
libraries = FeatureCollection([my_feature])
# Write data to file
with open(GEODATAFILE,'w+') as data:
dump(libraries, data, indent=4, sort_keys=True)
|
[
"eternaltyro@gmail.com"
] |
eternaltyro@gmail.com
|
bb35ccd3ccfc92a049807e3711182d740eb677b8
|
eab2dc435028b2548554d97b24eb7b7e3576b953
|
/iblrig/check_sync_pulses.py
|
b53097729443914a5879f7b454f1900b4316e049
|
[
"MIT"
] |
permissive
|
k1o0/iblrig
|
35edd8570215ca591b1f1e26e47439e633aa587a
|
9177b852b344a9bbc26e4a4aeb5f0182bd8a9b25
|
refs/heads/master
| 2021-05-24T12:58:47.552912
| 2020-02-25T20:19:59
| 2020-02-25T20:19:59
| 253,573,669
| 0
| 0
|
MIT
| 2020-04-06T17:48:28
| 2020-04-06T17:48:28
| null |
UTF-8
|
Python
| false
| false
| 2,875
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Monday, February 25th 2019, 2:10:38 pm
import logging
import sys
from pathlib import Path
import ibllib.io.raw_data_loaders as raw
import matplotlib.pyplot as plt
import numpy as np
from iblrig.misc import get_port_events
log = logging.getLogger("iblrig")
def sync_check(tph):
events = tph.behavior_data["Events timestamps"]
ev_bnc1 = get_port_events(events, name="BNC1")
ev_bnc2 = get_port_events(events, name="BNC2")
ev_port1 = get_port_events(events, name="Port1")
NOT_FOUND = "COULD NOT FIND DATA ON {}"
bnc1_msg = NOT_FOUND.format("BNC1") if not ev_bnc1 else "OK"
bnc2_msg = NOT_FOUND.format("BNC2") if not ev_bnc2 else "OK"
port1_msg = NOT_FOUND.format("Port1") if not ev_port1 else "OK"
warn_msg = f"""
##########################################
NOT FOUND: SYNC PULSES
##########################################
VISUAL STIMULUS SYNC: {bnc1_msg}
SOUND SYNC: {bnc2_msg}
CAMERA SYNC: {port1_msg}
##########################################"""
if not ev_bnc1 or not ev_bnc2 or not ev_port1:
log.warning(warn_msg)
if __name__ == "__main__":
if len(sys.argv) == 1:
print("I need a file name...")
session_data_file = Path(sys.argv[1])
if not session_data_file.exists():
raise FileNotFoundError(f"{session_data_file}")
if session_data_file.name.endswith(".jsonable"):
data = raw.load_data(session_data_file.parent.parent)
else:
try:
data = raw.load_data(session_data_file)
except Exception:
print("Not a file or a valid session folder")
unsynced_trial_count = 0
frame2ttl = []
sound = []
camera = []
trial_end = []
for trial_data in data:
tevents = trial_data["behavior_data"]["Events timestamps"]
ev_bnc1 = get_port_events(tevents, name="BNC1")
ev_bnc2 = get_port_events(tevents, name="BNC2")
ev_port1 = get_port_events(tevents, name="Port1")
if not ev_bnc1 or not ev_bnc2 or not ev_port1:
unsynced_trial_count += 1
frame2ttl.extend(ev_bnc1)
sound.extend(ev_bnc2)
camera.extend(ev_port1)
trial_end.append(trial_data["behavior_data"]["Trial end timestamp"])
print(f"Found {unsynced_trial_count} trials with bad sync data")
f = plt.figure() # figsize=(19.2, 10.8), dpi=100)
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
ax.plot(camera, np.ones(len(camera)) * 1, "|")
ax.plot(sound, np.ones(len(sound)) * 2, "|")
ax.plot(frame2ttl, np.ones(len(frame2ttl)) * 3, "|")
[ax.axvline(t, alpha=0.5) for t in trial_end]
ax.set_ylim([0, 4])
ax.set_yticks(range(4))
ax.set_yticklabels(["", "camera", "sound", "frame2ttl"])
plt.show()
|
[
"nbonacchi@gmail.com"
] |
nbonacchi@gmail.com
|
b676c5cba48c2e1efd64286543f5f6aadfef51fd
|
ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1
|
/res/packages/scripts/scripts/common/wotdecorators.py
|
1554469a75cbd2eab8d57565f8457da484b5051a
|
[] |
no_license
|
webiumsk/WOT-0.9.20.0
|
de3d7441c5d442f085c47a89fa58a83f1cd783f2
|
811cb4e1bca271372a1d837a268b6e0e915368bc
|
refs/heads/master
| 2021-01-20T22:11:45.505844
| 2017-08-29T20:11:38
| 2017-08-29T20:11:38
| 101,803,045
| 0
| 1
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 2,832
|
py
|
# 2017.08.29 21:52:48 Střední Evropa (letní čas)
# Embedded file name: scripts/common/wotdecorators.py
import inspect
from functools import update_wrapper
from debug_utils import LOG_WRAPPED_CURRENT_EXCEPTION, CRITICAL_ERROR
from time_tracking import LOG_TIME_WARNING
import time
import time_tracking
def noexcept(func):
def wrapper(*args, **kwArgs):
try:
return func(*args, **kwArgs)
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
return wrapper
def nofail(func):
def wrapper(*args, **kwArgs):
try:
return func(*args, **kwArgs)
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
CRITICAL_ERROR('Exception in no-fail code')
return wrapper
def exposedtoclient(func):
def wrapper(*args, **kwArgs):
try:
lastTick = time.time()
result = func(*args, **kwArgs)
timeSinceLastTick = time.time() - lastTick
if timeSinceLastTick > time_tracking.DEFAULT_TIME_LIMIT:
LOG_TIME_WARNING(timeSinceLastTick, context=(getattr(args[0], 'id', 0),
func.__name__,
args,
kwArgs))
return result
except:
LOG_WRAPPED_CURRENT_EXCEPTION(wrapper.__name__, func.__name__, func.func_code.co_filename, func.func_code.co_firstlineno + 1)
return wrapper
def singleton(cls):
return cls()
def decorate(func, dec):
argspec = inspect.getargspec(func)
name = func.__name__
signature = inspect.formatargspec(*argspec)
params = inspect.formatargspec(formatvalue=(lambda value: ''), *argspec)
source = 'def %s%s: return __dec%s\n' % (name, signature, params)
code = compile(source, '<decorator-gen>', 'single')
env = {'__dec': dec}
eval(code, env)
return update_wrapper(env[name], func)
def decorator(dec):
def wrapper(func):
return decorate(func, dec(func))
return wrapper
def condition(attributeName, logFunc = None, logStack = True):
def decorator(func):
def wrapper(*args, **kwargs):
attribute = getattr(args[0], attributeName)
if not bool(attribute):
if logFunc:
logFunc('Method condition failed', args, kwargs, stack=logStack)
return
return func(*args, **kwargs)
return decorate(func, wrapper)
return decorator
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\wotdecorators.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:48 Střední Evropa (letní čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
5c59103c775af199dd22c783d6c67d60fb97d5d3
|
49e0b6094a6841efd74ba57cd01913b465223333
|
/data_structures_and_algorithms_python/challenges/tree_fizz_buzz/tree_fizz_buzz.py
|
5883a22406f14bb3defa4c58189abd1927c6c06e
|
[] |
no_license
|
HamzaQahoush/data-structures-and-algorithms--Python
|
1c2fdfc8b90efc190108ed139372591741d5acc7
|
81bc4424065bc6b7ef99ab4dbba60524a75058a4
|
refs/heads/master
| 2023-07-15T04:03:05.158576
| 2021-08-05T17:34:47
| 2021-08-05T17:34:47
| 376,792,369
| 0
| 1
| null | 2021-08-05T17:29:16
| 2021-06-14T11:00:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,647
|
py
|
class Node :
def __init__(self,value):
self.value = value
self.child = []
def __str__(self):
return str(self.value)
class KAryTree :
def __init__(self):
self.root= None
"""This code done with help from Ahmad Zatar"""
def fizz_Buzz_Tree(KAryTree):
def traverse(node):
if node.child :
for i in range(len(node.child)):
traverse (node.child[i])
if node.child[i].value %5 == 0 and\
node.child[i].value % 3 == 0:
node.child[i].value= "Fizz Buzz"
elif node.child[i].value %5 == 0 : node.child[i].value= "Buzz"
elif node.child[i].value %3 == 0 : node.child[i].value= "Fizz"
else: node.child[i].value =str(node.child[i].value)
traverse(KAryTree.root)
if KAryTree.root.value %5 == 0 and\
KAryTree.root.value %3 ==0 :
KAryTree.root.value ="Fizz Buzz"
if KAryTree.root.value %5 == 0 : KAryTree.root.value ="Buzz"
if KAryTree.root.value %3 ==0 : KAryTree.root.value ="Fizz"
else : KAryTree.root.value= str(KAryTree.root.value)
return KAryTree
if __name__ == "__main__":
kAryTree = KAryTree()
kAryTree.root=Node(1) #root
kAryTree.root.child+=[Node(2)] #child 0
kAryTree.root.child+=[Node(3)] #child 1
kAryTree.root.child+=[Node(5)] #child 2
kAryTree.root.child[0].child+=[Node(5)] #child[0,0]
fizz_Buzz_Tree(kAryTree)
print(kAryTree.root.child[0].value) # 2 -> 2
print(kAryTree.root.child[1].value) # 3 -> Fizz
print(kAryTree.root.child[0].child[0].value) # 5 -> Buzz
|
[
"hamza.qah@gmail.com"
] |
hamza.qah@gmail.com
|
8b0d58ef495a25ef7a5bac1d8320f8430110b81a
|
4bdb484b1aaf38f38e512042e249c26bb8cb181c
|
/v-server/shopspider/diy/configs.py
|
3e57d1d8796addaa9191b063104920b91f3dcb92
|
[] |
no_license
|
fan1018wen/scrapy-spider
|
593ec2b6e02724e185e135ecc107400eeb7aec37
|
97d7ea1ce63d6c84ef9e01fb55e9376dbd7b8e83
|
refs/heads/master
| 2021-01-15T22:14:57.787123
| 2013-09-27T03:59:55
| 2013-09-27T03:59:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
##coding=utf-8
# Define some diy functions here
table_prefix = 'P1_WJ_TEST_LANG' #数据表前缀 #pipeline eg TEST --> TEST_SHOP TEST_PRODUCT TEST_PRODUCT_IMAGE
show_messages = True #是否打印相关调试信息 True / False
#-数据库配置---如需修改端口 请移步至 pipeline
db_type = 'oracle' #数据库类型 oracle / mysql #pipeline
db_host = '172.16.4.211' #数据库主机 #pipeline
db_user = 'spider' # 用户名
db_pass = 'spider' # 密码
db_name = 'spider' #mysql为数据库名
db_sid = 'xe' # oracle为服务名 jlproject_primary
handle_image = True #是否处理图片 True / False #pipeline 一般无需修改 处理图片源路径为 http 绝对路径
download_image = False #是否下载图片 True / False #pipeline 一般无需修改
image_dir = '/picdir/php' #图片存放根目录 linux | windows 'D:\\7788\\picdir\\php' 一般无需修改
global conf
conf = {
'table_prefix' : table_prefix,
'show_messages' : show_messages,
'db_type' : db_type,
'db_host' : db_host,
'db_user' : db_user,
'db_pass' : db_pass,
'db_name' : db_name,
'db_sid' : db_sid,
'handle_image' : handle_image,
'download_image' : download_image,
'image_dir' : image_dir
}
#if conf['show_messages'] :
|
[
"wj922@qq.com"
] |
wj922@qq.com
|
13a4f3ce6cf13557eb0b81be5c554c8af70bd323
|
6984724d0466d477635b23d073affa9b00f01f67
|
/Tasks/Ramanenka_Tasks/HT6/app_Calc.py
|
139762ac73cc6b004c125c7310934ab7e8c2ccb9
|
[] |
no_license
|
RomanPutsilouski/M-PT1-37-21
|
202414fac782e6c68f741e55f9b7697f0c974f45
|
ceef9b4e6bcff2a9033615ec761f0e2e73c9467e
|
refs/heads/main
| 2023-05-30T21:10:22.404817
| 2021-06-30T00:26:29
| 2021-06-30T00:26:29
| 348,462,785
| 1
| 0
| null | 2021-06-05T15:44:27
| 2021-03-16T19:06:57
|
Python
|
UTF-8
|
Python
| false
| false
| 257
|
py
|
from ht6_calculator_with_brackets import recurs
"""Enter the expression or continue with default expression"""
expression = '(25 -(5- (1-2))/(5-8))'
# equation = input('Expression is: \n')
results = float(recurs(expression))
print(f'Result is: {results}')
|
[
"margoroma2010@gmail.com"
] |
margoroma2010@gmail.com
|
f312f96e09ae162f71d13541059405e61729ea52
|
34d99bff51f26c03fcf05141589f51abeae2ff98
|
/HTJK/venv/Lib/site-packages/wqrfnium/wqrfnium.py
|
11297b7b76430aef3371b426153664074192804d
|
[] |
no_license
|
zmbhza/appui
|
d5b31c60122eabe4d8d484d0d15e333b46a9d46f
|
7a5b1072245c53b5a227943b41ef0b54420c7107
|
refs/heads/master
| 2022-12-21T14:00:41.509390
| 2020-09-27T03:34:15
| 2020-09-27T03:34:15
| 297,602,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,096
|
py
|
# -*- coding: utf-8 -*-
import os,sys
import re,time
import Levenshtein
import xlrd,xlwt
from xlutils.copy import copy
import os,platform
import configparser
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#----------------------------------
# diy your elements_xls_path
def create_xls(elements_xls_path):
if not os.path.exists(elements_xls_path):
book = xlwt.Workbook(encoding='utf-8',style_compression=0)
book.add_sheet('Sheet1',cell_overwrite_ok=True)
book.save(elements_xls_path)
def get_elements(icon):
try:
Data = xlrd.open_workbook(elements_xls_path)
except Exception:
print('Please put the element into the elements.xls first!')
print('First column:icon,Second column:tmp_find_method,Third column:tmp_find_value,Fourth column:index,Fifth column:html_element')
print('For example:seachinput,id,kw,0,<input type="text" class="s_ipt" name="wd" id="kw" maxlength="100" autocomplete="off">')
exit(0)
table = Data.sheet_by_name("Sheet1")
nrows = table.nrows
for i in range(nrows):
element_tmp = table.cell(i,0).value
if element_tmp == icon:
try:
html_element = table.cell(i,4).value
except:
html_element = ''
return [table.cell(i,1).value,table.cell(i,2).value,int(table.cell(i,3).value),html_element,i]
print('not fonund the element: [ %s ],please fixed it by yourself...'%icon)
exit(0)
def update_elements(id,html,tmp,tmp_value,index):
Data = xlrd.open_workbook(elements_xls_path)
ww = copy(Data)
ww.get_sheet(0).write(id, 1,tmp)
ww.get_sheet(0).write(id, 2,tmp_value)
ww.get_sheet(0).write(id, 3,index)
ww.get_sheet(0).write(id, 4,html)
os.remove(elements_xls_path)
ww.save(elements_xls_path)
def input_html_element(id,html):
    """Record *html* (the element's outer HTML) in column 4 of row *id*."""
    workbook = copy(xlrd.open_workbook(elements_xls_path))
    workbook.get_sheet(0).write(id, 4, html)
    # Replace the workbook on disk with the updated copy.
    os.remove(elements_xls_path)
    workbook.save(elements_xls_path)
def likescore(oldstr,newstr):
    """Similarity in [0, 1] between the string forms of the two values."""
    return Levenshtein.ratio(str(oldstr), str(newstr))
def search_new(driver,old_html):
    """Relocate an element whose stored locator has gone stale.

    Parses the previously recorded outer HTML (*old_html*) for common
    attributes, then scores every element with the same tag currently on the
    page by summed Levenshtein similarity of those attributes, and returns
    the best match.

    Returns ``[element, new_outer_html, 'tag name', tag, index]`` — i.e. a
    locator of the form ``find_elements_by_tag_name(tag)[index]``.
    """
    # Pull each attribute of interest out of the recorded HTML. A missing
    # attribute falls back to None / '' so it can still be fed to likescore.
    try:old_id = re.findall(r'id="(.*?)"',old_html)[0]
    except:old_id = None
    try:old_name = re.findall(r'name="(.*?)"',old_html)[0]
    except:old_name=None
    try:old_class = re.findall(r'class="(.*?)"',old_html)[0]
    except:old_class=None
    try:old_text = re.findall(r'>(.*?)<',old_html)[0]
    except:old_text=''
    try:old_value = re.findall(r'value="(.*?)"',old_html)[0]
    except:old_value=''
    try:old_onclick = re.findall(r'onclick="(.*?)"',old_html)[0]
    except:old_onclick=None
    try:old_style = re.findall(r'style="(.*?)"',old_html)[0]
    except:old_style=''
    try:old_placeholder = re.findall(r'placeholder="(.*?)"', old_html)[0]
    except:old_placeholder=None
    try:old_href = re.findall(r'href="(.*?)"',old_html)[0]
    except:old_href=None
    try:old_type = re.findall(r'type="(.*?)"',old_html)[0]
    except:old_type = None
    # Tag name: text between '<' and the first space, or up to '>' when the
    # tag has no attributes at all.
    try:
        bq = re.findall(r'<(.+?) ',old_html)[0]
    except:
        bq = re.findall(r'<(.+?)>',old_html)[0]
    # Score every live element with that tag; keep the highest-scoring one.
    new_elements = driver.find_elements_by_tag_name(bq)
    end_element = new_elements[0]
    end_index = 0
    tmp_score = 0
    for i in range(len(new_elements)):
        score = 0
        new_id = new_elements[i].get_attribute("id")
        new_name = new_elements[i].get_attribute("name")
        new_class = new_elements[i].get_attribute("class")
        new_text = new_elements[i].text
        new_value = new_elements[i].get_attribute("value")
        new_onclick = new_elements[i].get_attribute("onclick")
        new_style = new_elements[i].get_attribute("style")
        new_placeholder = new_elements[i].get_attribute("placeholder")
        new_href = new_elements[i].get_attribute("href")
        # 'type' is not exposed via get_attribute here; re-parse the live HTML.
        try:new_type = re.findall(r'type="(.*?)"',new_elements[i].get_attribute("outerHTML"))[0]
        except:new_type = None
        score += likescore(old_id, new_id)
        score += likescore(old_name, new_name)
        score += likescore(old_class, new_class)
        score += likescore(old_text, new_text)
        score += likescore(old_value, new_value)
        score += likescore(old_onclick, new_onclick)
        # Styles are compared whitespace-insensitively.
        score += likescore(str(old_style).replace(' ',''), str(new_style).replace(' ',''))
        score += likescore(old_placeholder, new_placeholder)
        score += likescore(old_href, new_href)
        score += likescore(old_type,new_type)
        if score > tmp_score:
            end_element = new_elements[i]
            end_index = i
            tmp_score = score
    new_html = end_element.get_attribute("outerHTML")
    new_tmp = 'tag name' # healed locator strategy: tag name + positional index
    new_tmp_value = bq
    new_index = end_index
    return [end_element,new_html,new_tmp,new_tmp_value,new_index]
def getelement(driver,icon):
    """Find the element registered under *icon*, self-healing the locator.

    First tries the locator stored in elements.xls. On failure, falls back
    to ``search_new()`` to fuzzy-match the element from its recorded HTML
    and writes the refreshed locator back to the workbook.
    """
    time1 = time.time()
    element = get_elements(icon)
    if element == 'error':
        raise Exception
    print('find: %s ...'%icon)
    old_html = element[3]
    try:
        # Normalise locator aliases to selenium's "By" strings.
        if element[0] == 'link_text': element[0] = 'link text'
        if element[0] == 'class' or element[0] == 'class_name': element[0] = 'class name'
        el = driver.find_elements(element[0],element[1])[element[2]]
        print('success in %s s'%str(time.time()-time1)[:5])
        # First successful find: record the element's HTML so future runs
        # can heal from it.
        if old_html == '':
            html_element = el.get_attribute("outerHTML")
            input_html_element(element[-1],html_element)
        return el
    except Exception:
        print('find_faild,begin fix....')
        # Without recorded HTML there is nothing to fuzzy-match against.
        if element[-2] == '':
            print('we find this element:%s are you first set,but set wrong.Please set right in first time.'%icon)
            exit(0)
        newel_detail = search_new(driver,old_html)
        newel = newel_detail[0]
        new_html = newel_detail[1]
        new_tmp = newel_detail[2]
        new_tmp_value = newel_detail[3]
        new_index = newel_detail[4]
        # Persist the healed locator so the next run finds it directly.
        update_elements(element[4],html=new_html,tmp=new_tmp,tmp_value=new_tmp_value,index=new_index)
        print('find success in %s s'%str(time.time()-time1)[:5])
        return newel
# Load the configured workbook path from wqrfnium.ini. When the ini file or
# its [Excel] section is missing, create the ini with an empty path (the
# first-run block below then fills in a platform default).
try:
    cfp = configparser.ConfigParser()
    cfp.read('wqrfnium.ini')
    elements_xls_path = cfp.get('Excel','elements_xls_path')
except: # create wqrfnium.ini
    cfp = configparser.ConfigParser()
    cfp["Excel"] = {"elements_xls_path":""}
    with open('wqrfnium.ini','w') as fp:
        cfp.write(fp)
    elements_xls_path = cfp.get('Excel','elements_xls_path')
def begin_wqrf(path):
    """Point wqrfnium at a custom elements workbook.

    *path* may be a directory (``elements.xls`` is appended) or an .xls
    file path. The workbook is created if needed and the new location is
    persisted to wqrfnium.ini.
    """
    global elements_xls_path
    # A directory was given: append the default workbook file name.
    if 'xls' not in path.split('.')[-1]:
        if path[-1] == '/':
            path += 'elements.xls'
        else:
            path += '/elements.xls'
    if elements_xls_path != path:
        print("----------------------------------")
        print("You are changeing the elements_xls_path,the new path is %s now!"%path)
        print("你正在自定义元素表elements.xls的存放路径,新路径为:%s"%path)
        print("You'd better handle the old elements_xls : %s by yourself."%elements_xls_path)
        print("你最好处理掉旧的元素表:%s"%elements_xls_path)
        create_xls(path)
        # Remember the choice across runs.
        cfp.set("Excel","elements_xls_path",path)
        with open("wqrfnium.ini","w+") as f:
            cfp.write(f)
        elements_xls_path = path
# First run (no workbook path configured yet): pick a platform default,
# create the workbook there and persist the choice to wqrfnium.ini.
# On later runs just report whether the configured path is the default.
if elements_xls_path == '': #no path
    # begin to set the elements
    # 'arwin'/'inux' matches Darwin (macOS) and Linux regardless of case.
    if 'arwin' in platform.system() or 'inux' in platform.system() :
        elements_xls_path =os.environ['HOME']+"/elements.xls"
    else:
        elements_xls_path = "C:\\elements.xls"
    print('You are first use wqrfnium,it is creating elements.xls,you must edit elements.xls and play wqrfnium after!')
    print('这是您第一次使用wqrfnium,它正在自动创建元素表elements.xls,您必须在这次启动后再去使用wqrfnium和添加元素到elements.xls等操作!')
    print('Your elements.xls tmp path is %s' % elements_xls_path)
    print('你的元素表elements.xls的临时路径是 %s'%elements_xls_path)
    print("First colum is element's icon,second is element's tmp_find_method,third is element's tmp_find_value,forth is element's index,the last is element's html_element")
    print("元素表:第一列为元素的标识,第二列为元素的临时定位方式,第三列为元素的临时定位值,第四列为元素的下标,最后一列元素的html标签源码")
    print("You can also read the README to get help or wirte email to 1074321997@qq.com")
    print("你也可以去阅读README.md来获取更多帮助,或者发送邮件到1074321997@qq.com联系作者")
    print('You can use code [begin_wqrf("your diy new elements_xls_path ")] to diy your elements_xls_path!')
    print('你可以在文件开头添加代码[begin_wqrf("你的元素表elements.path的自定义存放路径")] 来 自定义 你的元素表存放路径!')
    create_xls(elements_xls_path)
    cfp.set("Excel", "elements_xls_path", elements_xls_path)
    with open("wqrfnium.ini", "w+") as f:
        cfp.write(f)
else:
    if 'arwin' in platform.system() or 'inux' in platform.system() :
        if elements_xls_path == os.environ['HOME']+"/elements.xls": # default path
            print('Your elements.xls tmp path is default : %s'%elements_xls_path)
            print('你的elements.xls 的临时存放路径为默认:%s'%elements_xls_path)
        else:
            print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
            print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
    else:
        if elements_xls_path == "C:\\elements.xls": # default path
            print('Your elements.xls tmp path is default : %s'%elements_xls_path)
            print('你的elements.xls 的临时存放路径为默认:%s' % elements_xls_path)
        else:
            print('Your elements.xls tmp path is diy by yourself : %s' % elements_xls_path)
            print('你的elements.xls 的自定义存放路径为:%s' % elements_xls_path)
|
[
"847160625@qq.com"
] |
847160625@qq.com
|
84fdc9040b3bcc55c94270233da3cce4c9b669d5
|
babc56e88a3b5f5038be70ad676d5bd8f1bbf0d2
|
/wind_direction_byo.py
|
94bc6600dd5986d16cb2cf6d96ba20ac2a7f7738
|
[] |
no_license
|
VicenteYago/CustomWeatherStation
|
873405ca16aa0b6f4f291cbc0068a6ea10aef745
|
c655f947cca2cd0f8827c18f6f7a7c4c11ef4d43
|
refs/heads/master
| 2022-11-13T06:48:05.736830
| 2020-06-30T00:43:07
| 2020-06-30T00:43:07
| 269,812,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
from gpiozero import MCP3008
import time
import math
adc = MCP3008(channel=0)  # ADC channel 0 reads the wind-vane voltage divider
# NOTE(review): count, values and volts are never used anywhere below —
# likely leftovers; volts_dic is the only mapping actually consulted.
count = 0
values = []
# Nominal divider voltages the vane can produce (one per 22.5° position).
volts = [0.4, 1.4, 1.2, 2.8,
         2.9, 2.2, 2.5, 1.8,
         2.0, 0.7, 0.8, 0.1,
         0.3, 0.2, 0.6, 2.7]
# Voltage (rounded to 1 decimal place) -> compass heading in degrees.
# NOTE(review): 2.7 maps to 90.5, which breaks the otherwise uniform 22.5°
# grid (0.0, 22.5, 45.0, ...) — presumably a typo for 90.0; confirm against
# the vane's datasheet.
volts_dic = {
    0.4: 0.0,
    1.4: 22.5,
    1.2: 45.0,
    2.8: 67.5,
    2.7: 90.5,
    2.9: 112.5,
    2.2: 135.0,
    2.5: 157.5,
    1.8: 180.0,
    2.0: 202.5,
    0.7: 225.0,
    0.8: 247.5,
    0.1: 270.0,
    0.3: 292.5,
    0.2: 315.0,
    0.6: 337.5
}
def get_average(angles):
    """Return the circular mean of *angles* in degrees, in [0, 360).

    Averages the unit vectors of the angles so that headings near 0°/360°
    average correctly (e.g. 350° and 10° average to 0°, not 180°).

    Fixes two defects of the original: ``math.atan(s / c)`` raised
    ZeroDivisionError when the mean cosine was exactly 0 (e.g. a single 90°
    reading on some platforms), and an empty list crashed. ``atan2`` handles
    every quadrant, reproducing the old branch logic exactly for all inputs
    the original accepted.
    """
    if not angles:
        return 0.0
    sin_sum = 0.0
    cos_sum = 0.0
    for angle in angles:
        r = math.radians(angle)
        sin_sum += math.sin(r)
        cos_sum += math.cos(r)
    flen = float(len(angles))
    # atan2 is quadrant-aware; % 360 maps (-180, 180] onto [0, 360).
    average = math.degrees(math.atan2(sin_sum / flen, cos_sum / flen)) % 360.0
    return 0.0 if average == 360.0 else average
def get_value(length = 5):
    """Sample the wind vane for *length* seconds and return the mean heading.

    Reads the MCP3008 repeatedly, maps each voltage (rounded to 1 decimal
    place) to a compass direction via ``volts_dic``, and returns the
    circular mean of all recognised readings in degrees.
    """
    data = []
    print("Measuring wind direction for %d seconds..." % length)
    start_time = time.time()
    while time.time() - start_time <= length:
        # adc.value is 0..1; scale to the 3.3 V reference.
        wind = round(adc.value*3.3,1)
        # Voltages not in the lookup table (noise / between positions) are
        # reported and discarded rather than skewing the average.
        if not wind in volts_dic:
            print("Unknown value :", str(wind))
        else:
            data.append(volts_dic[wind])
    return get_average(data)
# Continuously sample and print the averaged wind direction.
# NOTE(review): this loop also runs when the module is imported; consider
# guarding it with `if __name__ == "__main__":`.
while True:
    print(get_value())
|
[
"="
] |
=
|
4ee39fb041156b51bf7fa191a298758ceaab2ef0
|
bcda171a045e86f8437c9dd5f37a0a1ac2316063
|
/anonymization/newtest.py
|
1ed85056501ce83aeffe09c6b85218895595e2aa
|
[] |
no_license
|
blackfeathering/CommunityDeception-master
|
f1127a9d22869a3bbc8db40ca99c89c0e98279d5
|
c49dafd8774e029c0d57aa4f63ad192aacafa07f
|
refs/heads/master
| 2023-04-03T03:41:13.651533
| 2021-03-15T06:16:28
| 2021-03-15T06:16:28
| 255,219,882
| 0
| 0
| null | 2021-03-29T22:52:54
| 2020-04-13T03:13:20
|
Python
|
UTF-8
|
Python
| false
| false
| 4,824
|
py
|
import logging.config
import sys
import cmath
from typing import List
from settings import master
from igraph import Graph
from igraph.clustering import VertexClustering
from utils.counter_pre import count_security_index_by_pre
from utils.pre_counter import count_pre_security_index
from utils.counter import count_security_index
from utils.timer import time_mark
import time
logging.config.dictConfig(master.LOGGING_SETTINGS)
logger = logging.getLogger('normal')
class NewtestCommunityCombine(object):
    """Prepares the merge of two communities of an igraph graph.

    Given a graph, a community partition and two community ids, collects the
    vertex/degree bookkeeping for both communities and builds the membership
    vector expected after they are combined.

    NOTE(review): this class reaches into igraph's private attributes
    (``VertexClustering._membership``, ``_graph``, ``_len``), so it is tied
    to a specific igraph version.
    """
    def __init__(self, graph, edges_sum, detection_func, func_args, interval, partitions=None,
                 path=None, index0=2, index1=0, **kwargs):
        # graph:          igraph Graph being processed
        # edges_sum:      number of edges available to add (logged as 'Edges')
        # detection_func: community-detection callable; invoked as
        #                 detection_func(graph, **func_args) when *partitions*
        #                 is not supplied
        # interval:       step/gap parameter (logged as 'Gap')
        # partitions:     optional precomputed VertexClustering
        # path:           graph source label, used only for logging
        # index0/index1:  ids of the two communities to merge
        self.__graph = graph
        self.__edges_sum = edges_sum
        self.__detection_func = detection_func
        self.__func_args = func_args
        self.__interval = interval
        self.__partitions = partitions
        self.__path = path
        self.__community_index_0 = index0
        self.__community_index_1 = index1
        # Derived state, populated by __preprocess / __set_necessary_info.
        self.__edge_set = None
        self.__degree_list = None
        self.__vertex_list = None
        self.__vertex_part = None
        self.__edge_added_list = None
        self.__partitions_expected = None
        self.__partitions_expected_degree: List[int] = list()
        self.__partitions_expected_volume: List[int] = list()
        self.__sorted_partitions_expected: List[List[int]] = list()
        self.__degree_distribute: List[int] = list()
        self.__start_time = time.time()
        self.__end_time = None
    def __start(self):
        # Log the run configuration and the two communities being merged.
        logger.info("CommunityCombine")
        logger.info(f'Time : {time_mark(self.__start_time)}')
        logger.info(f'Graph: {self.__path}')
        logger.info(f'Info : {self.__graph.vcount()} {self.__graph.ecount()}')
        logger.info(f'Edges: {self.__edges_sum}')
        logger.info(f'Func : {self.__detection_func.__name__}')
        logger.info(f'Args : {self.__func_args}')
        logger.info(f'Gap : {self.__interval}')
        logger.info(f'Parts: {len(self.__partitions)}')
        logger.info("Community1")
        subgraph0 = self.__partitions.subgraph(self.__community_index_0)
        logger.info(f'Community index: {self.__community_index_0}, '
                    f'Info : {subgraph0.vcount()} {subgraph0.ecount()}')
        logger.info("Community2")
        subgraph1 = self.__partitions.subgraph(self.__community_index_1)
        logger.info(f'Community index: {self.__community_index_1}, '
                    f'Info : {subgraph1.vcount()} {subgraph1.ecount()}')
        logger.info("=" * 60)
    def __quit(self):
        # Log the end banner and total elapsed wall-clock time.
        self.__end_time = time.time()
        logger.info("=" * 60)
        logger.info(f'Time : {time_mark(self.__end_time)}')
        logger.info(f'Total: {(self.__end_time - self.__start_time):10.4f} s')
        logger.info("=" * 60)
        logger.info("\n\n")
    def __preprocess(self):
        # Cache the edge set and make sure a partition is available.
        self.__edge_set = set(self.__graph.get_edgelist())
        # Run community detection only when no partition was injected.
        if not self.__partitions:
            self.__partitions = self.__detection_func(self.__graph, **self.__func_args)
        self.__set_necessary_info()
    def __set_necessary_info(self):
        # Collect per-vertex degree/index/side info for both communities and
        # build the membership vector expected after the merge.
        v_degree = list()
        v_index = list()
        v_partation = list()
        memberships = self.__partitions._membership
        # Normalise ordering so that index_0 < index_1.
        if self.__community_index_0 > self.__community_index_1:
            a = self.__community_index_1
            self.__community_index_1 = self.__community_index_0
            self.__community_index_0 = a
        for index in range(len(memberships)):
            if memberships[index] == self.__community_index_0:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(0)
            if memberships[index] == self.__community_index_1:
                v_index.append(index)
                v_degree.append(self.__graph.degree(index))
                v_partation.append(1)
        self.__degree_list = v_degree
        self.__vertex_list = v_index
        self.__vertex_part = v_partation
        # The final merged community id is self.__community_index_1.
        partation_expected = VertexClustering(graph=self.__partitions._graph, membership=list(self.__partitions._membership))
        # Fold community index_1 into index_0 ...
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == self.__community_index_1:
                partation_expected._membership[i] = self.__community_index_0
        # ... then renumber the last community into the freed id so the
        # community ids stay contiguous, and shrink the cluster count.
        for i in range(len(partation_expected._membership)):
            if partation_expected._membership[i] == partation_expected._len - 1:
                partation_expected._membership[i] = self.__community_index_1
        partation_expected._len -= 1
        #print(partation_expected._membership)
        self.__partitions_expected = partation_expected
|
[
"1960554271@qq.com"
] |
1960554271@qq.com
|
8608678850cf6031586f8b1bce7e8531244232c5
|
7869035b72807394154285d307e0597ee16f11d8
|
/src/data_loader.py
|
2a23407ac8c03daa931088d7b07b81b5ff04a48b
|
[] |
no_license
|
tiffany70072/TokenPositioning
|
cb74edae92e19c16f8ca763935e56b0f2e698b85
|
a2ab63640a2aff1abfccaa1c1486d8a97026ef0b
|
refs/heads/master
| 2022-07-19T11:21:04.716882
| 2020-04-17T06:02:18
| 2020-04-17T06:02:18
| 254,995,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,225
|
py
|
import numpy as np
import os
from sklearn.model_selection import train_test_split
def load_data(task, data_name, data_type):
    """Load encoder/decoder arrays for *task* from ``../data/<data_name>``.

    Args:
        task: one of ``"autoenc-last"`` or ``"token-posi"``.
        data_name: dataset directory name under ``../data``.
        data_type: ``"train"`` or ``"valid"``.

    Returns:
        ``(encoder_data, decoder_data, decoder_output)`` numpy arrays, where
        decoder_output is decoder_data shifted one time step for training.

    Raises:
        ValueError: if *task* is not recognised.
        AssertionError: if *data_type* is invalid or array sizes mismatch.
    """
    if task not in ("autoenc-last", "token-posi"):
        # The original code raised a plain string here, which is itself a
        # TypeError on Python 3 ("exceptions must derive from BaseException").
        raise ValueError("No this task for load_data: %r" % task)
    assert data_type == "train" or data_type == "valid", "no this data type."
    data_path = os.path.join("../data", data_name)
    encoder_data = np.load(os.path.join(data_path, "encoder_%s.npy" % data_type))
    decoder_data = np.load(os.path.join(data_path, "decoder_%s.npy" % data_type))
    assert encoder_data.shape[0] == decoder_data.shape[0], "data size not match."
    # Decoder targets are the decoder inputs shifted by one time step.
    decoder_output = set_decoder_output_data(decoder_data)
    return encoder_data, decoder_data, decoder_output
def set_decoder_output_data(decoder_input):
    """Build the decoder target array from the decoder input.

    Shifts each sequence left by one time step (the first token is dropped
    and the final slot becomes 0) and reshapes the 2-D array to 3-D
    ``(samples, timesteps, 1)`` for Keras training.

    The input array is not modified. The per-row Python loop of the
    original is replaced by an equivalent vectorised slice assignment.
    """
    decoder_output = decoder_input.copy()
    # Vectorised shift: output[:, t] = input[:, t+1]; last column zeroed
    # (multiplicative zeroing in the original — same result, same dtype).
    decoder_output[:, :-1] = decoder_input[:, 1:]
    decoder_output[:, -1] = 0
    return np.reshape(decoder_output, [decoder_output.shape[0], decoder_output.shape[1], 1])
"""
def cut_validation(self):
# TODO: cut training, validation and testing
split_result = data_reader.data_split(self.encoder_in, self.decoder_in, self.decoder_out)
self.encoder_in = split_result[0]
self.decoder_in = split_result[1]
self.decoder_out = split_result[2]
self.encoder_in_valid = split_result[3][:50000] # TODO: Deal with too many data.
self.decoder_in_valid = split_result[4][:50000]
self.decoder_out_valid = split_result[5][:50000]
self.encoder_in_test = split_result[6]
self.decoder_in_test = split_result[7]
self.decoder_out_test = split_result[8]
self.encoder_in = split_result[0]#[:3000]
self.decoder_in = split_result[1]#[:3000]
self.decoder_out = split_result[2]#[:3000]
print("(Cut validation) training size:", self.encoder_in.shape)
print("(Cut validation) validation size:", self.encoder_in_valid.shape)
print("(Cut validation) testing size:", self.encoder_in_test.shape)
"""
|
[
"tiffany70072@gmail.com"
] |
tiffany70072@gmail.com
|
bec7c5ea5c678a589efad67a06df92c0335711e2
|
dc29b57b9a025287574117a4e7c7fc27663d6063
|
/pydemo/src/wxdemo/gridbagdemo.py
|
3dc34973c575305cf8cc3a71ddc85a57d34b5233
|
[] |
no_license
|
bspeng922/pyutils
|
e4d0e988d5c168a3a9e97da2d09c6b714faa2c9a
|
4fa6c75a7159e03383c0f89d67d1ca37f3d0f0a5
|
refs/heads/master
| 2020-04-11T09:59:19.089455
| 2017-01-06T07:42:20
| 2017-01-06T07:42:20
| 7,434,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import wx
class Example(wx.Frame):
    """Small wx demo frame: a "Rename To" form laid out with GridBagSizer."""
    def __init__(self, parent, id):
        # Title is intentionally empty; fixed initial size 320x130.
        wx.Frame.__init__(self, parent, id, "", size=(320,130))
        self.InitUI()
    def InitUI(self):
        """Create the widgets and place them on a GridBagSizer."""
        panel = wx.Panel(self)
        text = wx.StaticText(panel, label="Rename To")
        tc = wx.TextCtrl(panel)
        btnok = wx.Button(panel, label="OK", size=(90,28))
        btnclose = wx.Button(panel, label="Close", size=(90,28))
        # 4px vertical/horizontal gaps between grid cells.
        sizer = wx.GridBagSizer(4,4)
        sizer.Add(text, pos=(0,0), flag=wx.TOP|wx.LEFT|wx.BOTTOM, border=5)
        # The text field spans 5 columns and stretches with the window.
        sizer.Add(tc, pos=(1,0), span=(1,5), flag=wx.EXPAND|wx.LEFT|wx.RIGHT, border=5)
        sizer.Add(btnok, pos=(3,3))
        sizer.Add(btnclose, pos=(3,4), flag=wx.RIGHT|wx.BOTTOM, border=5)
        # Column 1 / row 2 absorb extra space so the buttons stay bottom-right.
        sizer.AddGrowableCol(1)
        sizer.AddGrowableRow(2)
        panel.SetSizer(sizer)
if __name__ == "__main__":
    # Run the demo: create the top-level frame and start the wx event loop.
    app = wx.App()
    Example(None, -1).Show()
    app.MainLoop()
|
[
"bspeng922@gmail.com"
] |
bspeng922@gmail.com
|
6b09cc57289aebfadf3badeff4f9bef7c017e0dc
|
04cd6250630b3aad49219acbae0b7682f4263afb
|
/sbaas/analysis/analysis_stage02_isotopomer/stage02_isotopomer_dependencies.py
|
7813c8ad014ac51fbf424a16b962f14cfd089746
|
[
"Apache-2.0"
] |
permissive
|
SBRG/sbaas
|
ec04bd3a82248600328c053bc798d7d302fbaf9d
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
refs/heads/master
| 2021-01-21T23:29:26.713889
| 2015-06-24T17:16:59
| 2015-06-24T17:16:59
| 28,518,590
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297,680
|
py
|
'''isotopomer metabolomics analysis class'''
from sbaas.analysis.analysis_base import *
from .stage02_isotopomer_query import *
from .stage02_isotopomer_io import *
# Dependencies
import operator, json, csv
from copy import copy
# Dependencies from 3rd party
import scipy.io
from numpy import histogram, mean, std, loadtxt
import matplotlib as mpl
import matplotlib.pyplot as plt
import h5py
from sbaas.resources.molmass import Formula
# Dependencies from cobra
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.io.sbml import write_cobra_model_to_sbml_file
from cobra.io.mat import save_matlab_model
from cobra.manipulation.modify import convert_to_irreversible, revert_to_reversible
from cobra.flux_analysis.objective import update_objective
from cobra.flux_analysis.variability import flux_variability_analysis
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
from cobra.flux_analysis import flux_variability_analysis, single_deletion
from cobra.core.Reaction import Reaction
from cobra.core.Metabolite import Metabolite
class stage02_isotopomer_dependencies():
def __init__(self):
self.calculate = base_calculate();
#variables:
self.isotopomer_rxns_net_irreversible = {
'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'],
'stoichiometry':[1,1]},
'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'],
'stoichiometry':[1,1,1,1]},
'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'],
'stoichiometry':[1,1]},
'2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'],
'stoichiometry':[1,1,1]},
'pyr_to_23dhmb':{'reactions':['ACLS','KARA1_reverse'],
'stoichiometry':[1,1]},
#'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'],
# 'stoichiometry':[1,1,1]}, #cannot be lumped
'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'],
'stoichiometry':[1,1,1,1]},
'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'],
'stoichiometry':[1,1,1]},
'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'],
'stoichiometry':[1,1]},
'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'],
'stoichiometry':[1,1,1]},
'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'],
'stoichiometry':[1,1]},
'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],
'stoichiometry':[1,1]},
'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT_reverse'],
'stoichiometry':[1,1]}, #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs
'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'],
'stoichiometry':[1,1,1]},
'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'],
'stoichiometry':[1,1]},
'3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'],
'stoichiometry':[1,1,1]},
'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD2','ORPT_reverse','OMPDC'],
'stoichiometry':[1,1,1,1,1]},
#'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS_reverse','DHORD5','ORPT_reverse','OMPDC'],
# 'stoichiometry':[1,1,1,1,1]},
'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'],
'stoichiometry':[1,1,1,1]},
'IMPSYN_2':{'reactions':['AIRC2','AIRC3_reverse','PRASCSi','ADSL2r'],
'stoichiometry':[1,1,1,1]},
'IMPSYN_3':{'reactions':['AICART','IMPC_reverse'],
'stoichiometry':[1,1]},
'imp_to_gmp':{'reactions':['IMPD','GMPS2'],
'stoichiometry':[1,1]},
'imp_to_amp':{'reactions':['ADSS','ADSL1r'],
'stoichiometry':[1,1]},
#'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'],
# 'stoichiometry':[1,1]},
'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'],
'stoichiometry':[1,1,1]},
#'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'],
# 'stoichiometry':[1,1]}, #cannot be lumped
'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'],
'stoichiometry':[1,1,1,1,1]},
'FADSYN_2':{'reactions':['RBFSa','DB4PS'],
'stoichiometry':[1,1]},
'FADSYN_3':{'reactions':['RBFK','FMNAT'],
'stoichiometry':[1,1]},
'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'],
'stoichiometry':[1,1,1,1,1,1]},
#'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'],
# 'stoichiometry':[1,1,1,1,1,1]},
#'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'],
# 'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped
'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'],
'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'GTHSYN':{'reactions':['GLUCYS','GTHS'],
'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]},
'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]},
'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]},
'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]},
'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]},
'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]},
'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]},
'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]},
#'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction
'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]},
'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]},
'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]},
'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]},
'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]},
'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]},
'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR_reverse','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1]},
'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]},
'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]},
'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]},
'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT_reverse','GF6PTA'],'stoichiometry':[1,1,1,1]},
'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]},
'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]},
'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]},
'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]},
'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]},
'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]},
'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]},
'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]}
};
#model reduction functions
def load_ALEWt(self,anoxic = False, oxic = True, update_ampms2 = True, convert2irreversible = False):
'''load iJO1366 with the following changes:
1. update to AMPMS2 to account for carbon monoxide
2. changes to uptake bounds for glucose M9 media
3. constrain the model to use 'PFK' instead of 'F6PA', 'DHAPT' when grown on glucose
4. constrain the model to use the physiologically perferred glutamate synthesis enzymes
5. depending on oxygen availability, constrain the model to use the correct RNR enzymes
6. depending on oxygen availability, constrain the model to use the correct Dihydroorotate dehydrogenase (PyrD) enzymes
7. constrain fatty acid biosynthesis to use the physiologically preferred enzymes'''
ijo1366_sbml = settings.workspace_data+"/models/iJO1366.xml"
# Read in the sbml file and define the model conditions
cobra_model = create_cobra_model_from_sbml_file(ijo1366_sbml, print_time=True)
if update_ampms2:
# Update AMPMS2
coc = Metabolite('co_c','CO','carbon monoxide','c');
cop = Metabolite('co_p','CO','carbon monoxide','p');
coe = Metabolite('co_e','CO','carbon monoxide','e');
cobra_model.add_metabolites([coc,cop,coe])
ampms2_mets = {};
ampms2_mets[cobra_model.metabolites.get_by_id('air_c')] = -1;
ampms2_mets[cobra_model.metabolites.get_by_id('amet_c')] = -1;
ampms2_mets[cobra_model.metabolites.get_by_id('dad_DASH_5_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('met_DASH_L_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('4ampm_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('h_c')] = 3;
ampms2_mets[cobra_model.metabolites.get_by_id('for_c')] = 1;
ampms2_mets[cobra_model.metabolites.get_by_id('co_c')] = 1;
ampms2 = Reaction('AMPMS3');
ampms2.add_metabolites(ampms2_mets);
copp_mets = {};
copp_mets[cobra_model.metabolites.get_by_id('co_c')] = -1;
copp_mets[cobra_model.metabolites.get_by_id('co_p')] = 1;
copp = Reaction('COtpp');
copp.add_metabolites(copp_mets);
coex_mets = {};
coex_mets[cobra_model.metabolites.get_by_id('co_p')] = -1;
coex_mets[cobra_model.metabolites.get_by_id('co_e')] = 1;
coex = Reaction('COtex');
coex.add_metabolites(coex_mets);
cotrans_mets = {};
cotrans_mets[cobra_model.metabolites.get_by_id('co_e')] = -1;
cotrans = Reaction('EX_co_LPAREN_e_RPAREN_');
cotrans.add_metabolites(cotrans_mets);
cobra_model.add_reactions([ampms2,copp,coex,cotrans]);
cobra_model.remove_reactions(['AMPMS2']);
# Define the model conditions:
system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
for b in system_boundaries:
cobra_model.reactions.get_by_id(b).lower_bound = 0.0;
cobra_model.reactions.get_by_id(b).upper_bound = 0.0;
# Reset demand reactions
demand = ['DM_4CRSOL',
'DM_5DRIB',
'DM_AACALD',
'DM_AMOB',
'DM_MTHTHF',
'DM_OXAM'];
for d in demand:
cobra_model.reactions.get_by_id(d).lower_bound = 0.0;
cobra_model.reactions.get_by_id(d).upper_bound = 1000.0;
# Change the objective
update_objective(cobra_model,{'Ec_biomass_iJO1366_WT_53p95M':1.0})
# Assign KOs
# Specify media composition (M9 glucose):
cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -10.0;
cobra_model.reactions.get_by_id('EX_o2_LPAREN_e_RPAREN_').lower_bound = -18.0;
#uptake = ['EX_cl_LPAREN_e_RPAREN_',
# 'EX_so4_LPAREN_e_RPAREN_',
# 'EX_ca2_LPAREN_e_RPAREN_',
# 'EX_pi_LPAREN_e_RPAREN_',
# 'EX_fe2_LPAREN_e_RPAREN_',
# 'EX_cu2_LPAREN_e_RPAREN_',
# 'EX_zn2_LPAREN_e_RPAREN_',
# 'EX_cbl1_LPAREN_e_RPAREN_',
# 'EX_mobd_LPAREN_e_RPAREN_',
# 'EX_ni2_LPAREN_e_RPAREN_',
# 'EX_mn2_LPAREN_e_RPAREN_',
# 'EX_k_LPAREN_e_RPAREN_',
# 'EX_nh4_LPAREN_e_RPAREN_',
# 'EX_cobalt2_LPAREN_e_RPAREN_',
# 'EX_mg2_LPAREN_e_RPAREN_'];
uptake = ['EX_ca2_LPAREN_e_RPAREN_',
'EX_cbl1_LPAREN_e_RPAREN_',
'EX_cl_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_cobalt2_LPAREN_e_RPAREN_',
'EX_cu2_LPAREN_e_RPAREN_',
'EX_fe2_LPAREN_e_RPAREN_',
'EX_fe3_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_k_LPAREN_e_RPAREN_',
'EX_mg2_LPAREN_e_RPAREN_',
'EX_mn2_LPAREN_e_RPAREN_',
'EX_mobd_LPAREN_e_RPAREN_',
'EX_na1_LPAREN_e_RPAREN_',
'EX_nh4_LPAREN_e_RPAREN_',
'EX_ni2_LPAREN_e_RPAREN_',
'EX_pi_LPAREN_e_RPAREN_',
'EX_sel_LPAREN_e_RPAREN_',
'EX_slnt_LPAREN_e_RPAREN_',
'EX_so4_LPAREN_e_RPAREN_',
'EX_tungs_LPAREN_e_RPAREN_',
'EX_zn2_LPAREN_e_RPAREN_'];
for u in uptake:
cobra_model.reactions.get_by_id(u).lower_bound = -1000.0;
# Specify allowed secretion products
secrete = ['EX_meoh_LPAREN_e_RPAREN_',
'EX_5mtr_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_co_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_ac_LPAREN_e_RPAREN_',
'EX_fum_LPAREN_e_RPAREN_',
'EX_for_LPAREN_e_RPAREN_',
'EX_etoh_LPAREN_e_RPAREN_',
'EX_lac_DASH_L_LPAREN_e_RPAREN_',
'EX_pyr_LPAREN_e_RPAREN_',
'EX_succ_LPAREN_e_RPAREN_'];
for s in secrete:
cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
# Constrain specific reactions
noFlux = ['F6PA', 'DHAPT'];
ammoniaExcess = ['GLUDy']; # PMCID: 196288
# RNR control (DOI:10.1111/j.1365-2958.2006.05493.x)
# Dihydroorotate dehydrogenase (PyrD) (DOI:10.1016/S0076-6879(78)51010-0, PMID: 199252, DOI:S0969212602008316 [pii])
aerobic = ['RNDR1', 'RNDR2', 'RNDR3', 'RNDR4', 'DHORD2', 'ASPO6','LCARR','PFL','FRD2','FRD3']; # see DOI:10.1111/j.1365-2958.2011.07593.x; see DOI:10.1089/ars.2006.8.773 for a review
anaerobic = ['RNTR1c2', 'RNTR2c2', 'RNTR3c2', 'RNTR4c2', 'DHORD5', 'ASPO5','PDH','SUCDi']; # see DOI:10.1074/jbc.274.44.31291, DOI:10.1128/JB.00440-07
if anoxic:
rxnList = noFlux + ammoniaExcess + anaerobic;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
elif oxic:
rxnList = noFlux + ammoniaExcess + aerobic;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
else:
rxnList = noFlux + ammoniaExcess;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 0.0;
# Set the direction for specific reactions
# Fatty acid biosynthesis: DOI: 10.1016/j.ymben.2010.10.007, PMCID: 372925
fattyAcidSynthesis = ['ACCOAC', 'ACOATA', 'HACD1', 'HACD2', 'HACD3', 'HACD4', 'HACD5', 'HACD6', 'HACD7', 'HACD8', 'KAS14', 'KAS15', 'MACPD', 'MCOATA', '3OAR100', '3OAR120', '3OAR121', '3OAR140', '3OAR141', '3OAR160', '3OAR161', '3OAR180', '3OAR181', '3OAR40', '3OAR60', '3OAR80']
fattyAcidOxidation = ['ACACT1r', 'ACACT2r', 'ACACT3r', 'ACACT4r', 'ACACT5r', 'ACACT6r', 'ACACT7r', 'ACACT8r', 'ACOAD1f', 'ACOAD2f', 'ACOAD3f', 'ACOAD4f', 'ACOAD5f', 'ACOAD6f', 'ACOAD7f', 'ACOAD8f', 'CTECOAI6', 'CTECOAI7', 'CTECOAI8', 'ECOAH1', 'ECOAH2', 'ECOAH3', 'ECOAH4', 'ECOAH5', 'ECOAH6', 'ECOAH7', 'ECOAH8']
ndpk = ['NDPK1','NDPK2','NDPK3','NDPK4','NDPK5','NDPK7','NDPK8'];
rxnList = fattyAcidSynthesis + fattyAcidOxidation;
for rxn in rxnList:
cobra_model.reactions.get_by_id(rxn).lower_bound = 0.0;
cobra_model.reactions.get_by_id(rxn).upper_bound = 1000.0;
# convert to irreversible
if convert2irreversible: convert_to_irreversible(cobra_model);
return cobra_model;
def reduce_model(self,cobra_model,cobra_model_outFileName=None):
'''reduce model'''
# Input: cobra_model
# Output: cobra_model
# the lower and upper bounds have been set to 0.0
# for all reactions that cannot carry a flux
cobra_model.optimize()
sol_f = cobra_model.solution.f
fva_data = flux_variability_analysis(cobra_model, fraction_of_optimum=0.9,
objective_sense='maximize', the_reactions=None,
allow_loops=True, solver='gurobi',
the_problem='return', tolerance_optimality=1e-6,
tolerance_feasibility=1e-6, tolerance_barrier=1e-8,
lp_method=1, lp_parallel=0, new_objective=None,
relax_b=None, error_reporting=None,
number_of_processes=1, copy_model=False);
#with open("data/ijo1366_irrev_fva.json", 'w') as outfile:
# json.dump(data, outfile, indent=4);
#fva_data = json.load(open("data/ijo1366_irrev_fva.json"));
# Reduce model
rxns_noflux = [];
for k,v in fva_data.items():
if v['minimum'] == 0.0 and v['maximum'] == 0.0:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
rxns_noflux.append(k);
if cobra_model_outFileName:
write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
def reduce_model_pfba(self,cobra_model,cobra_model_outFileName=None,fba_outFileName=None,subs=[]):
'''reduce model using pfba'''
# Input: cobra_model
# cobra_model_outFileName
# subs = string of specific subsystems to reduce
# Output: cobra_model
# the lower and upper bounds have been set to 0.0
# for all reactions that cannot carry a flux
cobra_model.optimize()
sol_f = cobra_model.solution.f
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Reduce model
rxns_noflux = [];
# set lb and ub for all reactions with 0 flux to 0;
for k,v in cobra_model.solution.x_dict.items():
if (v < 0.0 or v == 0.0) and cobra_model.reactions.get_by_id(k).subsystem in subs:
cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
rxns_noflux.append(k);
if cobra_model_outFileName:
write_cobra_model_to_sbml_file(cobra_model,cobra_model_outFileName)
if pfba_outFileName:
# Write pfba solution to file
with open(pfba_outFileName,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v]);
cobra_model.optimize()
sol_reduced_f = cobra_model.solution.f
# Check that the reduced model is consistent with the original model
if not sol_f == sol_reduced_f:
print('reduced model is inconsistent with the original model')
print('original model solution: ' + str(sol_f))
print('reduced model solution: ' + str(sol_reduced_f))
def add_net_reaction(self,cobra_model_IO, rxn_dict_I,remove_reverse=False):
'''add a net reaction to the model after removing
the individual reactions'''
# input: rxn_dict_I = dictionary of net reaction ids and
# corresponding list of individual reaction ids
# output: cobra_model_IO = individual reactions replaced with a
# net reaction
cobra_model_IO.optimize();
sol_orig = cobra_model_IO.solution.f;
print("original model solution", sol_orig)
try:
cobra_model_tmp = cobra_model_IO.copy2();
except KeyError as e:
print(e);
# make net reactions:
rxn_dict_net = {};
for k,v in rxn_dict_I.items():
rxn_net = make_net_reaction(cobra_model_tmp, k, v['reactions'],v['stoichiometry']);
if rxn_net:
rxn_net.lower_bound = 0.0;
rxn_net.upper_bound = 1000.0;
rxn_net.objective_coefficient = 0.0;
else:
print('an error occured in add_net_reaction')
exit(-1)
#rxn_net.reversibility = False;
rxn_dict_net[k] = (v['reactions'],rxn_net);
# add replace individual reactions with net reaction
for k,v in rxn_dict_net.items():
cobra_model_IO.remove_reactions(v[0]);
# remove the reverse reaction if it exists for irreversible models
if remove_reverse:
for rxn in v[0]:
if '_reverse' in rxn:
rxn_rev = rxn.replace('_reverse','')
if cobra_model_IO.reactions.has_id(rxn_rev): cobra_model_IO.remove_reactions(rxn_rev);
else:
rxn_rev = rxn+'_reverse';
if cobra_model_IO.reactions.has_id(rxn_rev): cobra_model_IO.remove_reactions(rxn_rev);
cobra_model_IO.add_reaction(v[1]);
cobra_model_IO.optimize();
sol_new = cobra_model_IO.solution.f;
print(k, sol_new)
def make_net_reaction(self,cobra_model_I, rxn_id_I, rxn_list_I,stoich_list_I):
'''generate a net reaction from a list of individual reactions'''
# input: rxn_list_I = list of reaction IDs
# output: rxn_net_O = net reaction (cobra Reaction object)
from cobra.core.Reaction import Reaction
#rxn_net_O = cobra_model_I.reactions.get_by_id(rxn_list_I[0]);
#for r in rxn_list_I[1:]:
# if cobra_model_I.reactions.get_by_id(r).reversibility:
# print r + " is reversible!";
# print "continue?"
# rxn_net_O += cobra_model_I.reactions.get_by_id(r);
# check input:
if not len(stoich_list_I) == len(rxn_list_I):
print("error in " + rxn_id_I + ": there are " + str(len(rxn_list_I)) + " rxn ids and " + str(len(stoich_list_I)) + " coefficients");
exit(-1);
rxn_net_O = Reaction(rxn_id_I);
for i,r in enumerate(rxn_list_I):
mets = {};
metlist = [];
metlist = cobra_model_I.reactions.get_by_id(r).products + cobra_model_I.reactions.get_by_id(r).reactants;
for met in metlist:
mets[met] = cobra_model_I.reactions.get_by_id(r).get_coefficient(met)*stoich_list_I[i];
rxn_net_O.add_metabolites(mets);
rxn_net_O.subsystem = cobra_model_I.reactions.get_by_id(r).subsystem; #copy over the subsystem
# check net reaction
#if not rxn_net_O.check_mass_balance():
#print "error: " + rxn_id_I + " is not elementally balanced";
#print rxn_net_O.id;
#print rxn_net_O.build_reaction_string();
return rxn_net_O;
def get_solBySub(self,cobra_model_I,sol_I,sub_I):
sol_O = {};
for k,v in sol_I.items():
try:
if cobra_model_I.reactions.get_by_id(k).subsystem == sub_I:
sol_O[k] = v;
except:
print(k + ' reaction not found')
return sol_O;
def groupBySameFlux(self,cobra_model_I,sol_I):
flux_list = [];
for r,f in sol_I.items():
if not f in flux_list and float(f)>0.0:
flux_list.append(f)
sameFlux_O = {};
for f in flux_list:
rxn_list = [];
for r,v in sol_I.items():
if v==f:
rxn_list.append(r);
stoich = [1]*len(rxn_list)
rxnName = '';
for rxn in rxn_list:
rxnName = rxnName + rxn + '_';
rxnName = rxnName[:-1];
# check that the reaction name is less than 225 characters
if len(rxnName)>224:
rxnName = rxnName[:224];
sameFlux_O[rxnName] = {'reactions':rxn_list,
'stoichiometry':stoich,
'flux':f};
#netRxn = make_net_reaction(cobra_model_copy,rxnName,rxn_list,stoich)
#sameFlux_O[rxnName] = {'reactions':rxn_list,
# 'stoichiometry':stoich,
# 'flux':f,
# 'net':netRxn};
return sameFlux_O
def add_net_reaction_subsystem(self,cobra_model_IO,sol_I,subs_I):
'''make net reactions for specific subsystems grouped
by reactions that have the same flux from pfba'''
#input: cobra_model
# sol_I = pfba solution
# sub_I = list of model subsystems
#output: cobra_model
# convert model to irreversible
# convert_to_irreversible(cobra_model_IO);
# Make net reactions for pathways outside of the scope
# of the isotopomer model
for s in subs_I:
sol = get_solBySub(cobra_model_IO,sol_I,s)
sameFlux = groupBySameFlux(cobra_model_IO,sol)
netRxns = {};
for k,v in sameFlux.items():
if len(v['reactions'])>1:
netRxns[k] = v;
add_net_reaction(cobra_model_IO,netRxns);
# add subsystem information back in
for k in sameFlux.keys():
cobra_model_IO.reactions.get_by_id(k).subsystem = s
remove_noflux_reactions(cobra_model_IO,sol_I,subs_I)
# convert model back to reversible
# revert_to_reversible(cobra_model_IO);
    def remove_noflux_reactions(self,cobra_model,sol=None,subs=[]):
        '''remove noflux reactions'''
        # Input: cobra_model
        #        sol = pfba solution (dict of reaction id -> flux); when given,
        #              reactions with flux <= 0 are zeroed and removed
        #        subs = string of specific subsystems to reduce
        # Output: cobra_model
        #         if the lower and upper bounds are zero, the reactions
        #         are removed
        # NOTE(review): subs=[] is a mutable default argument; safe only as
        # long as callers never mutate it.
        # record the optimum before removal for the consistency check below
        cobra_model.optimize()
        sol_f = cobra_model.solution.f
        # Reduce model
        rxns_noflux = [];
        # set lb and ub for all reactions with 0 flux to 0;
        # four cases: (sol, subs), (sol only), (subs only), (neither)
        if sol:
            if subs:
                # remove zero/negative-flux reactions restricted to the
                # requested subsystems
                for k,v in sol.items():
                    try:
                        if (float(v) < 0.0 or float(v) == 0.0) and cobra_model.reactions.get_by_id(k).subsystem in subs:
                            cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                            cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                            cobra_model.remove_reactions(k)
                            rxns_noflux.append(k);
                    # deliberate best-effort: solution ids absent from the
                    # model are reported and skipped
                    except:
                        print('reaction is not in model: ' + k)
            else:
                # remove zero/negative-flux reactions model-wide
                for k,v in sol.items():
                    try:
                        if (float(v) < 0.0 or float(v) == 0.0):
                            cobra_model.reactions.get_by_id(k).lower_bound = 0.0;
                            cobra_model.reactions.get_by_id(k).upper_bound = 0.0;
                            cobra_model.remove_reactions(k)
                            rxns_noflux.append(k);
                    # deliberate best-effort: see note above
                    except:
                        print('reaction is not in model: ' + k)
        else:
            if subs:
                # no solution given: remove already-blocked (lb == ub == 0)
                # reactions restricted to the requested subsystems
                for r in cobra_model.reactions:
                    if r.lower_bound == 0.0 and r.upper_bound == 0.0 and cobra_model.reactions.get_by_id(r.id).subsystem in subs:
                        cobra_model.remove_reactions(r.id)
            else:
                # no solution given: remove all already-blocked reactions
                for r in cobra_model.reactions:
                    if r.lower_bound == 0.0 and r.upper_bound == 0.0:
                        cobra_model.remove_reactions(r.id)
        cobra_model.optimize()
        sol_reduced_f = cobra_model.solution.f
        # Check that the reduced model is consistent with the original model
        if not sol_f == sol_reduced_f:
            print('reduced model is inconsistent with the original model')
            print('original model solution: ' + str(sol_f))
            print('reduced model solution: ' + str(sol_reduced_f))
def get_reactionsInfo(self,cobra_model):
'''return the number of reactions and the number of reactions
that cannot carry a flux (i.e. lb and ub of 0.0)'''
nrxn_O = len(cobra_model.reactions);
nrxn_noflux_O = 0;
for r in cobra_model.reactions:
if r.lower_bound == 0.0 and r.upper_bound == 0.0:
nrxn_noflux_O += 1;
return nrxn_O, nrxn_noflux_O
#model reduction iteration functions
def makeIsotopomerModel_iteration01(self,pfba_file,netrxn_irreversible_model_filename,fva_reduced_model_filename,reduced_lbub_filename):
'''iteration 1:
identification of reactions that can be lumped in pathways outside the model scope'''
cobra_model = self.load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
# Add lumped isotopomer reactions
self.add_net_reaction(cobra_model,isotopomer_rxns_net_irreversible);
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_file,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_file,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# Make net reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
'Murein Biosynthesis'
'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
#'Transport, Inner Membrane',
#'Transport, Outer Membrane',
#'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism'];
self.add_net_reaction_subsystem(cobra_model,pfba_sol,subs);
self.remove_noflux_reactions(cobra_model,pfba_sol,['Transport, Outer Membrane Porin','Transport, Inner Membrane','Transport, Outer Membrane'])
revert_to_reversible(cobra_model);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Remove all reactions with 0 flux
self.remove_noflux_reactions(cobra_model);
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
def makeIsotopomerModel_iteration02(self,pfba_filename,fva_reduced_model_filename,netrxn_irreversible_model_filename,reduced_lbub_filename):
'''iteration 2:
addition of finalized lumped reactions that are in pathways that are within the scope of the model
and reduction by removing reactions with zero optimal minimal flux outside the scope of the model'''
cobra_model = load_ALEWt();
# Make the model irreversible for downstream manipulations:
convert_to_irreversible(cobra_model);
cobra_model.optimize();
# Add lumped isotopomer reactions
self.add_net_reaction(cobra_model,isotopomer_rxns_net_irreversible,True);
cobra_model.optimize();
# Find minimal flux solution:
pfba = optimize_minimal_flux(cobra_model,True,solver='gurobi');
# Write pfba solution to file
with open(pfba_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Flux','Subsystem'])
for k,v in cobra_model.solution.x_dict.items():
writer.writerow([k,v,cobra_model.reactions.get_by_id(k).subsystem]);
# Read in pfba solution
pfba_sol = {};
with open(pfba_filename,mode='r') as infile:
dictreader = csv.DictReader(infile)
for r in dictreader:
pfba_sol[r['Reaction']] = r['Flux'];
# remove noflux reactions for pathways outside of the scope
# of the isotopomer model
subs = ['Cell Envelope Biosynthesis',
'Glycerophospholipid Metabolism',
'Lipopolysaccharide Biosynthesis / Recycling',
'Membrane Lipid Metabolism',
'Murein Biosynthesis'
'Murein Recycling',
'Cofactor and Prosthetic Group Biosynthesis',
'Transport, Inner Membrane',
'Transport, Outer Membrane',
'Transport, Outer Membrane Porin',
'tRNA Charging',
'Unassigned',
#'Exchange',
'Inorganic Ion Transport and Metabolism',
'Nitrogen Metabolism',
'Alternate Carbon Metabolism'];
self.remove_noflux_reactions(cobra_model,pfba_sol,subs)
# Reduce model using FVA:
self.reduce_model(cobra_model,fva_reduced_model_filename)
# Reset secretion products that may have been turned off
secrete = ['EX_meoh_LPAREN_e_RPAREN_',
'EX_5mtr_LPAREN_e_RPAREN_',
'EX_h_LPAREN_e_RPAREN_',
'EX_co2_LPAREN_e_RPAREN_',
'EX_co_LPAREN_e_RPAREN_',
'EX_h2o_LPAREN_e_RPAREN_',
'EX_ac_LPAREN_e_RPAREN_',
'EX_fum_LPAREN_e_RPAREN_',
'EX_for_LPAREN_e_RPAREN_',
'EX_etoh_LPAREN_e_RPAREN_',
'EX_lac_DASH_L_LPAREN_e_RPAREN_',
'EX_pyr_LPAREN_e_RPAREN_',
'EX_succ_LPAREN_e_RPAREN_'];
for s in secrete:
cobra_model.reactions.get_by_id(s).upper_bound = 1000.0;
# Remove all reactions with 0 flux
r1,r2 = self.get_reactionsInfo(cobra_model);
while r2 !=0:
self.remove_noflux_reactions(cobra_model);
r1,r2 = self.get_reactionsInfo(cobra_model);
print(r1,r2);
# write model to sbml
write_cobra_model_to_sbml_file(cobra_model,netrxn_irreversible_model_filename)
with open(reduced_lbub_filename,mode='wb') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Reaction','Formula','LB','UB','Subsystem'])
for r in cobra_model.reactions:
writer.writerow([r.id,
r.build_reaction_string(),
r.lower_bound,
r.upper_bound,
r.subsystem]);
    def makeIsotopomerModel_cobraMAT(self,model_filename,xml_filename,mat_filename,csv_filename,isotopomer_mapping_filename,ko_list=[],flux_dict={},description=None):
        '''iteration 3:
        Remove reactions that are thermodynamically unfavorable and add isotopomer data'''
        # model_filename: SBML model to load
        # xml_filename: SBML output path (without isotopomer fields)
        # mat_filename: matlab output path (with isotopomer fields)
        # csv_filename: csv dump of the final reactions
        # isotopomer_mapping_filename: csv of atom mappings
        # ko_list: reaction ids to knock out (bounds set to 0)
        # flux_dict: reaction id -> {'lb': ..., 'ub': ...} constraints
        # NOTE(review): ko_list=[] and flux_dict={} are mutable defaults;
        # safe only as long as callers never mutate them.
        # Read in the sbml file and define the model conditions
        cobra_model = create_cobra_model_from_sbml_file(model_filename, print_time=True)
        # Modify glucose uptake:
        # replace the irreversible-model reaction EX_glc(e)_reverse with a
        # forward EX_glc(e) whose bounds are the negated/swapped originals
        if cobra_model.reactions.has_id('EX_glc_LPAREN_e_RPAREN__reverse'):
            lb,ub = cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').lower_bound,cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN__reverse').upper_bound;
            EX_glc_mets = {};
            EX_glc_mets[cobra_model.metabolites.get_by_id('glc_DASH_D_e')] = -1;
            EX_glc = Reaction('EX_glc_LPAREN_e_RPAREN_');
            EX_glc.add_metabolites(EX_glc_mets);
            cobra_model.add_reaction(EX_glc)
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').lower_bound = -ub;
            cobra_model.reactions.get_by_id('EX_glc_LPAREN_e_RPAREN_').upper_bound = lb;
            cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN__reverse'])
        ## Remove thermodynamically infeasible reactions:
        #infeasible = [];
        #loops = [];
        #cobra_model.remove_reactions(infeasible + loops);
        # Apply KOs, if any:
        for ko in ko_list:
            cobra_model.reactions.get_by_id(ko).lower_bound = 0.0;
            cobra_model.reactions.get_by_id(ko).upper_bound = 0.0;
        # Apply flux constraints, if any:
        for rxn,flux in flux_dict.items():
            cobra_model.reactions.get_by_id(rxn).lower_bound = flux['lb'];
            cobra_model.reactions.get_by_id(rxn).upper_bound = flux['ub'];
        # Change description, if any:
        if description:
            cobra_model.description = description;
        # Read in isotopomer model
        isotopomer_mapping = self.read_isotopomer_mapping_csv(isotopomer_mapping_filename); #broken
        isotopomer_str = self.build_isotopomer_str(isotopomer_mapping);
        # write model to sbml (before the isotopomer fields are attached)
        write_cobra_model_to_sbml_file(cobra_model,xml_filename)
        # Add isotopomer field to model
        for r in cobra_model.reactions:
            if r.id in isotopomer_str:
                cobra_model.reactions.get_by_id(r.id).isotopomer = isotopomer_str[r.id];
            else:
                cobra_model.reactions.get_by_id(r.id).isotopomer = '';
        # Add null basis:
        cobra_model_array = cobra_model.to_array_based_model();
        N = self.calculate.null(cobra_model_array.S.todense()) #convert S from sparse to full and compute the nullspace
        cobra_model.N = N;
        # solve and save pFBA for later use:
        optimize_minimal_flux(cobra_model,True,solver='gurobi');
        # add match field:
        match = numpy.zeros(len(cobra_model.reactions));
        cobra_model.match = match;
        # write model to mat
        save_matlab_model_isotopomer(cobra_model,mat_filename);
        # dump the final reactions for inspection
        with open(csv_filename,mode='wb') as outfile:
            writer = csv.writer(outfile)
            writer.writerow(['Reaction','Formula','LB','UB','Genes','Subsystem','Isotopomer'])
            for r in cobra_model.reactions:
                writer.writerow([r.id,
                                 r.build_reaction_string(),
                                 r.lower_bound,
                                 r.upper_bound,
                                 r.gene_reaction_rule,
                                 r.subsystem,
                                 r.isotopomer]);
#ecoli_INCA modifications
    def expand_ecoliINCA01(self,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
        '''expand the INCA Ecoli model to account for additional metabolites

        Loads the stored model (sbml or json) for model_id_I, copies the atom
        mapping rows of mapping_id_I under mapping_id_O, adds the glyoxylate
        shunt (ICL, MALS) plus a set of glucose-uptake reactions with their
        carbon atom mappings, and writes the expanded model and mappings back
        to the database under model_id_O/mapping_id_O.'''
        query = stage02_isotopomer_query()
        # get the xml model
        cobra_model_sbml = ''
        cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
        # load the model
        # NOTE(review): the model blob is round-tripped through fixed paths
        # under data/; concurrent calls would clobber each other's temp files
        if cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
            else:
                print('file_type not supported')
        #get the atomMapping_reactions
        atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
        #change the mapping_id
        for cnt,row in enumerate(atomMappingReactions):
            atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
        #expand the model to include glyoxylate shunt:
        #get metabolites not in the model
        # NOTE(review): metabolite definitions are pulled from the hard-coded
        # source model '140407_iDM2014' — confirm this stays in sync
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','glx_c');
        glx = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        glx.charge = met_row['charge']
        #get metabolites in the model
        icit = cobra_model.metabolites.get_by_id('icit_c')
        succ = cobra_model.metabolites.get_by_id('succ_c')
        accoa = cobra_model.metabolites.get_by_id('accoa_c')
        mal = cobra_model.metabolites.get_by_id('mal_DASH_L_c')
        #make ICL (isocitrate lyase): icit_c --> glx_c + succ_c
        rxn_mets = {};
        rxn_mets[icit] = -1;
        rxn_mets[succ] = 1;
        rxn_mets[glx] = 1;
        rxn = Reaction('ICL');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='ICL';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1,1]
        row_tmp['reactants_ids_tracked']=['icit_c']
        row_tmp['products_ids_tracked']=['glx_c','succ_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C"], ["C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1], [0, 1, 2, 3]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['ab','fcde']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make MALS (malate synthase): glx_c + accoa_c --> mal_DASH_L_c
        rxn_mets = {};
        rxn_mets[glx] = -1;
        rxn_mets[accoa] = -1;
        rxn_mets[mal] = 1;
        rxn = Reaction('MALS');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='MALS';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['accoa_c','glx_c']
        row_tmp['products_ids_tracked']=['mal_DASH_L_c']
        row_tmp['reactants_elements_tracked']=[["C", "C"], ["C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1], [0, 1]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3]]
        row_tmp['reactants_mapping']=['ab','cd']
        row_tmp['products_mapping']=['cdba']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #add in glucose transporters and intracellular glc
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
        glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        glc_c.charge = met_row['charge']
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
        glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
        glc_e.charge = met_row['charge']
        # .ext/.pre pseudo-metabolites distinguish unlabeled feed glucose
        # from the pre-labeled tracer pool for INCA
        glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
        glcext.charge = met_row['charge']
        glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
        glcpre.charge = met_row['charge']
        #get metabolites in the model
        pep = cobra_model.metabolites.get_by_id('pep_c')
        pyr = cobra_model.metabolites.get_by_id('pyr_c')
        g6p = cobra_model.metabolites.get_by_id('g6p_c')
        #make EX_glc_LPAREN_e_RPAREN_ (replaces the existing exchange)
        rxn_mets = {};
        rxn_mets[glcext] = -1;
        rxn_mets[glc_e] = 1;
        rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
        cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
        row_tmp['products_ids_tracked']=['glc_DASH_D_e']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make EX_glc_LPAREN_e_RPAREN__pre (replaces reaction 'v60')
        rxn_mets = {};
        rxn_mets[glcpre] = -1;
        rxn_mets[glc_e] = 1;
        rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
        cobra_model.remove_reactions(['v60']);
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
        row_tmp['products_ids_tracked']=['glc_DASH_D_e']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c"
        rxn_mets = {};
        rxn_mets[glc_e] = -1;
        rxn_mets[pep] = -1;
        rxn_mets[g6p] = 1;
        rxn_mets[pyr] = 1;
        rxn = Reaction('GLCptspp');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='GLCptspp';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
        row_tmp['products_stoichiometry_tracked']=[1,1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
        row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
        row_tmp['reactants_mapping']=['abcdef','ghi']
        row_tmp['products_mapping']=['abcdef','ghi']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c"
        rxn_mets = {};
        rxn_mets[glc_e] = -1;
        rxn_mets[glc_c] = 1;
        rxn = Reaction('GLCt2pp');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='GLCt2pp';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
        row_tmp['products_ids_tracked']=['glc_DASH_D_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c"
        rxn_mets = {};
        rxn_mets[glc_c] = -1;
        rxn_mets[g6p] = 1;
        rxn = Reaction('HEX1');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='HEX1';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
        row_tmp['products_ids_tracked']=['g6p_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        ##expand the model
        #acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
        #cit = cobra_model.metabolites.get_by_id('cit_c')
        #icit = cobra_model.metabolites.get_by_id('icit_c')
        #e4p = cobra_model.metabolites.get_by_id('e4p_c')
        #r5p = cobra_model.metabolites.get_by_id('r5p_c')
        #phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
        #his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
        #phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
        #prpp = Metabolite('prpp_c','C5H8O14P3','5-Phospho-alpha-D-ribose 1-diphosphate','c');
        ## update selected reactions to account for new metabolites
        #for rxn,row in enumerate(atomMappingReactions):
        #    if row['rxn_id'] == 'ACONTa_ACONTb':
        #        #split ACONTa_ACONTb
        #        aconta_mets = {};
        #        aconta_mets[cit] = -1;
        #        aconta_mets[acon] = 1;
        #        aconta = Reaction('ACONTa');
        #        aconta.add_metabolites(aconta_mets);
        #        cobra_model.remove_reactions(['ACONTa_ACONTb']);
        #        cobra_model.add_reactions([aconta]);
        #        cobra_model.repair();
        #        # Update the mapping ids
        #        atomMappingReactions[rxn]['products_ids_tracked']=['acon_DASH_C_c']
        #        atomMappingReactions[rxn]['comment_']='updated'
        #    elif row['rxn_id'] == 'PheSYN':
        #        #split PheSYN to add in phpyr
        #        # Update the mapping_ids
        #        atomMappingReactions[rxn]['mapping_id']=mapping_id_O;
        #        atomMappingReactions[rxn]['rxn_id']=rxn_ids[rxn];
        #        atomMappingReactions[rxn]['rxn_description']='';
        #        atomMappingReactions[rxn]['rxn_equation']='';
        #        atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[]
        #        atomMappingReactions[rxn]['products_stoichiometry_tracked']=[]
        #        atomMappingReactions[rxn]['reactants_ids_tracked']=[]
        #        atomMappingReactions[rxn]['products_ids_tracked']=[]
        #        atomMappingReactions[rxn]['reactants_elements_tracked']=[]
        #        atomMappingReactions[rxn]['products_elements_tracked']=[]
        #        atomMappingReactions[rxn]['reactants_positions_tracked']=[]
        #        atomMappingReactions[rxn]['products_positions_tracked']=[]
        #        atomMappingReactions[rxn]['reactants_mapping']=[]
        #        atomMappingReactions[rxn]['products_mapping']=[]
        #        atomMappingReactions[rxn]['used_']=True
        #        atomMappingReactions[rxn]['comment_']=None
        #    elif row['rxn_id'] == 'HisSYN':
        #        # split HisSYN to add in prpp
        #        #cobra_model.reactions.get_by_id(rxn_ids[rxn])
        #        #cobra_model.reactions.get_by_id(rxn_ids[rxn])
        #        # Update the mapping_ids
        #        atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
        #    # combine TKT1a and TKT1b
        #    # combine TKT2a and TKT2b
        #    # split PPC_PPCK
        #    # split PTAr_ACKr_ACS
        ## add in ACONTb
        #acontb_mets = {};
        #acontb_mets[acon] = -1;
        #acontb_mets[icit] = 1;
        #acontb = Reaction('ACONTb');
        #acontb.add_metabolites(acontb_mets);
        #cobra_model.add_reactions([acontb]);
        #cobra_model.repair();
        ## add in ACONTb mapping
        #row={};
        #row['mapping_id']=mapping_id_O;
        #row['rxn_id']='ACONTb';
        #row['rxn_description']='';
        #row['rxn_equation']='';
        #row['reactants_stoichiometry_tracked']=[-1]
        #row['products_stoichiometry_tracked']=[1]
        #row['reactants_ids_tracked']=['acon_DASH_C_c']
        #row['products_ids_tracked']=['icit_c']
        #row['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        #row['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        #row['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        #row['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        #row['reactants_mapping']=['abcdef']
        #row['products_mapping']=['abcdef']
        #row['used_']=True
        #row['comment_']='added'
        #atomMappingReactions.append(row)
        ## add in e4p_to_phpyr
        ## add in r5p_to_prp
        #r5p_to_prpp_mets = {};
        #r5p_to_prpp_mets[e4p] = -1;
        #r5p_to_prpp_mets[prpp] = 1;
        #r5p_to_prpp = Reaction('r5p_to_prpp');
        #r5p_to_prpp.add_metabolites(r5p_to_prpp_mets);
        #cobra_model.add_reactions([r5p_to_prpp]);
        #cobra_model.repair();
        ## add in r5p_to_prpp mapping
        #row={};
        #row['mapping_id']=mapping_id_O;
        #row['rxn_id']='r5p_to_prpp';
        #row['rxn_description']='';
        #row['rxn_equation']='';
        #row['reactants_stoichiometry_tracked']=[-1]
        #row['products_stoichiometry_tracked']=[1]
        #row['reactants_ids_tracked']=['r5p_c']
        #row['products_ids_tracked']=['prpp_c']
        #row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
        #row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
        #row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
        #row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
        #row['reactants_mapping']=['abcde']
        #row['products_mapping']=['abcde']
        #row['used_']=True
        #row['comment_']='added'
        #atomMappingReactions.append(row)
        # write the model to a temporary file
        save_json_model(cobra_model,'data/cobra_model_tmp.json')
        # add the model information to the database
        io = stage02_isotopomer_io()
        dataStage02IsotopomerModelRxns_data = [];
        dataStage02IsotopomerModelMets_data = [];
        dataStage02IsotopomerModels_data,\
            dataStage02IsotopomerModelRxns_data,\
            dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
        io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
        io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
        io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
        #add atomMappingReactions to the database
        io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
    def expand_ecoliINCA02(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
        '''Expand the INCA E. coli model to account for additional metabolites.

        Loads the cobra model stored under model_id_I (sbml or json blob from
        the database), adds biosynthesis reactions for cofactors/nucleotides
        (ATPSYN, GTPSYN, COASYN, FADSYN, UTPSYN, CBMKr/CBMKr_reverse) and the
        valine transamination pair (VPMATr/VPMATr_reverse), rewrites selected
        atom mappings so that coenzyme A carbons are tracked, saves the
        expanded model and mappings to the database under model_id_O /
        mapping_id_O, and finally constructs tracked-compound atom mappings
        for the newly added reactions.

        Args:
            experiment_id_I: experiment id used when generating missing
                metabolite mappings
            model_id_I: id of the source model to expand
            mapping_id_I: id of the source atom-mapping set
            date_I: date stamp recorded with the new model rows
            model_id_O: id under which the expanded model is stored
            mapping_id_O: id under which the expanded mapping set is stored

        Side effects: writes temporary files data/cobra_model_tmp.{xml,json}
        and inserts rows into the stage02 isotopomer tables.
        '''
        query = stage02_isotopomer_query()
        # get the xml model
        cobra_model_sbml = ''
        cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
        # load the model from the stored blob; format is recorded in 'file_type'
        if cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    # file.close() is redundant inside the with block (harmless)
                    file.close()
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    # file.close() is redundant inside the with block (harmless)
                    file.close()
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
            else:
                # NOTE(review): unsupported file_type leaves cobra_model
                # undefined and the next statements will raise NameError
                print('file_type not supported')
        #get the atomMapping_reactions
        atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
        #change the mapping_id
        for cnt,row in enumerate(atomMappingReactions):
            atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
        # accoa is fetched but only the mapping rows below reference accoa_c by id
        accoa = cobra_model.metabolites.get_by_id('accoa_c')
        #expand the model to include ATPSYN:
        #get metabolites not in the model
        # NOTE(review): source model id '140407_iDM2014' is hard-coded here
        # (not model_id_I) — confirm this cross-model lookup is intentional
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','atp_c');
        atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        atp.charge = met_row['charge']
        #get metabolites in the model
        r5p = cobra_model.metabolites.get_by_id('r5p_c')
        fthf = cobra_model.metabolites.get_by_id('10fthf_c')
        gly = cobra_model.metabolites.get_by_id('gly_c')
        co2 = cobra_model.metabolites.get_by_id('co2_c')
        glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
        gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
        asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
        fum = cobra_model.metabolites.get_by_id('fum_c')
        #make ATPSYN (irreversible)
        # NOTE(review): repeated assignments to the same dict key (fthf, asp,
        # fum below) overwrite rather than accumulate, so the net coefficient
        # stays +/-1. The atom mappings later list 10fthf twice and produce
        # fum twice, suggesting a coefficient of 2 was intended — confirm.
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -1;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[fthf] = -1;
        rxn_mets[gln] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[atp] = 1;
        rxn_mets[glu] = 1;
        rxn_mets[fum] = 1;
        rxn_mets[fum] = 1;
        rxn = Reaction('ATPSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        # irreversible: lower bound pinned to 0
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include GTPSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','gtp_c');
        gtp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        gtp.charge = met_row['charge']
        #get metabolites in the model
        r5p = cobra_model.metabolites.get_by_id('r5p_c')
        fthf = cobra_model.metabolites.get_by_id('10fthf_c')
        gly = cobra_model.metabolites.get_by_id('gly_c')
        co2 = cobra_model.metabolites.get_by_id('co2_c')
        glu = cobra_model.metabolites.get_by_id('glu_DASH_L_c')
        gln = cobra_model.metabolites.get_by_id('gln_DASH_L_c')
        asp = cobra_model.metabolites.get_by_id('asp_DASH_L_c')
        fum = cobra_model.metabolites.get_by_id('fum_c')
        #make GTPSYN (irreversible)
        # NOTE(review): same overwrite pattern as ATPSYN (fthf, gln, glu
        # assigned twice) — net coefficient stays +/-1; confirm intent.
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[fthf] = -1;
        rxn_mets[gly] = -1;
        rxn_mets[co2] = -1;
        rxn_mets[fthf] = -1;
        rxn_mets[gln] = -1;
        rxn_mets[gln] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[gtp] = 1;
        rxn_mets[glu] = 1;
        rxn_mets[glu] = 1;
        rxn_mets[fum] = 1;
        rxn = Reaction('GTPSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include VPMATr_reverse and VPMATr:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','3mob_c');
        mob3 = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        mob3.charge = met_row['charge']
        #get metabolites in the model
        val = cobra_model.metabolites.get_by_id('val_DASH_L_c')
        ala = cobra_model.metabolites.get_by_id('ala_DASH_L_c')
        pyr = cobra_model.metabolites.get_by_id('pyr_c')
        #make VPMATr_reverse (irreversible)
        # NOTE(review): reactions are added as 'VPMATr'/'VPMATr_reverse' but
        # the atom-mapping calls near the end of this method reference
        # 'VPAMTr'/'VPAMTr_reverse' — likely a transposition typo; confirm
        # which spelling the database expects.
        rxn_mets = {};
        rxn_mets[val] = -1;
        rxn_mets[pyr] = -1;
        rxn_mets[mob3] = 1;
        rxn_mets[ala] = 1;
        rxn = Reaction('VPMATr_reverse');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #make VPMATr (irreversible)
        rxn_mets = {};
        rxn_mets[mob3] = -1;
        rxn_mets[ala] = -1;
        rxn_mets[val] = 1;
        rxn_mets[pyr] = 1;
        rxn = Reaction('VPMATr');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include COASYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','coa_c');
        coa = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        coa.charge = met_row['charge']
        #get metabolites in the model
        cys = cobra_model.metabolites.get_by_id('cys_DASH_L_c')
        mlthf = cobra_model.metabolites.get_by_id('mlthf_c')
        #make COASYN (irreversible)
        # NOTE(review): co2 assigned twice (overwrite, net +1) — the COASYN
        # atom mapping below tracks two co2_c products; confirm intent.
        rxn_mets = {};
        rxn_mets[atp] = -1;
        rxn_mets[mlthf] = -1;
        rxn_mets[mob3] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[cys] = -1;
        rxn_mets[coa] = 1;
        rxn_mets[co2] = 1;
        rxn_mets[co2] = 1;
        rxn = Reaction('COASYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include FADSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','fad_c');
        fad = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        fad.charge = met_row['charge']
        #get metabolites in the model
        ru5p = cobra_model.metabolites.get_by_id('ru5p_DASH_D_c')
        #make FADSYN (irreversible)
        # NOTE(review): ru5p assigned twice and co2 three times (overwrites,
        # net -1/+1) — the FADSYN atom mapping below uses two ru5p_DASH_D_c
        # reactants and three co2_c products; confirm intent.
        rxn_mets = {};
        rxn_mets[gtp] = -1;
        rxn_mets[ru5p] = -1;
        rxn_mets[ru5p] = -1;
        rxn_mets[atp] = -1;
        rxn_mets[fad] = 1;
        rxn_mets[co2] = 1;
        rxn_mets[co2] = 1;
        rxn_mets[co2] = 1;
        rxn = Reaction('FADSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include CBMKr and CBMKr_reverse:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','cbp_c');
        cbp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        cbp.charge = met_row['charge']
        #make CBMKr (irreversible): co2 -> cbp
        rxn_mets = {};
        rxn_mets[co2] = -1;
        rxn_mets[cbp] = 1;
        rxn = Reaction('CBMKr');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #make CBMKr_reverse (irreversible): cbp -> co2
        rxn_mets = {};
        rxn_mets[cbp] = -1;
        rxn_mets[co2] = 1;
        rxn = Reaction('CBMKr_reverse');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #expand the model to include UTPSYN:
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','utp_c');
        utp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        utp.charge = met_row['charge']
        #make UTPSYN (irreversible)
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[cbp] = -1;
        rxn_mets[asp] = -1;
        rxn_mets[utp] = 1;
        rxn_mets[co2] = 1;
        rxn = Reaction('UTPSYN');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        # update selected reactions to account for coa_c
        # (add_metabolites adjusts existing stoichiometry in place)
        cobra_model.reactions.get_by_id("ArgSYN").add_metabolites({coa:1});
        cobra_model.reactions.get_by_id("CS").add_metabolites({coa:1});
        cobra_model.reactions.get_by_id("LeuSYN").add_metabolites({coa:1});
        cobra_model.reactions.get_by_id("PDH").add_metabolites({coa:-1});
        cobra_model.reactions.get_by_id("PTAr_ACKr_ACS").add_metabolites({coa:1});
        cobra_model.reactions.get_by_id("PTAr_ACKr_ACS_reverse").add_metabolites({coa:-1});
        cobra_model.reactions.get_by_id("SERAT_CYSS").add_metabolites({coa:1});
        cobra_model.reactions.get_by_id("THRD_GLYAT").add_metabolites({coa:-1});
        cobra_model.reactions.get_by_id("MALS").add_metabolites({coa:1});
        # update selected mappings to account for coa_c
        # Each branch rewrites the tracked reactant/product lists for one
        # reaction, then regenerates elements_tracked ('C' per mapped atom)
        # and positions_tracked (0..len-1) from the mapping strings.
        # The 21-character run ABCDEFGHIJKLMNOPQRSTU labels the CoA carbons.
        for rxn,row in enumerate(atomMappingReactions):
            if row['rxn_id'] == 'ArgSYN':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['glu_DASH_L_c','co2_c','gln_DASH_L_c','asp_DASH_L_c','accoa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['arg_DASH_L_c','akg_c','fum_c','ac_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['abcde','f','ghijk','lmno','ABCDEFGHIJKLMNOPQRSTUpq']
                atomMappingReactions[rxn]['products_mapping']=['abcdef','ghijk','lmno','pq','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'CS':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['oaa_c','accoa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['cit_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTUef']
                atomMappingReactions[rxn]['products_mapping']=['dcbfea','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'LeuSYN':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1,-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1,1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','pyr_c','pyr_c','glu_DASH_L_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['leu_DASH_L_c','co2_c','co2_c','akg_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cde','fgh','ijklm']
                atomMappingReactions[rxn]['products_mapping']=['abdghe','c','f','ijklm','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'PDH':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['pyr_c','coa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c','co2_c']
                atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUbc','a']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'PTAr_ACKr_ACS':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['ac_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
                atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'PTAr_ACKr_ACS_reverse':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['ac_c','coa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['accoa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['products_mapping']=['ABCDEFGHIJKLMNOPQRSTUab']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'SERAT_CYSS':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['ser_DASH_L_c','accoa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['cys_DASH_L_c','ac_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['abc','ABCDEFGHIJKLMNOPQRSTUde']
                atomMappingReactions[rxn]['products_mapping']=['abc','de','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'THRD_GLYAT':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['thr_DASH_L_c','coa_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['gly_c','accoa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['abcd','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['products_mapping']=['ab','ABCDEFGHIJKLMNOPQRSTUcd']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
            elif row['rxn_id'] == 'MALS':
                atomMappingReactions[rxn]['reactants_stoichiometry_tracked']=[-1,-1]
                atomMappingReactions[rxn]['products_stoichiometry_tracked']=[1,1]
                atomMappingReactions[rxn]['reactants_ids_tracked']=['accoa_c','glx_c']
                atomMappingReactions[rxn]['products_ids_tracked']=['mal_DASH_L_c','coa_c']
                atomMappingReactions[rxn]['reactants_mapping']=['ABCDEFGHIJKLMNOPQRSTUab','cd']
                atomMappingReactions[rxn]['products_mapping']=['cdba','ABCDEFGHIJKLMNOPQRSTU']
                atomMappingReactions[rxn]['reactants_elements_tracked']=[]
                atomMappingReactions[rxn]['products_elements_tracked']=[]
                atomMappingReactions[rxn]['reactants_positions_tracked']=[]
                atomMappingReactions[rxn]['products_positions_tracked']=[]
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['reactants_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['reactants_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['reactants_positions_tracked'].append(positions)
                for cnt,mapping in enumerate(atomMappingReactions[rxn]['products_mapping']):
                    positions = []
                    elements = []
                    for pos,element in enumerate(mapping):
                        positions.append(pos);
                        elements.append('C');
                    atomMappingReactions[rxn]['products_elements_tracked'].append(elements)
                    atomMappingReactions[rxn]['products_positions_tracked'].append(positions)
        # update BOF (biomass objective function) with the new cofactors
        # NOTE(review): adp is constructed here but never used — presumably a
        # leftover; confirm it was not meant to appear in the biomass update.
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014','adp_c');
        adp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        adp.charge = met_row['charge']
        cobra_model.reactions.get_by_id("Ec_Biomass_INCA").add_metabolites({coa:2.51,
                            atp:-53.95,gtp:-0.20912,fad:-0.000223,utp:-0.1401});
        # write the model to a temporary file
        save_json_model(cobra_model,'data/cobra_model_tmp.json')
        # add the model information to the database
        io = stage02_isotopomer_io()
        dataStage02IsotopomerModelRxns_data = [];
        dataStage02IsotopomerModelMets_data = [];
        dataStage02IsotopomerModels_data,\
            dataStage02IsotopomerModelRxns_data,\
            dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
        io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
        io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
        io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
        #add atomMappingReactions to the database
        io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
        # expand atomMappingReactions
        imm = stage02_isotopomer_metaboliteMapping()
        irm = stage02_isotopomer_reactionMapping()
        mappingUtilities = stage02_isotopomer_mappingUtilities()
        # make atomMappingMetabolites
        mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
                            mapping_id_rxns_I=[mapping_id_O],
                            mapping_id_mets_I=[],#mapping_id_mets_I=[mapping_id_I],
                            mapping_id_new_I=mapping_id_O);
        # update symmetric metabolites (succinate, fumarate, meso-DAP)
        imm.get_metaboliteMapping(mapping_id_O,'succ_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'fum_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        ## update _elements and _positions-_tracked
        #irm.get_reactionMapping(mapping_id_O,'ArgSYN')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'CS')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'LeuSYN')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'PDH')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'PTAr_ACKr_ACS_reverse')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'SERAT_CYSS')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'THRD_GLYAT')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #irm.get_reactionMapping(mapping_id_O,'MALS')
        #irm.checkAndCorrect_elementsAndPositions();
        #irm.update_reactionMapping()
        #irm.clear_reactionMapping()
        #make default base metabolites
        imm.get_metaboliteMapping(mapping_id_O,'asp_DASH_L_c')
        imm.make_defaultBaseMetabolites()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'cys_DASH_L_c')
        imm.make_defaultBaseMetabolites()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'ru5p_DASH_D_c')
        imm.make_defaultBaseMetabolites()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        #add in PRS to the network?
        #if not, substitute r5p_c for prpp_c
        #substitute co2_c for for_c
        #substitute phe_DASH_L_c for phpyr_c
        #ATPSYN
        # Build the compound atom mapping for ATPSYN one substrate group at a
        # time; the product mapping for atp_c is registered once, then the
        # whole reaction mapping is committed.
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
                            [{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
                            [],
                            [],
                            'atp_c',
                            [],
                            [])
        irm.add_productMapping(['atp_c'])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
                            [{'gln_DASH_L_c':'C'}],
                            [],
                            [],
                            'glu_DASH_L_c',
                            [],
                            [])
        # asp -> fum mapped twice (two fumarate products per ATP)
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
                            [{'asp_DASH_L_c':'C'}],
                            [],
                            [],
                            'fum_c',
                            [],
                            [])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'ATPSYN',
                            [{'asp_DASH_L_c':'C'}],
                            [],
                            [],
                            'fum_c',
                            [],
                            [])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #GTPSYN
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
                            [{'r5p_c':'C'},{'10fthf_c':'C'},{'gly_c':'C'},{'co2_c':'C'},{'10fthf_c':'C'}],
                            [],
                            [],
                            'gtp_c',
                            [],
                            [])
        # gln -> glu mapped twice (two glutamate products per GTP)
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
                            [{'gln_DASH_L_c':'C'}],
                            [],
                            [],
                            'glu_DASH_L_c',
                            [],
                            [])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
                            [{'gln_DASH_L_c':'C'}],
                            [],
                            [],
                            'glu_DASH_L_c',
                            [],
                            [])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'GTPSYN',
                            [{'asp_DASH_L_c':'C'}],
                            [],
                            [],
                            'fum_c',
                            [],
                            [])
        irm.add_productMapping(['gtp_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #VPAMTr_reverse
        # NOTE(review): 'VPAMTr_reverse'/'VPAMTr' here vs the cobra reactions
        # added above as 'VPMATr_reverse'/'VPMATr' — spelling mismatch; confirm.
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
                            [{'val_DASH_L_c':'C'}],
                            [],
                            [],
                            '3mob_c',
                            [],
                            [])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr_reverse',
                            [{'pyr_c':'C'}],
                            [],
                            [],
                            'ala_DASH_L_c',
                            [],
                            [])
        irm.add_productMapping(['3mob_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #VPAMTr
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
                            [{'3mob_c':'C'}],
                            [],
                            [],
                            'val_DASH_L_c',
                            [],
                            [])
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'VPAMTr',
                            [{'ala_DASH_L_c':'C'}],
                            [],
                            [],
                            'pyr_c',
                            [],
                            [])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #COASYN
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'COASYN',
                            [{'atp_c':'C'},{'mlthf_c':'C'},{'3mob_c':'C'},{'asp_DASH_L_c':'C'},{'cys_DASH_L_c':'C'}],
                            [{'asp_DASH_L_c':3},{'cys_DASH_L_c':4}],
                            [{'co2_c':0},{'co2_c':0}],
                            'coa_c',
                            [{'co2_c':'C'},{'co2_c':'C'}],
                            ['co2_c','co2_c'])
        #reverse product mapping for 3mob_c in database!
        irm.update_productMapping(['coa_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #ACCOA_psuedo
        # NOTE(review): mapping/model ids 'full04'/'140407_iDM2014' are
        # hard-coded instead of mapping_id_O/model_id_O — confirm intent.
        irm.make_trackedBinaryReaction('full04','140407_iDM2014','accoa_c_base_met_ids',
                            [{'coa_c':'C'},{'ac_c':'C'}],
                            'accoa_c')
        irm.update_productMapping(['accoa_c'])
        irm.clear_reactionMapping()
        #FADSYN
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'FADSYN',
                            [{'gtp_c':'C'},{'ru5p_DASH_D_c':'C'},{'ru5p_DASH_D_c':'C'},{'atp_c':'C'}],
                            [{'gtp_c':0},{'ru5p_DASH_D_c':1},{'ru5p_DASH_D_c':2}],
                            [{'10fthf_c':0},{'co2_c':0},{'co2_c':0}],
                            'fad_c',
                            [{'10fthf_c':'C'},{'co2_c':'C'},{'co2_c':'C'}],
                            ['co2_c','co2_c','co2_c'])
        irm.add_productMapping(['fad_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #CBMKr
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr',
                            [{'co2_c':'C'}],
                            [],
                            [],
                            'cbp_c',
                            [],
                            [])
        irm.add_productMapping(['cbp_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #CBMKr_reverse
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'CBMKr_reverse',
                            [{'cbp_c':'C'}],
                            [],
                            [],
                            'co2_c',
                            [],
                            [])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
        #UTPSYN
        irm.make_trackedCompoundReaction(mapping_id_O,model_id_O,'UTPSYN',
                            [{'r5p_c':'C'},{'cbp_c':'C'},{'asp_DASH_L_c':'C'}],
                            [{'asp_DASH_L_c':2}],
                            [{'co2_c':0}],
                            'utp_c',
                            [{'co2_c':'C'}],
                            ['co2_c'])
        irm.add_productMapping(['utp_c'])
        irm.add_reactionMapping()
        irm.clear_reactionMapping()
#ecoli_RL2013 modifications (TODO)
    def expand_ecoliRL2013_01(self,experiment_id_I,model_id_I,mapping_id_I,date_I,model_id_O,mapping_id_O):
        '''expand the INCA Ecoli model to account for additional metabolites

        Loads the cobra model stored for model_id_I, adds glucose transport
        (EX_glc_LPAREN_e_RPAREN_, GLCptspp, GLCt2pp), HEX1 and PRPPS
        reactions plus their atom mappings, re-routes HisSYN through prpp,
        saves the expanded model/mapping under model_id_O/mapping_id_O, and
        regenerates the metabolite mappings (including symmetric metabolites).

        experiment_id_I: experiment to generate missing metabolite mappings for
        model_id_I/mapping_id_I: source model and atom mapping
        date_I: date stamp recorded with the new model rows
        model_id_O/mapping_id_O: ids under which the expanded model/mapping are stored

        NOTE(review): metabolite formulas are pulled from the hard-coded model
        '140407_iDM2014', and intermediate files are written to relative paths
        under data/ — presumably the working directory is the project root;
        verify before running elsewhere.
        '''
        query = stage02_isotopomer_query()
        # get the xml model
        cobra_model_sbml = ''
        cobra_model_sbml = query.get_row_modelID_dataStage02IsotopomerModels(model_id_I);
        # load the model
        if cobra_model_sbml:
            if cobra_model_sbml['file_type'] == 'sbml':
                # model bytes are round-tripped through a temporary file for the sbml parser
                with open('data/cobra_model_tmp.xml','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = create_cobra_model_from_sbml_file('data/cobra_model_tmp.xml', print_time=True);
            elif cobra_model_sbml['file_type'] == 'json':
                with open('data/cobra_model_tmp.json','wb') as file:
                    file.write(cobra_model_sbml['model_file']);
                    file.close()
                cobra_model = None;
                cobra_model = load_json_model('data/cobra_model_tmp.json');
            else:
                print('file_type not supported')
        #get the atomMapping_reactions
        atomMappingReactions = query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I);
        #change the mapping_id
        for cnt,row in enumerate(atomMappingReactions):
            atomMappingReactions[cnt]['mapping_id']=mapping_id_O;
        #add in glucose transporters and intracellular glc
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"atp_c");
        atp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        atp.charge = met_row['charge']
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_c");
        glc_c = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        glc_c.charge = met_row['charge']
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"glc_DASH_D_e");
        glc_e = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'e')
        glc_e.charge = met_row['charge']
        # .ext/.pre pseudo-metabolites distinguish external/pre-labeled glucose pools
        glcext = Metabolite('glc_DASH_D_e.ext',met_row['formula'],met_row['met_name'],'e')
        glcext.charge = met_row['charge']
        glcpre = Metabolite('glc_DASH_D_e.pre',met_row['formula'],met_row['met_name'],'e')
        glcpre.charge = met_row['charge']
        #get metabolites in the model
        pep = cobra_model.metabolites.get_by_id('pep_c')
        pyr = cobra_model.metabolites.get_by_id('pyr_c')
        g6p = cobra_model.metabolites.get_by_id('g6p_c')
        #make EX_glc_LPAREN_e_RPAREN_
        rxn_mets = {};
        rxn_mets[glcext] = -1;
        rxn_mets[glc_e] = 1;
        rxn = Reaction('EX_glc_LPAREN_e_RPAREN_');
        # replace the existing exchange reaction with the tracked version
        cobra_model.remove_reactions(['EX_glc_LPAREN_e_RPAREN_']);
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN_';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.ext']
        row_tmp['products_ids_tracked']=['glc_DASH_D_e']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        ##make EX_glc_LPAREN_e_RPAREN__pre
        #rxn_mets = {};
        #rxn_mets[glcpre] = -1;
        #rxn_mets[glc_e] = 1;
        #rxn = Reaction('EX_glc_LPAREN_e_RPAREN__pre');
        #cobra_model.remove_reactions(['v60']);
        #rxn.add_metabolites(rxn_mets);
        #cobra_model.add_reactions([rxn]);
        #cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        #cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        #cobra_model.repair();
        ##append the new atom mappings
        #row_tmp = {};
        #row_tmp['mapping_id']=mapping_id_O;
        #row_tmp['rxn_id']='EX_glc_LPAREN_e_RPAREN__pre';
        #row_tmp['rxn_description']='';
        #row_tmp['rxn_equation']='';
        #row_tmp['reactants_stoichiometry_tracked']=[-1]
        #row_tmp['products_stoichiometry_tracked']=[1]
        #row_tmp['reactants_ids_tracked']=['glc_DASH_D_e.pre']
        #row_tmp['products_ids_tracked']=['glc_DASH_D_e']
        #row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        #row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        #row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        #row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        #row_tmp['reactants_mapping']=['abcdef']
        #row_tmp['products_mapping']=['abcdef']
        #row_tmp['used_']=True
        #row_tmp['comment_']='added'
        #atomMappingReactions.append(row_tmp);
        #make GLCptspp "glc_DASH_D_p + pep_c --> g6p_c + pyr_c"
        rxn_mets = {};
        rxn_mets[glc_e] = -1;
        rxn_mets[pep] = -1;
        rxn_mets[g6p] = 1;
        rxn_mets[pyr] = 1;
        rxn = Reaction('GLCptspp');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='GLCptspp';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1,-1]
        row_tmp['products_stoichiometry_tracked']=[1,1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e','pep_c']
        row_tmp['products_ids_tracked']=['g6p_c','pyr_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"],["C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5],[0, 1, 2]]
        row_tmp['reactants_mapping']=['abcdef','ghi']
        row_tmp['products_mapping']=['abcdef','ghi']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make GLCt2pp "glc_DASH_D_p + h_p --> glc_DASH_D_c + h_c"
        rxn_mets = {};
        rxn_mets[glc_e] = -1;
        rxn_mets[glc_c] = 1;
        rxn = Reaction('GLCt2pp');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='GLCt2pp';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_e']
        row_tmp['products_ids_tracked']=['glc_DASH_D_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        #make HEX1 "atp_c + glc_DASH_D_c --> g6p_c + h_c + adp_c"
        rxn_mets = {};
        rxn_mets[glc_c] = -1;
        rxn_mets[atp] = -1;
        rxn_mets[g6p] = 1;
        rxn = Reaction('HEX1');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.reactions.get_by_id(rxn.id).lower_bound = 0.0;
        cobra_model.reactions.get_by_id(rxn.id).upper_bound = 1000.0;
        cobra_model.repair();
        #append the new atom mappings
        row_tmp = {};
        row_tmp['mapping_id']=mapping_id_O;
        row_tmp['rxn_id']='HEX1';
        row_tmp['rxn_description']='';
        row_tmp['rxn_equation']='';
        row_tmp['reactants_stoichiometry_tracked']=[-1]
        row_tmp['products_stoichiometry_tracked']=[1]
        row_tmp['reactants_ids_tracked']=['glc_DASH_D_c']
        row_tmp['products_ids_tracked']=['g6p_c']
        row_tmp['reactants_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['products_elements_tracked']=[["C", "C", "C", "C", "C", "C"]]
        row_tmp['reactants_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['products_positions_tracked']=[[0, 1, 2, 3, 4, 5]]
        row_tmp['reactants_mapping']=['abcdef']
        row_tmp['products_mapping']=['abcdef']
        row_tmp['used_']=True
        row_tmp['comment_']='added'
        atomMappingReactions.append(row_tmp);
        # add in PRPPS phosphoribosylpyrophosphate synthetase atp[c] + r5p[c] <=> amp[c] + h[c] + prpp[c]
        #get metabolites not in the model
        met_row = {}
        met_row = query.get_row_modelIDAndMetID_dataStage02IsotopomerModelMetabolites('140407_iDM2014',"prpp_c");
        prpp = Metabolite(met_row['met_id'],met_row['formula'],met_row['met_name'],'c')
        prpp.charge = met_row['charge']
        r5p = cobra_model.metabolites.get_by_id('r5p_c')
        # expand the model
        rxn_mets = {};
        rxn_mets[r5p] = -1;
        rxn_mets[atp] = -1;
        rxn_mets[prpp] = 1;
        rxn = Reaction('PRPPS');
        rxn.add_metabolites(rxn_mets);
        cobra_model.add_reactions([rxn]);
        cobra_model.repair();
        # add in rxn mapping
        row={};
        row['mapping_id']=mapping_id_O;
        row['rxn_id']='PRPPS';
        row['rxn_description']='';
        row['rxn_equation']='';
        row['reactants_stoichiometry_tracked']=[-1]
        row['products_stoichiometry_tracked']=[1]
        row['reactants_ids_tracked']=['r5p_c']
        row['products_ids_tracked']=['prpp_c']
        row['reactants_elements_tracked']=[["C", "C", "C", "C", "C"]]
        row['products_elements_tracked']=[["C", "C", "C", "C", "C"]]
        row['reactants_positions_tracked']=[[0, 1, 2, 3, 4]]
        row['products_positions_tracked']=[[0, 1, 2, 3, 4]]
        row['reactants_mapping']=['abcde']
        row['products_mapping']=['abcde']
        row['used_']=True
        row['comment_']='added'
        atomMappingReactions.append(row)
        ##expand the model
        #acon = Metabolite('acon_DASH_C_c','C6H3O6','cis-Aconitate','c');
        #cit = cobra_model.metabolites.get_by_id('cit_c')
        #icit = cobra_model.metabolites.get_by_id('icit_c')
        #e4p = cobra_model.metabolites.get_by_id('e4p_c')
        #phe = cobra_model.metabolites.get_by_id('phe_DASH_L_c')
        # NOTE(review): 'his' is fetched but never used below — presumably a leftover from the commented-out expansion
        his = cobra_model.metabolites.get_by_id('his_DASH_L_c')
        #phpyr = Metabolite('phpyr_c','C9H7O3','Phenylpyruvate','c');
        # update selected reactions to account for new metabolites
        for rxn,row in enumerate(atomMappingReactions):
            if row['rxn_id'] == 'HisSYN':
                # split HisSYN to add in prpp
                cobra_model.reactions.get_by_id(row['rxn_id']).subtract_metabolites({atp:-1,r5p:-1})
                cobra_model.reactions.get_by_id(row['rxn_id']).add_metabolites({prpp:-1})
                # Update the mapping_ids
                atomMappingReactions[rxn]['reactants_ids_tracked']=[r.replace('r5p_c','prpp_c') for r in atomMappingReactions[rxn]['reactants_ids_tracked']]
        # write the model to a temporary file
        save_json_model(cobra_model,'data/cobra_model_tmp.json')
        # add the model information to the database
        io = stage02_isotopomer_io()
        dataStage02IsotopomerModelRxns_data = [];
        dataStage02IsotopomerModelMets_data = [];
        dataStage02IsotopomerModels_data,\
        dataStage02IsotopomerModelRxns_data,\
        dataStage02IsotopomerModelMets_data = io._parse_model_json(model_id_O, date_I, 'data/cobra_model_tmp.json')
        io.add_data_stage02_isotopomer_modelMetabolites(dataStage02IsotopomerModelMets_data);
        io.add_data_stage02_isotopomer_modelReactions(dataStage02IsotopomerModelRxns_data);
        io.add_data_stage02_isotopomer_models(dataStage02IsotopomerModels_data);
        #add atomMappingReactions to the database
        io.add_data_stage02_isotopomer_atomMappingReactions(atomMappingReactions);
        # expand atomMappingReactions
        imm = stage02_isotopomer_metaboliteMapping()
        irm = stage02_isotopomer_reactionMapping()
        mappingUtilities = stage02_isotopomer_mappingUtilities()
        # make atomMappingMetabolites
        mappingUtilities.make_missingMetaboliteMappings(experiment_id_I,model_id_I=[model_id_O],
                            mapping_id_rxns_I=[mapping_id_O],
                            mapping_id_mets_I=[],
                            mapping_id_new_I=mapping_id_O);
        # update symmetric metabolites (succ/fum/26dap are rotationally symmetric)
        imm.get_metaboliteMapping(mapping_id_O,'succ_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'fum_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
        imm.get_metaboliteMapping(mapping_id_O,'26dap_DASH_M_c')
        imm.make_symmetric()
        imm.update_metaboliteMapping()
        imm.clear_metaboliteMapping()
#analysis functions
def load_isotopomer_matlab(self,matlab_data,isotopomer_data=None):
'''Load 13CFlux isotopomer simulation data from matlab file'''
# load measured isotopomers from MATLAB file into numpy array
# load names and calculated isotopomers from MATLAB file into numpy array
names = scipy.io.loadmat(matlab_data)['output']['names'][0][0];
calculated_ave = scipy.io.loadmat(matlab_data)['output']['ave'][0][0];
calculated_stdev = scipy.io.loadmat(matlab_data)['output']['stdev'][0][0];
# load residuals from MATLAB file into numpy array
residuals = scipy.io.loadmat(matlab_data)['residuals'];
if isotopomer_data:
measured_dict = json.load(open(isotopomer_data,'r'));
measured_names = [];
measured_ave = [];
measured_stdev = [];
# extract data to lists
for frag,data in measured_dict['fragments'].items():
for name in data['data_names']:
measured_names.append(name);
for ave in data['data_ave']:
measured_ave.append(ave);
for stdev in data['data_stdev']:
measured_stdev.append(stdev);
# convert lists to dict
measured_dict = {};
for i,name in enumerate(measured_names):
measured_dict[name]={'measured_ave':measured_ave[i],
'measured_stdev':measured_stdev[i]};
# match measured names to calculated names
measured_ave = [];
measured_stdev = [];
residuals = [];
for i,name in enumerate(names):
if name[0][0] in measured_dict:
measured_ave.append(measured_dict[name[0][0]]['measured_ave']);
measured_stdev.append(measured_dict[name[0][0]]['measured_stdev']);
residuals.append(measured_dict[name[0][0]]['measured_ave']-calculated_ave[i][0]);
else:
measured_ave.append(None);
measured_stdev.append(None);
residuals.append(None);
else:
measured_ave_tmp = scipy.io.loadmat(matlab_data)['toCompare'];
measured_ave = [];
for d in measured_ave_tmp:
measured_ave.append(d[0]);
measured_stdev = numpy.zeros(len(measured_ave));
# combine into a dictionary
isotopomer = {};
for i in range(len(names)):
isotopomer[names[i][0][0]] = {'measured_ave':measured_ave[i], #TODO: extract out by fragment names
'measured_stdev':measured_stdev[i],
'calculated_ave':calculated_ave[i][0],
'calculated_stdev':calculated_stdev[i][0],
'residuals':residuals[i]};
return isotopomer;
def load_confidenceIntervals_matlab(self,matlab_data,cobra_model_matlab,cobra_model_name):
'''Load confidence intervals from matlab file'''
# load confidence intervals from MATLAB file into numpy array
cimin_h5py = h5py.File(matlab_data)['ci']['minv'][0];
cimax_h5py = h5py.File(matlab_data)['ci']['maxv'][0];
cimin = numpy.array(cimin_h5py);
cimax = numpy.array(cimax_h5py);
# load cobramodel
rxns = scipy.io.loadmat(cobra_model_matlab)[cobra_model_name]['rxns'][0][0]
# combine cimin, cimax, and rxns into dictionary
ci = {};
for i in range(len(cimin)):
ci[rxns[i][0][0]] = {'minv':cimin[i],'maxv':cimax[i]};
return ci;
def compare_isotopomers_calculated(self,isotopomer_1, isotopomer_2):
'''compare two calculated isotopomer distributions'''
# extract into lists
absDif_list = [];
ssr_1_list = [];
ssr_2_list = [];
bestFit_list = [];
frag_list = [];
ssr_1 = 0.0; # sum of squared residuals (threshold of 10e1, Antoniewicz poster, co-culture, Met Eng X)
ssr_2 = 0.0;
measured_1_list = [];
measured_2_list = [];
calculatedAve_1_list = [];
calculatedAve_2_list = [];
measuredStdev_1_list = [];
measuredStdev_2_list = [];
for frag,data in isotopomer_1.items():
absDif = 0.0;
sr_1 = 0.0;
sr_2 = 0.0;
bestFit = None;
absDif = fabs(isotopomer_1[frag]['calculated_ave'] - isotopomer_2[frag]['calculated_ave']);
sr_1 = pow(isotopomer_1[frag]['calculated_ave']-isotopomer_1[frag]['measured_ave'],2);
sr_2 = pow(isotopomer_2[frag]['calculated_ave']-isotopomer_2[frag]['measured_ave'],2);
if sr_1>sr_2: bestFit = '2';
elif sr_1<sr_2: bestFit = '1';
elif sr_1==sr_2: bestFit = None;
absDif_list.append(absDif);
ssr_1_list.append(sr_1);
ssr_2_list.append(sr_2);
bestFit_list.append(bestFit);
frag_list.append(frag);
ssr_1 += sr_1;
ssr_2 += sr_2;
measured_1_list.append(isotopomer_1[frag]['measured_ave'])
measured_2_list.append(isotopomer_2[frag]['measured_ave'])
calculatedAve_1_list.append(isotopomer_1[frag]['calculated_ave']);
calculatedAve_2_list.append(isotopomer_2[frag]['calculated_ave']);
measuredStdev_1_list.append(isotopomer_1[frag]['measured_stdev']);
measuredStdev_2_list.append(isotopomer_2[frag]['measured_stdev']);
# calculate the correlation coefficient
# 1. between measured vs. calculated (1 and 2)
# 2. between calculated 1 vs. calculated 2
r_measuredVsCalculated_1 = None;
r_measuredVsCalculated_2 = None;
r_measured1VsMeasured2 = None;
p_measuredVsCalculated_1 = None;
p_measuredVsCalculated_2 = None;
p_measured1VsMeasured2 = None;
r_measuredVsCalculated_1, p_measuredVsCalculated_1 = scipy.stats.pearsonr(measured_1_list,calculatedAve_1_list);
r_measuredVsCalculated_2, p_measuredVsCalculated_2 = scipy.stats.pearsonr(measured_2_list,calculatedAve_2_list);
r_measured1VsMeasured2, p_measured1VsMeasured2 = scipy.stats.pearsonr(calculatedAve_1_list,calculatedAve_2_list);
# wrap stats into a dictionary
isotopomer_comparison_stats = {};
isotopomer_comparison_stats = dict(list(zip(('r_measuredVsCalculated_1', 'p_measuredVsCalculated_1',
'r_measuredVsCalculated_2', 'p_measuredVsCalculated_2',
'r_measured1VsMeasured2', 'p_measured1VsMeasured2',
'ssr_1,ssr_2'),
(r_measuredVsCalculated_1, p_measuredVsCalculated_1,
r_measuredVsCalculated_2, p_measuredVsCalculated_2,
r_measured1VsMeasured2, p_measured1VsMeasured2,
ssr_1,ssr_2))));
## zip, sort, unzip # does not appear to sort correctly!
#zipped = zip(absDif_list,ssr_1_list,ssr_2_list,bestFit_list,frag_list,
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,
# measuredStdev_1_list,measuredStdev_2_list);
#zipped.sort();
#zipped.reverse();
#absDif_list,ssr_1_list,sst_2_list,bestFit_list,frag_list,\
# measured_1_list,measured_2_list,calculatedAve_1_list,calculatedAve_2_list,\
# measuredStdev_1_list,measuredStdev_2_list = zip(*zipped);
# restructure into a list of dictionaries for easy parsing or data base viewing
isotopomer_comparison = [];
for i in range(len(absDif_list)):
isotopomer_comparison.append({'isotopomer_absDif':absDif_list[i],
'isotopomer_1_sr':ssr_1_list[i],
'isotopomer_2_sr':ssr_2_list[i],
'bestFit':bestFit_list[i],
'frag':frag_list[i],
'measured_1_ave':measured_1_list[i],
'measured_2_ave':measured_2_list[i],
'measured_1_stdev':measuredStdev_1_list[i],
'measured_2_stdev':measuredStdev_2_list[i],
'calculated_1_ave':calculatedAve_1_list[i],
'calculated_2_ave':calculatedAve_2_list[i]});
return isotopomer_comparison,isotopomer_comparison_stats;
def compare_ci_calculated(self,ci_1,ci_2):
'''compare 2 calculated confidence intervals'''
# extract into lists
rxns_1_list = [];
rxns_2_list = [];
ciminv_1_list = [];
ciminv_2_list = [];
cimaxv_1_list = [];
cimaxv_2_list = [];
cirange_1_list = [];
cirange_2_list = [];
cirange_1_sum = 0.0;
cirange_2_sum = 0.0;
# ci_1:
for k,v in ci_1.items():
rxns_1_list.append(k);
ciminv_1_list.append(v['minv']);
cimaxv_1_list.append(v['maxv']);
cirange_1_list.append(v['maxv']-v['minv']);
cirange_1_sum += v['maxv']-v['minv'];
## zip, sort, unzip
#zipped1 = zip(rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list);
#zipped1.sort();
#rxns_1_list,ciminv_1_list,cimaxv_1_list,cirange_1_list = zip(*zipped1);
# ci_2:
for k,v in ci_2.items():
rxns_2_list.append(k);
ciminv_2_list.append(v['minv']);
cimaxv_2_list.append(v['maxv']);
cirange_2_list.append(v['maxv']-v['minv']);
cirange_2_sum += v['maxv']-v['minv'];
## zip, sort, unzip
#zipped2 = zip(rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list);
#zipped2.sort();
#rxns_2_list,ciminv_2_list,cimaxv_2_list,cirange_2_list = zip(*zipped2);
# compare by rxn_id
cirange_absDev_list = [];
rxns_combined_list = [];
ciminv_1_combined_list = [];
ciminv_2_combined_list = [];
cimaxv_1_combined_list = [];
cimaxv_2_combined_list = [];
cirange_1_combined_list = [];
cirange_2_combined_list = [];
cirange_1_combined_sum = 0.0;
cirange_2_combined_sum = 0.0;
for i in range(len(rxns_1_list)):
for j in range(len(rxns_2_list)):
if rxns_1_list[i] == rxns_2_list[j]:
rxns_combined_list.append(rxns_1_list[i]);
cirange_absDev_list.append(fabs(cirange_1_list[i]-cirange_2_list[j]));
ciminv_1_combined_list.append(ciminv_1_list[i]);
ciminv_2_combined_list.append(ciminv_2_list[j]);
cimaxv_1_combined_list.append(cimaxv_1_list[i]);
cimaxv_2_combined_list.append(cimaxv_2_list[j]);
cirange_1_combined_list.append(cirange_1_list[i]);
cirange_2_combined_list.append(cirange_2_list[j]);
cirange_1_combined_sum += cirange_1_list[i]
cirange_2_combined_sum += cirange_2_list[j]
## zip, sort, unzip
#zippedCombined = zip(cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list);
#zippedCombined.sort();
#zippedCombined.reverse();
#cirange_absDev_list,rxns_combined_list,ciminv_1_combined_list,ciminv_2_combined_list,cimaxv_1_combined_list,cimaxv_2_combined_list,cirange_1_combined_list,cirange_2_combined_list = zip(*zippedCombined);
# restructure into a list of dictionaries for easy parsing or data base viewing
ci_comparison = [];
for i in range(len(cirange_absDev_list)):
ci_comparison.append({'cirange_absDev_list':cirange_absDev_list[i],
'rxns_combined_list':rxns_combined_list[i],
'ciminv_1_combined_list':ciminv_1_combined_list[i],
'ciminv_2_combined_list':ciminv_2_combined_list[i],
'cimaxv_1_combined_list':cimaxv_1_combined_list[i],
'cimaxv_2_combined_list':cimaxv_2_combined_list[i],
'cirange_1_combined_list':cirange_1_combined_list[i],
'cirange_2_combined_list':cirange_2_combined_list[i]});
return ci_comparison,cirange_1_sum,cirange_2_sum,cirange_1_combined_sum,cirange_2_combined_sum;
def plot_compare_isotopomers_calculated(self,isotopomer_comparison,isotopomer_comparison_stats):
'''Plot 1: isotopomer fitting comparison
Plot 2: isotopomer residual comparison'''
io = base_exportData(isotopomer_comparison);
# Plot 1 and Plot 2:
io.write_dict2tsv('data//data.tsv');
def plot_ci_calculated(self,ci):
'''plot confidence intervals from fluxomics experiment using escher'''
data = [];
flux1 = {};
flux2 = {};
for k,v in ci.items():
flux1[k] = v['minv'];
flux2[k] = v['maxv'];
data.append(flux1);
data.append(flux2);
io = base_exportData(data);
io.write_dict2json('visualization/escher/ci.json');
    def export_modelWithFlux(self,cobra_model_xml_I,ci_list_I,cobra_model_xml_O):
        '''update model lower_bound/upper_bound with calculated flux confidence intervals

        cobra_model_xml_I = path to the input sbml model
        ci_list_I = list of {rxn_id: {'minv': float, 'maxv': float}} dicts
        cobra_model_xml_O = path where the constrained sbml model is written

        Each candidate bound change is first tried on a copy of the model and
        only applied to the real model if the copy still optimizes to a
        non-zero objective; system-boundary and objective reactions are
        skipped. Only strictly positive bounds are applied.
        NOTE(review): requires the gurobi solver to be available.
        '''
        cobra_model = create_cobra_model_from_sbml_file(cobra_model_xml_I);
        # bookkeeping of which reactions were constrained / skipped / rejected
        rxns_add = [];
        rxns_omitted = [];
        rxns_break = [];
        system_boundaries = [x.id for x in cobra_model.reactions if x.boundary == 'system_boundary'];
        objectives = [x.id for x in cobra_model.reactions if x.objective_coefficient == 1];
        for i,ci_I in enumerate(ci_list_I):
            print('add flux from ci ' + str(i));
            for rxn in cobra_model.reactions:
                if rxn.id in list(ci_I.keys()) and not(rxn.id in system_boundaries)\
                    and not(rxn.id in objectives):
                    cobra_model_copy = cobra_model.copy();
                    # check for reactions that break the model:
                    if ci_I[rxn.id]['minv'] > 0:
                        cobra_model_copy.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
                    if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
                        cobra_model_copy.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
                    cobra_model_copy.optimize(solver='gurobi');
                    if not cobra_model_copy.solution.f:
                        print(rxn.id + ' broke the model!')
                        rxns_break.append(rxn.id);
                    else:
                        # the trial succeeded: apply the same bounds to the real model
                        if ci_I[rxn.id]['minv'] > 0:
                            cobra_model.reactions.get_by_id(rxn.id).lower_bound = ci_I[rxn.id]['minv'];
                        if ci_I[rxn.id]['maxv'] > 0 and ci_I[rxn.id]['maxv'] > ci_I[rxn.id]['minv']:
                            cobra_model.reactions.get_by_id(rxn.id).upper_bound = ci_I[rxn.id]['maxv'];
                        rxns_add.append(rxn.id);
                else:
                    rxns_omitted.append(rxn.id);
        write_cobra_model_to_sbml_file(cobra_model,cobra_model_xml_O)
class stage02_isotopomer_metaboliteMapping():
"""Class to standardize metabolite mapping:
A mapped metabolite takes the following form:
'met_id' + 'nMet_id' + '_' + 'element' + nElement
Input:
met_ids_elements_I = [{met_id:element},...]
[{'f6p_c':'C'},{'f6p_c':'C'},{'f6p_c':'H'},{'f6p_c':'H'},{'ac_c':'C'},{'utp_c':'C'}]
NOTE: The order matters if using multiple elements! will need to further test in future versions
Base metabolites: default base metabolite is co2 for carbon and oh for hydrogen
Base reaction: co2 + oh- + h+ = ch2o + o2"""
def __init__(self,
mapping_id_I=None,
#met_name_I=None,
met_id_I=None,
#formula_I=None,
met_elements_I=[],
met_atompositions_I=[],
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=[],
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]):
#self.session = Session();
self.stage02_isotopomer_query = stage02_isotopomer_query();
self.calculate = base_calculate();
self.metaboliteMapping={};
self.metaboliteMapping['mapping_id']=mapping_id_I;
#self.metaboliteMapping['met_name']=met_name_I;
self.metaboliteMapping['met_id']=met_id_I;
#self.metaboliteMapping['formula']=formula_I;
self.metaboliteMapping['met_elements']=met_elements_I;
self.metaboliteMapping['met_atompositions']=met_atompositions_I;
self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
self.metaboliteMapping['used_']=used__I;
self.metaboliteMapping['comment_']=comment__I;
self.metaboliteMapping['met_mapping']=met_mapping_I;
self.metaboliteMapping['base_met_ids']=base_met_ids_I;
self.metaboliteMapping['base_met_elements']=base_met_elements_I;
self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_I;
self.metaboliteMapping['base_met_symmetry_elements']=base_met_symmetry_elements_I;
self.metaboliteMapping['base_met_symmetry_atompositions']=base_met_symmetry_atompositions_I;
self.metaboliteMapping['base_met_indices']=base_met_indices_I;
def make_elementsAndPositionsTracked(self,met_id_I,element_I,n_elements_I):
#Input: met_id_I,element_I,n_elements_I
#Output: mapping_O,positions_O,elements_O
#E.g: make_elementsTracked('fdp','C',6)
mapping_O = [];
positions_O = [];
elements_O = [];
for elements_cnt in range(n_elements_I):
mapping = '[' + met_id_I.replace('.','_') + '_' + element_I + str(elements_cnt) + ']';
mapping_O.append(mapping);
positions_O.append(elements_cnt);
elements_O.append(element_I);
return mapping_O,positions_O,elements_O;
    def make_trackedMetabolite(self,mapping_id_I,model_id_I,met_id_element_I,met_index_I=None):
        '''Make an unique atom mapping for the given metabolite and element

        mapping_id_I: atom mapping id used to look up / record the mapping
        model_id_I: model id used to resolve the metabolite formula
        met_id_element_I: {met_id: element}, e.g. {'fdp_c': 'C'}
        met_index_I: optional metabolite copy index (defaults to 0)

        Populates self.metaboliteMapping in place (no return value).
        '''
        currentElementPos = 0;
        mapping_O = [];
        positions_O = [];
        elements_O = [];
        base_met_ids_O = [];
        base_met_elements_O = [];
        base_met_atompositions_O = [];
        base_met_symmetry_elements_O = [];
        base_met_symmetry_atompositions_O = [];
        base_met_indices_O = [];
        for k,v in met_id_element_I.items():
            # check if the metabolite is already in the database
            met_data = {}
            met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
            #NOTE: need to add in a constraint to make sure that the elements in the database and the elments in the input match!
            if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
                nElements = len(met_data['met_elements']);
            else:
                # get the formula for the met_id
                formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
                # get the number of elements
                if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
                # NOTE(review): if the element has no un-isotoped (key 0) count,
                # nElements stays unbound and the call below raises — TODO confirm
                if 0 in Formula(formula_I)._elements[v]:
                    nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
            # make the tracking
            nMet = 0;
            if met_index_I: nMet = met_index_I
            mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMet),v,nElements);
            # shift positions so multiple elements of one metabolite don't overlap
            positions_corrected = [currentElementPos+pos for pos in positions];
            currentElementPos += max(positions)+1;
            mapping_O.append(mapping);
            positions_O.extend(positions_corrected);
            elements_O.extend(elements);
            base_met_ids_O.append(k)
            base_met_elements_O.append(elements)
            base_met_atompositions_O.append(positions)
            base_met_indices_O.append(nMet)
        # record the result (met_id takes the last key iterated above)
        self.metaboliteMapping['mapping_id']=mapping_id_I
        self.metaboliteMapping['met_id']=k
        self.metaboliteMapping['met_elements']=elements_O
        self.metaboliteMapping['met_atompositions']=positions_O
        self.metaboliteMapping['met_mapping']=mapping_O
        self.metaboliteMapping['base_met_ids']=base_met_ids_O
        self.metaboliteMapping['base_met_elements']=base_met_elements_O
        self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
        self.metaboliteMapping['base_met_indices']=base_met_indices_O
    def make_compoundTrackedMetabolite(self,mapping_id_I,model_id_I,met_ids_elements_I,met_id_O,met_ids_indices_I = []):
        '''Make an unique atom mapping for the given metabolite based on base metabolites and elements

        mapping_id_I/model_id_I: mapping and model used for lookups
        met_ids_elements_I: ordered list of {met_id: element} for each base metabolite
        met_id_O: id of the compound metabolite being built
        met_ids_indices_I: optional explicit copy index per base metabolite
            (otherwise a per-met_id counter is used)
        NOTE(review): met_ids_indices_I has a mutable [] default — it is only
        read here, but callers should not rely on its identity.

        Populates self.metaboliteMapping in place (no return value).
        '''
        #Input:
        #   metIDs_elements_I = [{met_id:element},..]
        #   met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c':'C'}}]
        #   metIDs_elements_I = [met_id:{elements=[string,...],stoichiometry:float}},..]
        #   met_ids_elements_I = [{'f6p_c':{'elements':['C'],'stoichiometry':1}},{'ac_c':{'elements':['C'],'stoichiometry':1}},{'utp_c':{'elements':['C'],'stoichiometry':1}}]
        #   make_compoundTrackedMetabolite('full04','140407_iDM2014',met_ids_elements_I,'uacgam_c')
        currentElementPos = 0;
        mapping_O = [];
        positions_O = [];
        elements_O = [];
        base_met_ids_O = [];
        base_met_elements_O = [];
        base_met_atompositions_O = [];
        base_met_symmetry_elements_O = [];
        base_met_symmetry_atompositions_O = [];
        base_met_indices_O = [];
        # get unique met_ids
        met_ids_all = [];
        for row in met_ids_elements_I:
            for k,v in row.items():
                met_ids_all.append(k);
        met_ids_unique = list(set(met_ids_all))
        met_ids_cnt = {};
        met_ids_elements = {};
        for met_id in met_ids_unique:
            met_ids_cnt[met_id] = 0;
            met_ids_elements[met_id] = [];
        # make the compound mapping
        for row_cnt,row in enumerate(met_ids_elements_I):
            for k,v in row.items():
                # check if the metabolite is already in the database
                met_data = {}
                met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,k)
                #NOTE: need to add in a constraint to make sure that the elements in the database and the elments in the input match!
                if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
                    nElements = len(met_data['met_elements']);
                else:
                    # get the formula for the met_id
                    formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
                    # get the number of elements
                    if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
                    # NOTE(review): nElements stays unbound if the 0-isotope key
                    # is missing, which would raise below — TODO confirm
                    if 0 in Formula(formula_I)._elements[v]:
                        nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
                # determine the metabolite index
                nMets = met_ids_cnt[k];
                if met_ids_indices_I: nMets = met_ids_indices_I[row_cnt]
                # make the tracking
                mapping,positions,elements = self.make_elementsAndPositionsTracked(k+str(nMets),v,nElements);
                # shift positions so base metabolites occupy disjoint atom ranges
                positions_corrected = [currentElementPos+pos for pos in positions];
                currentElementPos += max(positions)+1;
                # add to the compound tracking
                mapping_O.append(mapping);
                positions_O.extend(positions_corrected);
                elements_O.extend(elements);
                base_met_ids_O.append(k)
                base_met_elements_O.append(elements)
                base_met_atompositions_O.append(positions)
                base_met_indices_O.append(nMets)
                met_ids_cnt[k] += 1; # needed to ensure a unique metabolite mapping if the same met_id is used multiple times
        self.metaboliteMapping['mapping_id']=mapping_id_I
        self.metaboliteMapping['met_id']=met_id_O
        self.metaboliteMapping['met_elements']=elements_O
        self.metaboliteMapping['met_atompositions']=positions_O
        self.metaboliteMapping['met_mapping']=mapping_O
        self.metaboliteMapping['base_met_ids']=base_met_ids_O
        self.metaboliteMapping['base_met_elements']=base_met_elements_O
        self.metaboliteMapping['base_met_atompositions']=base_met_atompositions_O
        self.metaboliteMapping['base_met_indices']=base_met_indices_O
def append_baseMetabolites_toMetabolite(self,model_id_I,met_ids_elements_I,met_id_O=None):
'''Append a base metabolite to the current metabolite'''
#get the currentElementPos
currentElementPos = max(self.metaboliteMapping['met_atompositions'])+1;
# get unique met_ids
met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
met_ids_cnt = {};
met_ids_elements = {};
for met_id in met_ids_unique:
met_ids_cnt[met_id] = 0;
met_ids_elements[met_id] = [];
for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
# determine the number of met_ids
met_ids_cnt[met_id]+=1
# determine the unique elements
if not self.metaboliteMapping['met_elements'][0] in met_ids_elements[met_id]:
met_ids_elements[met_id].append(self.metaboliteMapping['met_elements'][met_id_cnt][0]);
# add the mapping for the new metabolites
for row in met_ids_elements_I:
for k,v in row.items():
# check if the metabolite is already in the database
met_data = {}
met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],k)
#NOTE: need to add in a constraint to make sure that the elements in the database and the elments in the input match!
if met_data and 'met_elements' in met_data and v==met_data['met_elements'][0]:
nElements = len(met_data['met_elements']);
else:
# get the formula for the met_id
formula_I = self.stage02_isotopomer_query.get_formula_modelIDAndMetID_dataStage02IsotopomerModelMetabolites(model_id_I,k);
# get the number of elements
if v not in Formula(formula_I)._elements: break; #check if the element is even contained in the formula
if 0 in Formula(formula_I)._elements[v]:
nElements = Formula(formula_I)._elements[v][0]; #get the # of the elements
# adjust the metabolite number if the same metabolite already exists
nMets = met_ids_cnt[k];
met_id_mapping = k+nMets;
# make the tracking
mapping,positions,elements = self.make_elementsAndPositionsTracked(met_id_mapping,v,nElements);
positions_corrected = [currentElementPos+pos for pos in positions];
currentElementPos += max(positions)+1;
# add to the compound tracking
self.metaboliteMapping['met_mapping'].append(mapping);
self.metaboliteMapping['met_atompositions'].extend(positions_corrected);
self.metaboliteMapping['met_elements'].extend(elements);
self.metaboliteMapping['base_met_ids'].append(k)
self.metaboliteMapping['base_met_elements'].append(elements)
self.metaboliteMapping['base_met_atompositions'].append(positions)
self.metaboliteMapping['base_met_indices'].append(met_ids_cnt[k]);
met_ids_cnt[met_id]+=1;
if met_id_O: self.metaboliteMapping['met_id']=met_id_O
    def pop_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None):
        '''Remove a base metabolite from the current metabolite:
        metabolites are removed FILO (the most recently appended match is
        discarded);
        NOTE: this can lead to problems downstream when the mapping
        is reconstructed from the base metabolites if multiple elements are used'''
        #Input:
        # met_id_element_I = {met_id:element}
        # met_id_O = optional new met_id for the remaining compound
        '''Unit Test:
        '''
        # local aliases of the base-metabolite bookkeeping lists
        met_mapping = self.metaboliteMapping['met_mapping'];
        base_met_ids = self.metaboliteMapping['base_met_ids'];
        base_met_elements = self.metaboliteMapping['base_met_elements'];
        base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
        base_met_indices = self.metaboliteMapping['base_met_indices'];
        #base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
        #base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
        # reverse in place so that iterating forward visits the most recently
        # appended base metabolite first (FILO removal)
        met_mapping.reverse();
        base_met_ids.reverse();
        base_met_elements.reverse();
        base_met_atompositions.reverse();
        base_met_indices.reverse();
        #base_met_symmetry_elements.reverse();
        #base_met_symmetry_atompositions.reverse();
        # rebuild the tracked lists, skipping the first (i.e. last-appended)
        # base metabolite that matches the requested id/element pair
        self.metaboliteMapping['met_mapping']=[]
        self.metaboliteMapping['base_met_ids']=[]
        self.metaboliteMapping['base_met_elements']=[]
        self.metaboliteMapping['base_met_atompositions']=[]
        self.metaboliteMapping['base_met_indices']=[]
        #self.metaboliteMapping['base_met_symmetry_elements']=[]
        #self.metaboliteMapping['base_met_symmetry_atompositions']=[]
        # NOTE(review): with more than one entry in met_id_element_I the
        # surviving base metabolites are re-inserted once per entry
        # (the rebuilt lists are not cleared between outer iterations) —
        # confirm that callers only ever pass a single id/element pair
        for met_id_remove,v in met_id_element_I.items():
            removed = False
            for met_cnt,met_id in enumerate(base_met_ids):
                if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                    # first match in reversed order -> drop it
                    removed = True;
                else:
                    # insert(0,...) restores the original (un-reversed) order
                    self.metaboliteMapping['met_mapping'].insert(0,met_mapping[met_cnt]);
                    self.metaboliteMapping['base_met_ids'].insert(0,base_met_ids[met_cnt]);
                    self.metaboliteMapping['base_met_elements'].insert(0,base_met_elements[met_cnt]);
                    self.metaboliteMapping['base_met_atompositions'].insert(0,base_met_atompositions[met_cnt]);
                    self.metaboliteMapping['base_met_indices'].insert(0,base_met_indices[met_cnt])
                    #self.metaboliteMapping['base_met_symmetry_elements'].insert(0,base_met_symmetry_elements[met_cnt]);
                    #self.metaboliteMapping['base_met_symmetry_atompositions'].insert(0,base_met_symmetry_atompositions[met_cnt]);
        '''v1: removes ALL base metabolites that match the met_id'''
        #for met_id_remove in met_ids_I:
        #    for met_cnt,met_id in enumerate(base_met_ids):
        #        if met_id_remove != met_id:
        #            self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
        #            self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
        #            self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
        #            self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        if met_id_O: self.metaboliteMapping['met_id']=met_id_O
        # rebuild met_mapping/met_elements/met_atompositions from what remains
        self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
    def remove_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_id_O=None,met_index_I=None):
        '''Remove a base metabolite from the current metabolite:
        metabolites are removed FIFO (first matching occurrence) if the
        index is not specified; when met_index_I is given only the base
        metabolite carrying that index is removed.'''
        #Input:
        # met_id_element = {met_id:element}
        # met_index_I = optional base_met_indices value selecting which copy
        #               of the metabolite to remove
        '''Unit Test:'''
        # local aliases of the base-metabolite bookkeeping lists
        met_mapping = self.metaboliteMapping['met_mapping'];
        base_met_ids = self.metaboliteMapping['base_met_ids'];
        base_met_elements = self.metaboliteMapping['base_met_elements'];
        base_met_atompositions = self.metaboliteMapping['base_met_atompositions'];
        base_met_indices = self.metaboliteMapping['base_met_indices'];
        #base_met_symmetry_elements=self.metaboliteMapping['base_met_symmetry_elements'];
        #base_met_symmetry_atompositions=self.metaboliteMapping['base_met_symmetry_atompositions'];
        # rebuild the tracked lists, skipping the first base metabolite that
        # matches the requested id/element (and optionally index)
        self.metaboliteMapping['met_mapping']=[]
        self.metaboliteMapping['base_met_ids']=[]
        self.metaboliteMapping['base_met_elements']=[]
        self.metaboliteMapping['base_met_atompositions']=[]
        self.metaboliteMapping['base_met_indices']=[]
        #self.metaboliteMapping['base_met_symmetry_elements']=[]
        #self.metaboliteMapping['base_met_symmetry_atompositions']=[]
        # NOTE(review): with more than one entry in met_id_element_I the
        # surviving base metabolites are re-appended once per entry — confirm
        # that callers only ever pass a single id/element pair
        for met_id_remove,v in met_id_element_I.items():
            removed = False
            for met_cnt,met_id in enumerate(base_met_ids):
                if met_index_I:
                    # index-qualified removal: id, element AND index must match
                    if met_index_I == base_met_indices[met_cnt] and met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                        removed = True
                    else:
                        self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
                        self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
                        self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
                        self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
                        self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
                else:
                    # FIFO removal: first id/element match is dropped
                    if met_id_remove == met_id and v==base_met_elements[met_cnt][0] and not removed:
                        removed = True
                    else:
                        self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
                        self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
                        self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
                        self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
                        self.metaboliteMapping['base_met_indices'].append(base_met_indices[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
                        #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        '''v1: removes ALL base metabolites that match the met_id'''
        #for met_id_remove in met_ids_I:
        #    for met_cnt,met_id in enumerate(base_met_ids):
        #        if met_id_remove != met_id:
        #            self.metaboliteMapping['met_mapping'].append(met_mapping[met_cnt]);
        #            self.metaboliteMapping['base_met_ids'].append(base_met_ids[met_cnt]);
        #            self.metaboliteMapping['base_met_elements'].append(base_met_elements[met_cnt]);
        #            self.metaboliteMapping['base_met_atompositions'].append(base_met_atompositions[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_elements'].append(base_met_symmetry_elements[met_cnt]);
        #            #self.metaboliteMapping['base_met_symmetry_atompositions'].append(base_met_symmetry_atompositions[met_cnt]);
        if met_id_O: self.metaboliteMapping['met_id']=met_id_O
        # rebuild met_mapping/met_elements/met_atompositions from what remains
        self.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
def extract_baseMetabolite_fromMetabolite(self,model_id_I,met_id_element_I,met_index_I=None):
'''Returns a base metabolites from the current metabolite:
returns metabolites in FIFO'''
base_metaboliteMapping = stage02_isotopomer_metaboliteMapping();
base_met_ids = self.metaboliteMapping['base_met_ids'];
met_id_remove = {};
met_index = None
for k,v in met_id_element_I.items():
for met_cnt,met_id in enumerate(base_met_ids):
if met_index_I:
if met_index_I == self.metaboliteMapping['base_met_indices'][met_cnt] and k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
met_index = met_index_I;
break;
else:
if k == met_id and v==self.metaboliteMapping['base_met_elements'][met_cnt][0]:
met_id_remove = {k:self.metaboliteMapping['base_met_elements'][met_cnt][0]};
met_index = self.metaboliteMapping['base_met_indices'][met_cnt]
break;
base_metaboliteMapping.make_trackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_id_remove,met_index);
return base_metaboliteMapping
def update_trackedMetabolite_fromBaseMetabolites(self,model_id_I):
'''update mapping, elements, and atompositions from base metabolites;
NOTE: issues may arise in the number assigned to each metabolite if multiple elements are used'''
# get unique met_ids
met_ids_unique = list(set(self.metaboliteMapping['base_met_ids']))
met_ids_cnt = {};
met_ids_elements = {};
for met_id in met_ids_unique:
met_ids_cnt[met_id] = 0;
met_ids_elements[met_id] = [];
# make the input structure
met_ids_elements_I = [];
for met_id_cnt,met_id in enumerate(self.metaboliteMapping['base_met_ids']):
met_ids_elements_I.append({met_id:self.metaboliteMapping['base_met_elements'][met_id_cnt][0]})
self.make_compoundTrackedMetabolite(self.metaboliteMapping['mapping_id'],model_id_I,met_ids_elements_I,self.metaboliteMapping['met_id'],self.metaboliteMapping['base_met_indices'])
def make_newMetaboliteMapping(self):
'''Make a new mapping for the metabolite that switches out the names of the base metabolites
for the current metabolite'''
mapping_O= [];
elements = list(set(self.metaboliteMapping['met_elements']))
element_cnt = {};
for element in elements:
element_cnt[element] = 0;
for met_element in self.metaboliteMapping['met_elements']:
mapping = '[' + self.metaboliteMapping['met_id'].replace('.','_') + '_' + met_element + str(element_cnt[met_element]) + ']';
mapping_O.append(mapping);
element_cnt[met_element]+=1
return mapping_O
def make_defaultBaseMetabolites(self):
'''Add default base metabolite to the metabolite'''
self.metaboliteMapping['base_met_ids']=[];
self.metaboliteMapping['base_met_elements']=[];
self.metaboliteMapping['base_met_atompositions']=[];
self.metaboliteMapping['base_met_symmetry_elements']=[];
self.metaboliteMapping['base_met_symmetry_atompositions']=[];
self.metaboliteMapping['base_met_indices']=[];
compartment = self.metaboliteMapping['met_id'].split('_')[-1]
for cnt,element in enumerate(self.metaboliteMapping['met_elements']):
if element == 'C':
self.metaboliteMapping['base_met_ids'].append('co2'+'_'+compartment);
self.metaboliteMapping['base_met_elements'].append([element]);
self.metaboliteMapping['base_met_atompositions'].append([0]);
self.metaboliteMapping['base_met_indices'].append(cnt);
elif element == 'H':
self.metaboliteMapping['base_met_ids'].append('h'+'_'+element);
self.metaboliteMapping['base_met_elements'].append([element]);
self.metaboliteMapping['base_met_atompositions'].append([0]);
self.metaboliteMapping['base_met_indices'].append(cnt);
else: print("element not yet supported")
def convert_arrayMapping2StringMapping(self):
'''Convert an array representation of a mapping to a string representation'''
arrayMapping = self.metaboliteMapping['met_mapping']
stringMapping = ''
for mapping in self.metaboliteMapping['met_mapping']:
stringMapping+=''.join(mapping)
return stringMapping;
def convert_stringMapping2ArrayMapping(self):
'''Convert a string representation of a mapping to an array representation'''
stringMapping = self.metaboliteMapping['met_mapping']
if '[' in self.metaboliteMapping['met_mapping']:
stringMapping = self.metaboliteMapping['met_mapping'].split('][');
stringMapping = [m.replace('[','') for m in stringMapping];
stringMapping = [m.replace(']','') for m in stringMapping];
else:
stringMapping = [m for m in stringMapping];
# add in '[]'
arrayMapping = [];
for m in stringMapping:
arrayMapping.append('['+m+']')
return arrayMapping;
def add_metaboliteMapping(self,
mapping_id_I=None,
met_id_I=None,
met_elements_I=None,
met_atompositions_I=None,
met_symmetry_elements_I=None,
met_symmetry_atompositions_I=None,
used__I=True,
comment__I=None):
'''Add tracked metabolite to the database'''
if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
if used__I: self.metaboliteMapping['used_']=used__I;
if comment__I: self.metaboliteMapping['comment_']=comment__I;
#add data to the database
#row = None;
#row = data_stage02_isotopomer_atomMappingMetabolites(self.metaboliteMapping['mapping_id'],
# self.metaboliteMapping['met_id'],
# self.metaboliteMapping['met_elements'],
# self.metaboliteMapping['met_atompositions'],
# self.metaboliteMapping['met_symmetry_elements'],
# self.metaboliteMapping['met_symmetry_atompositions'],
# self.metaboliteMapping['used_'],
# self.metaboliteMapping['comment_'],
# self.make_newMetaboliteMapping(),
# self.metaboliteMapping['base_met_ids'],
# self.metaboliteMapping['base_met_elements'],
# self.metaboliteMapping['base_met_atompositions'],
# self.metaboliteMapping['base_met_symmetry_elements'],
# self.metaboliteMapping['base_met_symmetry_atompositions'],
# self.metaboliteMapping['base_met_indices']);
#self.session.add(row);
#self.session.commit();
data = self.metaboliteMapping;
data['met_mapping'] = self.make_newMetaboliteMapping();
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites([data]);
def update_metaboliteMapping(self,
mapping_id_I=None,
met_id_I=None,
met_elements_I=None,
met_atompositions_I=None,
met_symmetry_elements_I=None,
met_symmetry_atompositions_I=None,
used__I=True,
comment__I=None):
'''Add tracked metabolite to the database'''
if mapping_id_I: self.metaboliteMapping['mapping_id']=mapping_id_I;
if met_id_I: self.metaboliteMapping['met_id']=met_id_I;
if met_elements_I: self.metaboliteMapping['met_elements']=met_elements_I;
if met_atompositions_I: self.metaboliteMapping['met_atompositions']=met_atompositions_I;
if met_symmetry_elements_I: self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
if met_symmetry_atompositions_I: self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
if used__I: self.metaboliteMapping['used_']=used__I;
if comment__I: self.metaboliteMapping['comment_']=comment__I;
self.metaboliteMapping['met_mapping']=self.make_newMetaboliteMapping()
#add update data in the database
self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingMetabolites([self.metaboliteMapping]);
def get_metaboliteMapping(self,mapping_id_I,met_id_I):
'''Get tracked metabolite from the database'''
row = {}
row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_I,met_id_I);
self.metaboliteMapping=row;
def get_baseMetabolites(self):
'''Get base metabolite from the database for the current metabolite'''
row = {}
row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.metaboliteMapping['mapping_id'],self.metaboliteMapping['met_id']);
self.metaboliteMapping['base_met_ids']=row['base_met_ids'];
self.metaboliteMapping['base_met_elements']=row['base_met_elements']
self.metaboliteMapping['base_met_atompositions']=row['base_met_atompositions']
self.metaboliteMapping['base_met_symmetry_elements']=row['base_met_symmetry_elements']
self.metaboliteMapping['base_met_symmetry_atompositions']=row['base_met_symmetry_atompositions']
## if the current base_met_indices are already set, add to them
## NOTE: works only if the base metabolite is also the current metabolite
#if len(self.metaboliteMapping['base_met_indices'])==1:
# currentIndex = self.metaboliteMapping['base_met_indices'][0]
# self.metaboliteMapping['base_met_indices'] = [currentIndex + i for i in row['base_met_indices']];
## else ensure that all met_id/base_met_index pairs are unique
#else:
# self.metaboliteMapping['base_met_indices']=row['base_met_indices']
self.metaboliteMapping['base_met_indices']=row['base_met_indices']
def clear_metaboliteMapping(self):
self.metaboliteMapping={};
self.metaboliteMapping['mapping_id']=None;
#self.metaboliteMapping['met_name']=None;
self.metaboliteMapping['met_id']=None;
#self.metaboliteMapping['formula']=None;
self.metaboliteMapping['met_elements']=None;
self.metaboliteMapping['met_atompositions']=None;
self.metaboliteMapping['met_symmetry_elements']=None;
self.metaboliteMapping['met_symmetry_atompositions']=None;
self.metaboliteMapping['used_']=True;
self.metaboliteMapping['comment_']=None;
self.metaboliteMapping['met_mapping']=None;
self.metaboliteMapping['base_met_ids']=None;
self.metaboliteMapping['base_met_elements']=None;
self.metaboliteMapping['base_met_atompositions']=None;
self.metaboliteMapping['base_met_symmetry_elements']=None;
self.metaboliteMapping['base_met_symmetry_atompositions']=None;
self.metaboliteMapping['base_met_indices']=None;
def make_symmetric(self,met_symmetry_elements_I=[],met_symmetry_atompositions_I=[]):
'''Make the current metabolite symmetric
default = 180 symmetry'''
if met_symmetry_elements_I and met_symmetry_atompositions_I:
self.metaboliteMapping['met_symmetry_elements']=met_symmetry_elements_I;
self.metaboliteMapping['met_symmetry_atompositions']=met_symmetry_atompositions_I;
else:
self.metaboliteMapping['met_symmetry_elements']=[m for m in reversed(self.metaboliteMapping['met_elements'])];
self.metaboliteMapping['met_symmetry_atompositions']=[m for m in reversed(self.metaboliteMapping['met_atompositions'])];
def copy_metaboliteMappingDict(self):
'''Copy the current metabolite mapping'''
copy_metaboliteMapping = {};
copy_metaboliteMapping['mapping_id']=self.metaboliteMapping['mapping_id']
#copy_metaboliteMapping['met_name']=self.metaboliteMapping['met_name']
copy_metaboliteMapping['met_id']=self.metaboliteMapping['met_id']
#copy_metaboliteMapping['formula']=self.metaboliteMapping['formula']
copy_metaboliteMapping['met_elements']=self.metaboliteMapping['met_elements']
copy_metaboliteMapping['met_atompositions']=self.metaboliteMapping['met_atompositions']
copy_metaboliteMapping['met_symmetry_elements']=self.metaboliteMapping['met_symmetry_elements']
copy_metaboliteMapping['met_symmetry_atompositions']=self.metaboliteMapping['met_symmetry_atompositions']
copy_metaboliteMapping['used_']=self.metaboliteMapping['used_']
copy_metaboliteMapping['comment_']=self.metaboliteMapping['comment_']
copy_metaboliteMapping['met_mapping']=self.metaboliteMapping['met_mapping']
copy_metaboliteMapping['base_met_ids']=self.metaboliteMapping['base_met_ids']
copy_metaboliteMapping['base_met_elements']=self.metaboliteMapping['base_met_elements']
copy_metaboliteMapping['base_met_atompositions']=self.metaboliteMapping['base_met_atompositions']
copy_metaboliteMapping['base_met_symmetry_elements']=self.metaboliteMapping['base_met_symmetry_elements']
copy_metaboliteMapping['base_met_symmetry_atompositions']=self.metaboliteMapping['base_met_symmetry_atompositions']
copy_metaboliteMapping['base_met_indices']=self.metaboliteMapping['base_met_indices'];
return copy_metaboliteMapping
    def copy_metaboliteMapping(self):
        '''Return this metabolite mapping object.

        NOTE(review): despite the name, this returns ``self`` and makes no
        copy; callers appear to wrap the result in ``copy(...)`` to obtain
        an actual (shallow) copy — confirm before relying on independence
        of the returned object.'''
        return self;
class stage02_isotopomer_reactionMapping():
def __init__(self,
mapping_id_I=None,
rxn_id_I=None,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_elements_tracked_I=[],
products_elements_tracked_I=[],
reactants_positions_tracked_I=[],
products_positions_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=None,
comment__I=None,
reactants_metaboliteMappings_I=[],
products_metaboliteMappings_I=[]):
#self.session = Session();
self.stage02_isotopomer_query = stage02_isotopomer_query();
self.calculate = base_calculate();
self.reactionMapping={}
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=rxn_description_I
self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_I
self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_I
self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_I
self.reactionMapping['products_ids_tracked']=products_ids_tracked_I
self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_I
self.reactionMapping['products_elements_tracked']=products_elements_tracked_I
self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_I
self.reactionMapping['products_positions_tracked']=products_positions_tracked_I
self.reactionMapping['reactants_mapping']=reactants_mapping_I
self.reactionMapping['products_mapping']=products_mapping_I
self.reactionMapping['rxn_equation']=rxn_equation_I
self.reactionMapping['used_']=used__I
self.reactionMapping['comment_']=comment__I
self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_I
self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_I
self.reactants_base_met_ids=[];
self.reactants_base_met_elements=[];
self.reactants_base_met_atompositions=[];
self.reactants_base_met_symmetry_elements=[];
self.reactants_base_met_symmetry_atompositions=[];
self.reactants_base_met_indices=[];
self.products_base_met_ids=[];
self.products_base_met_elements=[];
self.products_base_met_atompositions=[];
self.products_base_met_symmetry_elements=[];
self.products_base_met_symmetry_atompositions=[];
self.products_base_met_indices=[];
def make_trackedCompoundReaction_fromRow(self,mapping_id_I,model_id_I,rxn_id_I,
rxn_description_I=None,
reactants_stoichiometry_tracked_I=[],
products_stoichiometry_tracked_I=[],
reactants_ids_tracked_I=[],
products_ids_tracked_I=[],
reactants_mapping_I=[],
products_mapping_I=[],
rxn_equation_I=None,
used__I=True,
comment__I=None):
irm = stage02_isotopomer_reactionMapping(
mapping_id_I=mapping_id_I,
rxn_id_I=rxn_id_I,
rxn_description_I=rxn_id_I,
reactants_stoichiometry_tracked_I=reactants_stoichiometry_tracked_I,
products_stoichiometry_tracked_I=products_stoichiometry_tracked_I,
reactants_ids_tracked_I=reactants_ids_tracked_I,
products_ids_tracked_I=products_ids_tracked_I,
reactants_mapping_I=reactants_mapping_I,
products_mapping_I=products_mapping_I,
rxn_equation_I=rxn_equation_I,
used__I=used__I,
comment__I=comment__I);
irm.reactionMapping['reactants_elements_tracked']=None;
irm.reactionMapping['reactants_positions_tracked']=None;
irm.reactionMapping['products_elements_tracked']=None;
irm.reactionMapping['products_positions_tracked']=None;
irm.checkAndCorrect_elementsAndPositions();
self.reactionMapping['mapping_id']=irm.reactionMapping['mapping_id']
self.reactionMapping['rxn_id']=irm.reactionMapping['rxn_id']
self.reactionMapping['rxn_description']=irm.reactionMapping['rxn_description']
self.reactionMapping['rxn_equation']=irm.reactionMapping['rxn_equation']
self.reactionMapping['used_']=irm.reactionMapping['used_']
self.reactionMapping['comment_']=irm.reactionMapping['comment_']
for reactant_id_cnt,reactant_id in enumerate(irm.reactionMapping['reactants_ids_tracked']):
self.reactionMapping['reactants_stoichiometry_tracked'].append(irm.reactionMapping['reactants_stoichiometry_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_ids_tracked'].append(irm.reactionMapping['reactants_ids_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_elements_tracked'].append(irm.reactionMapping['reactants_elements_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_positions_tracked'].append(irm.reactionMapping['reactants_positions_tracked'][reactant_id_cnt])
self.reactionMapping['reactants_mapping'].append(irm.reactionMapping['reactants_mapping'][reactant_id_cnt])
for product_id_cnt,product_id in enumerate(irm.reactionMapping['products_ids_tracked']):
self.reactionMapping['products_stoichiometry_tracked'].append(irm.reactionMapping['products_stoichiometry_tracked'][product_id_cnt])
self.reactionMapping['products_ids_tracked'].append(irm.reactionMapping['products_ids_tracked'][product_id_cnt])
self.reactionMapping['products_elements_tracked'].append(irm.reactionMapping['products_elements_tracked'][product_id_cnt])
self.reactionMapping['products_positions_tracked'].append(irm.reactionMapping['products_positions_tracked'][product_id_cnt])
self.reactionMapping['products_mapping'].append(irm.reactionMapping['products_mapping'][product_id_cnt])
self.make_reactantsAndProductsMetaboliteMappings(reactionMapping_I=irm.reactionMapping);
    def make_trackedBinaryReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_id_I):
        '''Make a binary reaction of the form A + B + ... = C.

        Each reactant is tracked individually (stoichiometry -1.0) and the
        single product is built as the compound of all tracked reactants
        (stoichiometry 1.0). The result overwrites self.reactionMapping.'''
        #Input
        # reactant_ids_elements_I = [{met_id:element},...] tracked reactants
        #   (repeated met_ids are allowed; each copy gets its own index)
        # product_id_I = met_id of the compound product (may be falsy to
        #   skip the product side)
        # e.g. met_ids_elements_I = [{'f6p_c':'C'},{'ac_c':'C'},{'utp_c','C'}]
        # e.g. irm.make_trackedBinaryReaction('full04','140407_iDM2014','rxn01',met_ids_elements_I,'uacgam_c')
        imm = stage02_isotopomer_metaboliteMapping();
        # count copies of each reactant met_id so that repeated metabolites
        # get unique indices
        reactant_ids_all = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                reactant_ids_all.append(k);
        reactant_ids_unique = list(set(reactant_ids_all))
        reactant_ids_cnt = {};
        for reactant_id in reactant_ids_unique:
            reactant_ids_cnt[reactant_id] = 0;
        # make the reactants mapping
        reactants_stoichiometry_tracked_O = [];
        reactants_ids_tracked_O = [];
        reactants_elements_tracked_O = [];
        reactants_positions_tracked_O = [];
        reactants_mapping_O = [];
        reactants_metaboliteMappings_O = [];
        for row in reactant_ids_elements_I:
            for k,v in row.items():
                # track this reactant copy; imm is reset after each use
                imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
                reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
                reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
                reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
                reactants_stoichiometry_tracked_O.append(-1.0);
                reactants_ids_tracked_O.append(k);
                # NOTE(review): copy_metaboliteMapping returns the object
                # itself; copy(...) is presumably copy.copy (shallow) —
                # the contained lists remain shared; confirm intended
                reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
                imm.clear_metaboliteMapping()
                reactant_ids_cnt[k]+=1
        # make the products mapping
        products_stoichiometry_tracked_O = [];
        products_ids_tracked_O = [];
        products_elements_tracked_O = [];
        products_positions_tracked_O = [];
        products_mapping_O = [];
        products_metaboliteMappings_O = [];
        if product_id_I:
            # the product is the compound of all tracked reactants
            imm.make_compoundTrackedMetabolite(mapping_id_I,model_id_I,reactant_ids_elements_I,product_id_I);
            products_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
            products_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
            products_mapping_O.append(imm.convert_arrayMapping2StringMapping());
            products_stoichiometry_tracked_O.append(1.0);
            products_ids_tracked_O.append(product_id_I);
            products_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
        # save the reaction
        self.reactionMapping['mapping_id']=mapping_id_I
        self.reactionMapping['rxn_id']=rxn_id_I
        self.reactionMapping['rxn_description']=None
        self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
        self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
        self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
        self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
        self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
        self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
        self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
        self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
        self.reactionMapping['reactants_mapping']=reactants_mapping_O
        self.reactionMapping['products_mapping']=products_mapping_O
        self.reactionMapping['rxn_equation']=None
        self.reactionMapping['used_']=True
        self.reactionMapping['comment_']=None
        self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
        self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
def make_trackedCompoundReaction(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
'''Make a compound tracked reaction
1. make compound product
2. remove specified base products from compound product
3. update the compound product
4. rename the base products
5. append base products to products list'''
#Input
# reactant_ids_elements_I = [{met_id:elements},...]
# base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned)
# base_reactant_indices_I = [{met_id_product:position in base_reactants_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned)
# index referes to the position of the base met_id in the reactant to be partitioned
# compound_product_id_I = met_id
# base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order
# base_product_ids_O = [met_id_new,...] #Note: must be listed in order
imm = stage02_isotopomer_metaboliteMapping();
imm_product = stage02_isotopomer_metaboliteMapping();
# initialize the structure to track the base_met_ids
reactant_ids_all = [];
for k in self.reactionMapping['reactants_ids_tracked']:
reactant_ids_all.append(k);
reactant_ids_unique = list(set(reactant_ids_all))
reactant_ids_cnt = {};
for reactant_id in reactant_ids_unique:
reactant_ids_cnt[reactant_id] = 0;
for reactant_id in reactant_ids_all:
reactant_ids_cnt[reactant_id]+=1;
# initialize the count for unique base_met_ids
reactants_base_met_ids = [];
reactants_base_indices = [];
for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
reactants_base_met_ids_I = [];
# get unique reactants_base_met_ids
reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
reactants_base_met_ids_cnt = {};
for base_met_id in reactants_base_met_ids_unique:
reactants_base_met_ids_cnt[base_met_id]=0;
for cnt,base_met_id in enumerate(reactants_base_met_ids):
reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
# make the reactants mapping
imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
imm_product.metaboliteMapping['base_met_ids']=[];
imm_product.metaboliteMapping['base_met_elements']=[];
imm_product.metaboliteMapping['base_met_atompositions']=[];
imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
imm_product.metaboliteMapping['base_met_indices']=[];
# initialize the counter the input
matched_cnt = 0;
for row_cnt,row in enumerate(reactant_ids_elements_I):
for k,v in row.items():
# initialize new metabolites
if not k in list(reactant_ids_cnt.keys()):
reactant_ids_cnt[k]=0
# make the metabolite mapping
imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
#update the counter for unique met_ids
reactant_ids_cnt[k]+=1
# update base_metabolites from the database for reactant that will be partitioned
base_found = False;
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == k and row_cnt == v1:
imm.get_baseMetabolites();
imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
base_found = True;
break;
# assign new indices for each base metabolite based on the current indices in the reactants
base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
# initialize new base metabolites
if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
reactants_base_met_ids_cnt[met_id1]=0;
# assign the next current base_metabolite_index
imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1]
# update the base_reactant_indices_I if the corresponding base_met_index was changed
if matched_cnt < len(base_reactant_positions_I):
for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
if k1 == k and row_cnt == v1: # does the met_id and position in the reactant list match?
for k2,v2 in base_reactant_indices_I[matched_cnt].items():
if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match?
base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1];
reactants_base_met_ids_cnt[met_id1]+=1;
# update counter for matched input
if base_found: matched_cnt+=1;
# update met_mapping
imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
# add in the new metaboliteMapping information
self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']);
self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']);
self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0);
self.reactionMapping['reactants_ids_tracked'].append(k);
self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']);
self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']);
self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']);
#self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']);
# copy out all of the base information for the product
imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']);
imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']);
imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']);
#imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']);
#imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']);
#
imm.clear_metaboliteMapping()
# make the initial compound product mapping
imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I)
imm_product.metaboliteMapping['met_id']=compound_product_id_I;
# extract out the products from the compound product
base_products = [];
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k]));
# remove the base_products from the compound product
for cnt,row in enumerate(base_product_ids_elements_I):
for k,v in row.items():
imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]);
# make the final products
if compound_product_id_I: imm_final_products = [imm_product];
else: imm_final_products = [];
for d in base_products:
imm_final_products.append(d);
if compound_product_id_I: imm_final_products_ids = [compound_product_id_I];
else: imm_final_products_ids = [];
for id in base_product_ids_O:
imm_final_products_ids.append(id);
for cnt,d in enumerate(imm_final_products):
self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']);
self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']);
self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping());
self.reactionMapping['products_stoichiometry_tracked'].append(1.0);
self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]);
self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping()));
# save the reaction
self.reactionMapping['mapping_id']=mapping_id_I
self.reactionMapping['rxn_id']=rxn_id_I
self.reactionMapping['rxn_description']=None
self.reactionMapping['rxn_equation']=None
self.reactionMapping['used_']=True
self.reactionMapping['comment_']=None
def make_trackedCompoundReaction_fromMetaboliteMappings(self,mapping_id_I,model_id_I,rxn_id_I,reactant_metaboliteMappings_I,base_reactant_positions_I,base_reactant_indices_I,compound_product_id_I,base_product_ids_elements_I,base_product_ids_O):
    '''Make a compound tracked reaction
    1. make compound product
    2. remove specified base products from compound product
    3. update the compound product
    4. rename the base products
    5. append base products to products list'''
    # Variant that takes pre-built metaboliteMapping objects for the reactants
    # (instead of {met_id:elements} dicts); reactant mappings are appended to
    # self.reactionMapping and a compound product is assembled from their base
    # metabolites, then partitioned into the requested base products.
    #Input
    # reactant_metaboliteMappings_I = [mm_1,mm_2,...]
    # base_reactant_positions_I = [{met_id_reactant:position},...] #Note: must be listed in order (positions of the reactant to be partitioned)
    # base_reactant_indices_I = [{met_id_product:position in base_reactants_ids},...] #Note: must be listed in order (positions of the reactant to be partitioned)
    # index referes to the position of the base met_id in the reactant to be partitioned
    # compound_product_id_I = met_id
    # base_product_ids_elements_I = [{met_id:elements},...] #Note: must be listed in order
    # base_product_ids_O = [met_id_new,...] #Note: must be listed in order
    # NOTE(review): base_reactant_indices_I is mutated in place below — callers
    # should not reuse the passed-in structure; confirm this is intended.
    imm_product = stage02_isotopomer_metaboliteMapping();
    # initialize the structure to track the base_met_ids
    # count existing occurrences of each tracked reactant id already recorded
    # on this reaction, so repeated met_ids get distinct occurrence counters
    reactant_ids_all = [];
    for k in self.reactionMapping['reactants_ids_tracked']:
        reactant_ids_all.append(k);
    reactant_ids_unique = list(set(reactant_ids_all))
    reactant_ids_cnt = {};
    for reactant_id in reactant_ids_unique:
        reactant_ids_cnt[reactant_id] = 0;
    for reactant_id in reactant_ids_all:
        reactant_ids_cnt[reactant_id]+=1;
    # initialize the count for unique base_met_ids
    # gather all base met ids/indices already present on the recorded reactants
    reactants_base_met_ids = [];
    reactants_base_indices = [];
    for cnt,mm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
        reactants_base_met_ids.extend(mm.metaboliteMapping['base_met_ids'])
        reactants_base_indices.extend(self.reactionMapping['reactants_metaboliteMappings'][cnt].metaboliteMapping['base_met_indices'])
    reactants_base_met_ids_I = [];  # NOTE(review): unused variable
    # get unique reactants_base_met_ids
    reactants_base_met_ids_unique = list(set(reactants_base_met_ids));
    reactants_base_met_ids_cnt = {};
    for base_met_id in reactants_base_met_ids_unique:
        reactants_base_met_ids_cnt[base_met_id]=0;
    # the next free index for each base met id is one past the largest recorded
    # index (assumes reactants_base_indices is non-decreasing per id — TODO confirm)
    for cnt,base_met_id in enumerate(reactants_base_met_ids):
        reactants_base_met_ids_cnt[base_met_id]=reactants_base_indices[cnt]+1
    # make the reactants mapping
    # start the compound product from an empty base-metabolite set
    imm_product.metaboliteMapping['mapping_id'] = mapping_id_I
    imm_product.metaboliteMapping['base_met_ids']=[];
    imm_product.metaboliteMapping['base_met_elements']=[];
    imm_product.metaboliteMapping['base_met_atompositions']=[];
    imm_product.metaboliteMapping['base_met_symmetry_elements']=[];
    imm_product.metaboliteMapping['base_met_symmetry_atompositions']=[];
    imm_product.metaboliteMapping['base_met_indices']=[];
    # initialize the counter the input
    matched_cnt = 0;
    for row_cnt,imm in enumerate(reactant_metaboliteMappings_I):
        # initialize new metabolites
        if not imm.metaboliteMapping['met_id'] in list(reactant_ids_cnt.keys()):
            reactant_ids_cnt[imm.metaboliteMapping['met_id']]=0
        # make the metabolite mapping
        #update the counter for unique met_ids
        reactant_ids_cnt[imm.metaboliteMapping['met_id']]+=1
        # update base_metabolites from the database for reactant that will be partitioned
        # flag whether this reactant (by id and list position) is one of the
        # reactants designated for partitioning
        base_found = False;
        if matched_cnt < len(base_reactant_positions_I):
            for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
                if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1:
                    base_found = True;
                    break;
        # assign new indices for each base metabolite based on the current indices in the reactants
        # snapshot the pre-renumbering indices so the match below compares
        # against the caller-supplied (old) index values
        base_met_indices_tmp = copy(imm.metaboliteMapping['base_met_indices']);
        for cnt1,met_id1 in enumerate(imm.metaboliteMapping['base_met_ids']):
            # initialize new base metabolites
            if not met_id1 in list(reactants_base_met_ids_cnt.keys()):
                reactants_base_met_ids_cnt[met_id1]=0;
            # assign the next current base_metabolite_index
            imm.metaboliteMapping['base_met_indices'][cnt1]=reactants_base_met_ids_cnt[met_id1]
            # update the base_reactant_indices_I if the corresponding base_met_index was changed
            if matched_cnt < len(base_reactant_positions_I):
                for k1,v1 in base_reactant_positions_I[matched_cnt].items(): #there will be only 1 key-value pair
                    if k1 == imm.metaboliteMapping['met_id'] and row_cnt == v1: # does the met_id and position in the reactant list match?
                        for k2,v2 in base_reactant_indices_I[matched_cnt].items():
                            if k2==met_id1 and v2==base_met_indices_tmp[cnt1]: # does the base_met_id and previous index match?
                                base_reactant_indices_I[matched_cnt][k2]=imm.metaboliteMapping['base_met_indices'][cnt1];
            reactants_base_met_ids_cnt[met_id1]+=1;
        # update counter for matched input
        if base_found: matched_cnt+=1;
        # update met_mapping
        imm.update_trackedMetabolite_fromBaseMetabolites(model_id_I);
        # add in the new metaboliteMapping information
        # record this reactant on the reaction (stoichiometry fixed at -1.0)
        self.reactionMapping['reactants_elements_tracked'].append(imm.metaboliteMapping['met_elements']);
        self.reactionMapping['reactants_positions_tracked'].append(imm.metaboliteMapping['met_atompositions']);
        self.reactionMapping['reactants_mapping'].append(imm.convert_arrayMapping2StringMapping());
        self.reactionMapping['reactants_stoichiometry_tracked'].append(-1.0);
        self.reactionMapping['reactants_ids_tracked'].append(imm.metaboliteMapping['met_id']);
        self.reactionMapping['reactants_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
        self.reactants_base_met_ids.extend(imm.metaboliteMapping['base_met_ids']);
        self.reactants_base_met_elements.extend(imm.metaboliteMapping['base_met_elements']);
        self.reactants_base_met_atompositions.extend(imm.metaboliteMapping['base_met_atompositions']);
        #self.reactants_base_met_symmetry_elements.extend(imm.metaboliteMapping['base_met_symmetry_elements']);
        #self.reactants_base_met_symmetry_atompositions.extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
        self.reactants_base_met_indices.extend(imm.metaboliteMapping['base_met_indices']);
        # copy out all of the base information for the product
        # the compound product accumulates every reactant's base metabolites
        imm_product.metaboliteMapping['base_met_ids'].extend(imm.metaboliteMapping['base_met_ids']);
        imm_product.metaboliteMapping['base_met_elements'].extend(imm.metaboliteMapping['base_met_elements']);
        imm_product.metaboliteMapping['base_met_atompositions'].extend(imm.metaboliteMapping['base_met_atompositions']);
        #imm_product.metaboliteMapping['base_met_symmetry_elements'].extend(imm.metaboliteMapping['base_met_symmetry_elements']);
        #imm_product.metaboliteMapping['base_met_symmetry_atompositions'].extend(imm.metaboliteMapping['base_met_symmetry_atompositions']);
        imm_product.metaboliteMapping['base_met_indices'].extend(imm.metaboliteMapping['base_met_indices']);
    # make the initial compound product mapping
    imm_product.update_trackedMetabolite_fromBaseMetabolites(model_id_I)
    imm_product.metaboliteMapping['met_id']=compound_product_id_I;
    # extract out the products from the compound product
    base_products = [];
    for cnt,row in enumerate(base_product_ids_elements_I):
        for k,v in row.items():
            base_products.append(imm_product.extract_baseMetabolite_fromMetabolite(model_id_I,{k:v},base_reactant_indices_I[cnt][k]));
    # remove the base_products from the compound product
    for cnt,row in enumerate(base_product_ids_elements_I):
        for k,v in row.items():
            imm_product.remove_baseMetabolite_fromMetabolite(model_id_I,{k:v},met_id_O=compound_product_id_I,met_index_I=base_reactant_indices_I[cnt][k]);
    # make the final products
    # the (possibly emptied) compound product leads the list only when a
    # compound product id was requested
    if compound_product_id_I: imm_final_products = [imm_product];
    else: imm_final_products = [];
    for d in base_products:
        imm_final_products.append(d);
    if compound_product_id_I: imm_final_products_ids = [compound_product_id_I];
    else: imm_final_products_ids = [];
    for id in base_product_ids_O:
        imm_final_products_ids.append(id);
    # record each final product on the reaction (stoichiometry fixed at +1.0)
    for cnt,d in enumerate(imm_final_products):
        self.reactionMapping['products_elements_tracked'].append(d.metaboliteMapping['met_elements']);
        self.reactionMapping['products_positions_tracked'].append(d.metaboliteMapping['met_atompositions']);
        self.reactionMapping['products_mapping'].append(d.convert_arrayMapping2StringMapping());
        self.reactionMapping['products_stoichiometry_tracked'].append(1.0);
        self.reactionMapping['products_ids_tracked'].append(imm_final_products_ids[cnt]);
        self.reactionMapping['products_metaboliteMappings'].append(copy(d.copy_metaboliteMapping()));
    # save the reaction
    self.reactionMapping['mapping_id']=mapping_id_I
    self.reactionMapping['rxn_id']=rxn_id_I
    self.reactionMapping['rxn_description']=None
    self.reactionMapping['rxn_equation']=None
    self.reactionMapping['used_']=True
    self.reactionMapping['comment_']=None
def make_trackedUnitaryReactions(self,mapping_id_I,model_id_I,rxn_id_I,reactant_ids_elements_I,product_ids_I):
    '''Make a unitary reaction of the form aA = bB where the coefficient a = b'''
    # Each reactant i maps one-to-one onto product i: the product inherits the
    # reactant's elements, positions, and atom mapping, and only the met_id is
    # renamed. The assembled mapping overwrites self.reactionMapping.
    #Input
    # reactant_ids_elements_I = [{met_id:elements},]
    # product_ids_elements_I = [met_id,...]
    # check input
    # the one-to-one pairing requires equal-length reactant and product lists
    if len(reactant_ids_elements_I)!=len(product_ids_I):
        print("length of reactants_ids does not match the length of products_ids");
        return;
    imm = stage02_isotopomer_metaboliteMapping();
    # get unique met_ids
    reactant_ids_all = [];
    for row in reactant_ids_elements_I:
        for k,v in row.items():
            reactant_ids_all.append(k);
    reactant_ids_unique = list(set(reactant_ids_all))
    # per-met_id occurrence counter so repeated met_ids get distinct mappings
    reactant_ids_cnt = {};
    for reactant_id in reactant_ids_unique:
        reactant_ids_cnt[reactant_id] = 0;
    # make the reactants mapping
    reactants_stoichiometry_tracked_O = [];
    reactants_ids_tracked_O = [];
    reactants_elements_tracked_O = [];
    reactants_positions_tracked_O = [];
    reactants_mapping_O = [];
    reactants_metaboliteMappings_O = [];
    for row in reactant_ids_elements_I:
        for k,v in row.items():
            # build the tracked mapping for this occurrence of the metabolite;
            # imm is reused, so it is cleared again after each iteration
            imm.make_trackedMetabolite(mapping_id_I,model_id_I,{k:v},reactant_ids_cnt[k]);
            reactants_elements_tracked_O.append(imm.metaboliteMapping['met_elements']);
            reactants_positions_tracked_O.append(imm.metaboliteMapping['met_atompositions']);
            reactants_mapping_O.append(imm.convert_arrayMapping2StringMapping());
            reactants_stoichiometry_tracked_O.append(-abs(1));  # reactant stoichiometry is always -1
            reactants_ids_tracked_O.append(k);
            reactants_metaboliteMappings_O.append(copy(imm.copy_metaboliteMapping()));
            imm.clear_metaboliteMapping()
            reactant_ids_cnt[k]+=1
    # make the products mapping
    # products mirror the reactants at the same list position, renamed
    products_stoichiometry_tracked_O = [];
    products_ids_tracked_O = [];
    products_elements_tracked_O = [];
    products_positions_tracked_O = [];
    products_mapping_O = [];
    products_metaboliteMappings_O = [];
    for product_cnt,product in enumerate(product_ids_I):
        products_elements_tracked_O.append(reactants_elements_tracked_O[product_cnt]);
        products_positions_tracked_O.append(reactants_positions_tracked_O[product_cnt]);
        products_mapping_O.append(reactants_mapping_O[product_cnt]);
        products_stoichiometry_tracked_O.append(abs(reactants_stoichiometry_tracked_O[product_cnt]));
        products_ids_tracked_O.append(product);
        # copy so renaming the product does not mutate the reactant's mapping
        imm_tmp = copy(reactants_metaboliteMappings_O[product_cnt].copy_metaboliteMapping());
        imm_tmp.metaboliteMapping['met_id']=product; # change the name
        products_metaboliteMappings_O.append(imm_tmp);
    # save the reaction
    self.reactionMapping['mapping_id']=mapping_id_I
    self.reactionMapping['rxn_id']=rxn_id_I
    self.reactionMapping['rxn_description']=None
    self.reactionMapping['reactants_stoichiometry_tracked']=reactants_stoichiometry_tracked_O
    self.reactionMapping['products_stoichiometry_tracked']=products_stoichiometry_tracked_O
    self.reactionMapping['reactants_ids_tracked']=reactants_ids_tracked_O
    self.reactionMapping['products_ids_tracked']=products_ids_tracked_O
    self.reactionMapping['reactants_elements_tracked']=reactants_elements_tracked_O
    self.reactionMapping['products_elements_tracked']=products_elements_tracked_O
    self.reactionMapping['reactants_positions_tracked']=reactants_positions_tracked_O
    self.reactionMapping['products_positions_tracked']=products_positions_tracked_O
    self.reactionMapping['reactants_mapping']=reactants_mapping_O
    self.reactionMapping['products_mapping']=products_mapping_O
    self.reactionMapping['rxn_equation']=None
    self.reactionMapping['used_']=True
    self.reactionMapping['comment_']=None
    self.reactionMapping['reactants_metaboliteMappings']=reactants_metaboliteMappings_O
    self.reactionMapping['products_metaboliteMappings']=products_metaboliteMappings_O
def make_reverseReaction(self,rxn_id_I=None):
    '''Replace self.reactionMapping in place with the reverse reaction.

    Reactants and products swap roles and every tracked stoichiometry is
    negated, so reactants stay negative and products stay positive in the
    reversed mapping.

    Args:
        rxn_id_I: optional rxn_id for the reversed reaction; when falsy the
            original rxn_id is kept.

    NOTE: the previous implementation also assembled a `forward_reactionMapping`
    copy of the current mapping that was never read; that dead code has been
    removed. The reversed mapping intentionally shares the underlying lists
    with the old mapping (no deep copy), matching the original behavior.
    '''
    reverse_reactionMapping = {}
    reverse_reactionMapping['mapping_id']=self.reactionMapping['mapping_id']
    if rxn_id_I: reverse_reactionMapping['rxn_id']=rxn_id_I
    else: reverse_reactionMapping['rxn_id']=self.reactionMapping['rxn_id']
    reverse_reactionMapping['rxn_description']=self.reactionMapping['rxn_description']
    # negate so the sign convention (reactants < 0 < products) is preserved
    reverse_reactionMapping['reactants_stoichiometry_tracked']=[-s for s in self.reactionMapping['products_stoichiometry_tracked']]
    reverse_reactionMapping['products_stoichiometry_tracked']=[-s for s in self.reactionMapping['reactants_stoichiometry_tracked']]
    reverse_reactionMapping['reactants_ids_tracked']=self.reactionMapping['products_ids_tracked']
    reverse_reactionMapping['products_ids_tracked']=self.reactionMapping['reactants_ids_tracked']
    reverse_reactionMapping['reactants_elements_tracked']=self.reactionMapping['products_elements_tracked']
    reverse_reactionMapping['products_elements_tracked']=self.reactionMapping['reactants_elements_tracked']
    reverse_reactionMapping['reactants_positions_tracked']=self.reactionMapping['products_positions_tracked']
    reverse_reactionMapping['products_positions_tracked']=self.reactionMapping['reactants_positions_tracked']
    reverse_reactionMapping['reactants_mapping']=self.reactionMapping['products_mapping']
    reverse_reactionMapping['products_mapping']=self.reactionMapping['reactants_mapping']
    reverse_reactionMapping['rxn_equation']=self.reactionMapping['rxn_equation']
    reverse_reactionMapping['used_']=self.reactionMapping['used_']
    reverse_reactionMapping['comment_']=self.reactionMapping['comment_']
    reverse_reactionMapping['reactants_metaboliteMappings']=self.reactionMapping['products_metaboliteMappings']
    reverse_reactionMapping['products_metaboliteMappings']=self.reactionMapping['reactants_metaboliteMappings']
    self.reactionMapping = reverse_reactionMapping;
def add_reactionMapping(self,
                        mapping_id_I=None,
                        rxn_id_I=None,
                        rxn_description_I=None,
                        reactants_stoichiometry_tracked_I=[],
                        products_stoichiometry_tracked_I=[],
                        reactants_ids_tracked_I=[],
                        products_ids_tracked_I=[],
                        reactants_elements_tracked_I=[],
                        products_elements_tracked_I=[],
                        reactants_positions_tracked_I=[],
                        products_positions_tracked_I=[],
                        reactants_mapping_I=[],
                        products_mapping_I=[],
                        rxn_equation_I=None,
                        used__I=None,
                        comment__I=None):
    '''Overwrite any truthy fields of the current reactionMapping, then insert
    the mapping into the atomMappingReactions table.

    Falsy arguments (None, empty list) leave the corresponding field untouched.
    '''
    field_values = (
        ('mapping_id', mapping_id_I),
        ('rxn_id', rxn_id_I),
        ('rxn_description', rxn_description_I),
        ('reactants_stoichiometry_tracked', reactants_stoichiometry_tracked_I),
        ('products_stoichiometry_tracked', products_stoichiometry_tracked_I),
        ('reactants_ids_tracked', reactants_ids_tracked_I),
        ('products_ids_tracked', products_ids_tracked_I),
        ('reactants_elements_tracked', reactants_elements_tracked_I),
        ('products_elements_tracked', products_elements_tracked_I),
        ('reactants_positions_tracked', reactants_positions_tracked_I),
        ('products_positions_tracked', products_positions_tracked_I),
        ('reactants_mapping', reactants_mapping_I),
        ('products_mapping', products_mapping_I),
        ('rxn_equation', rxn_equation_I),
        ('used_', used__I),
        ('comment_', comment__I),
    )
    for field, value in field_values:
        if value:
            self.reactionMapping[field] = value
    # add data to the database
    self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping])
def add_productMapping(self,product_ids_I):
    '''Persist the product metabolite mappings whose met_id appears in
    product_ids_I to the atomMappingMetabolite table for future use.'''
    for mm in self.reactionMapping['products_metaboliteMappings']:
        if mm.metaboliteMapping['met_id'] not in product_ids_I:
            continue
        mm.add_metaboliteMapping()
def update_productMapping(self,product_ids_I):
    '''Update the stored atomMappingMetabolite rows for the product metabolite
    mappings whose met_id appears in product_ids_I.'''
    for mm in self.reactionMapping['products_metaboliteMappings']:
        if mm.metaboliteMapping['met_id'] not in product_ids_I:
            continue
        mm.update_metaboliteMapping()
def update_reactionMapping(self,
                            mapping_id_I=None,
                            rxn_id_I=None,
                            rxn_description_I=None,
                            reactants_stoichiometry_tracked_I=[],
                            products_stoichiometry_tracked_I=[],
                            reactants_ids_tracked_I=[],
                            products_ids_tracked_I=[],
                            reactants_elements_tracked_I=[],
                            products_elements_tracked_I=[],
                            reactants_positions_tracked_I=[],
                            products_positions_tracked_I=[],
                            reactants_mapping_I=[],
                            products_mapping_I=[],
                            rxn_equation_I=None,
                            used__I=None,
                            comment__I=None):
    '''Overwrite any truthy fields of the current reactionMapping, then update
    the corresponding atomMappingReactions row in the database.

    Falsy arguments (None, empty list) leave the corresponding field untouched.
    '''
    field_values = (
        ('mapping_id', mapping_id_I),
        ('rxn_id', rxn_id_I),
        ('rxn_description', rxn_description_I),
        ('reactants_stoichiometry_tracked', reactants_stoichiometry_tracked_I),
        ('products_stoichiometry_tracked', products_stoichiometry_tracked_I),
        ('reactants_ids_tracked', reactants_ids_tracked_I),
        ('products_ids_tracked', products_ids_tracked_I),
        ('reactants_elements_tracked', reactants_elements_tracked_I),
        ('products_elements_tracked', products_elements_tracked_I),
        ('reactants_positions_tracked', reactants_positions_tracked_I),
        ('products_positions_tracked', products_positions_tracked_I),
        ('reactants_mapping', reactants_mapping_I),
        ('products_mapping', products_mapping_I),
        ('rxn_equation', rxn_equation_I),
        ('used_', used__I),
        ('comment_', comment__I),
    )
    for field, value in field_values:
        if value:
            self.reactionMapping[field] = value
    self.stage02_isotopomer_query.update_rows_dataStage02IsotopomerAtomMappingReactions([self.reactionMapping]);
def get_reactionMapping(self,mapping_id_I,rxn_id_I):
    '''Load the atomMappingReactions row for (mapping_id_I, rxn_id_I) into
    self.reactionMapping and rebuild the per-metabolite mapping objects.'''
    fetched = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_I, rxn_id_I)
    # the database row has no metaboliteMapping objects; start them empty and
    # rebuild them from the row's mapping strings
    fetched['reactants_metaboliteMappings'] = []
    fetched['products_metaboliteMappings'] = []
    self.reactionMapping = fetched
    self.make_reactantsAndProductsMetaboliteMappings()
def make_reactantsAndProductsMetaboliteMappings(self,reactionMapping_I=None):
    '''Build metaboliteMapping objects for every tracked reactant and product.

    The ids, elements, positions, and mapping strings are read from
    reactionMapping_I when given (a row of atomMappingReactions), otherwise
    from self.reactionMapping. In either case the constructed mapping objects
    are appended to self.reactionMapping's reactants_/products_metaboliteMappings
    lists (the input row is never modified).
    '''
    source = reactionMapping_I if reactionMapping_I else self.reactionMapping
    for side in ('reactants', 'products'):
        for position, met_id in enumerate(source[side + '_ids_tracked']):
            mm = stage02_isotopomer_metaboliteMapping(
                mapping_id_I=source['mapping_id'],
                met_id_I=met_id,
                met_elements_I=source[side + '_elements_tracked'][position],
                met_atompositions_I=source[side + '_positions_tracked'][position],
                met_symmetry_elements_I=[],
                met_symmetry_atompositions_I=[],
                used__I=True,
                comment__I=None,
                met_mapping_I=source[side + '_mapping'][position],
                base_met_ids_I=[],
                base_met_elements_I=[],
                base_met_atompositions_I=[],
                base_met_symmetry_elements_I=[],
                base_met_symmetry_atompositions_I=[],
                base_met_indices_I=[])
            self.reactionMapping[side + '_metaboliteMappings'].append(copy(mm.copy_metaboliteMapping()))
def clear_reactionMapping(self):
    '''Reset the reaction mapping to an empty state.

    Re-initializes self.reactionMapping with all scalar fields cleared
    (used_ defaults to True) and all list fields empty, and empties the
    reactant/product base-metabolite bookkeeping attributes.
    '''
    self.reactionMapping = {
        'mapping_id': None,
        'rxn_id': None,
        'rxn_description': None,
        'reactants_stoichiometry_tracked': [],
        'products_stoichiometry_tracked': [],
        'reactants_ids_tracked': [],
        'products_ids_tracked': [],
        'reactants_elements_tracked': [],
        'products_elements_tracked': [],
        'reactants_positions_tracked': [],
        'products_positions_tracked': [],
        'reactants_mapping': [],
        'products_mapping': [],
        'rxn_equation': None,
        'used_': True,
        'comment_': None,
        'reactants_metaboliteMappings': [],
        'products_metaboliteMappings': [],
    }
    # reset the flattened base-metabolite bookkeeping lists for both sides
    for side in ('reactants', 'products'):
        for suffix in ('ids', 'elements', 'atompositions',
                       'symmetry_elements', 'symmetry_atompositions', 'indices'):
            setattr(self, '%s_base_met_%s' % (side, suffix), [])
def checkAndCorrect_elementsAndPositions(self):
    '''Check that the reactant/product elements/positions are consistent with the
    reactants/products ids_tracked; if they are not, correct them'''
    # Lengths are compared against the atomMappingMetabolites rows in the
    # database; any mismatched entry is replaced wholesale by the database
    # values. Only list lengths are compared, not contents.
    # check that elements/positions are initialized
    if not self.reactionMapping['reactants_elements_tracked']:
        self.reactionMapping['reactants_elements_tracked']=[];
        # one (empty) placeholder per tracked reactant id
        for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
            self.reactionMapping['reactants_elements_tracked'].append([]);
    if not self.reactionMapping['reactants_positions_tracked']:
        self.reactionMapping['reactants_positions_tracked']=[];
        for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
            self.reactionMapping['reactants_positions_tracked'].append([]);
    # check that the length of the elements/positions match the length of the ids_tracked
    #TODO...
    # check each elements/positions
    for cnt,reactant_id in enumerate(self.reactionMapping['reactants_ids_tracked']):
        # get the metabolite data from the database
        # NOTE(review): get_rows_... (plural) is treated as a single dict row
        # here — confirm the query returns exactly one mapping per met_id
        met_data = {}
        met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],reactant_id);
        if len(met_data['met_elements'])!=len(self.reactionMapping['reactants_elements_tracked'][cnt]):
            self.reactionMapping['reactants_elements_tracked'][cnt]=met_data['met_elements'];
        if len(met_data['met_atompositions'])!=len(self.reactionMapping['reactants_positions_tracked'][cnt]):
            self.reactionMapping['reactants_positions_tracked'][cnt]=met_data['met_atompositions'];
    # check that elements/positions are initialized
    # (same procedure repeated for the product side)
    if not self.reactionMapping['products_elements_tracked']:
        self.reactionMapping['products_elements_tracked']=[];
        for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
            self.reactionMapping['products_elements_tracked'].append([]);
    if not self.reactionMapping['products_positions_tracked']:
        self.reactionMapping['products_positions_tracked']=[];
        for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
            self.reactionMapping['products_positions_tracked'].append([]);
    # check that the length of the elements/positions match the length of the ids_tracked
    #TODO...
    # check each elements/positions
    for cnt,product_id in enumerate(self.reactionMapping['products_ids_tracked']):
        # get the metabolite data from the database
        met_data = {}
        met_data = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(self.reactionMapping['mapping_id'],product_id);
        if len(met_data['met_elements'])!=len(self.reactionMapping['products_elements_tracked'][cnt]):
            self.reactionMapping['products_elements_tracked'][cnt]=met_data['met_elements'];
        if len(met_data['met_atompositions'])!=len(self.reactionMapping['products_positions_tracked'][cnt]):
            self.reactionMapping['products_positions_tracked'][cnt]=met_data['met_atompositions'];
def add_balanceProducts(self,unbalanced_met_I=None,unbalanced_met_position_I=None,unbalanced_met_positions_tracked_I=[],make_lumped_unbalanced_met_I=False,make_unique_unbalanced_mets_I=True):
'''Add psuedo metabolites to the product in order to elementally balance the tracked reaction'''
#Input:
# unbalanced_met_I = reactant_id that is not elementally balanced
# unbalanced_met_position_I = position of the reactant_id in the reactants_list
# unbalanced_met_positions_tracked_I = positions of the elements that are not elementally balanced
# make_lumped_unbalanced_met_I = boolean,
# automatically detect mappings that are not elementally balanced and make an unbalanced product metabolite to balance all elementally unbalanced reactants
# NOTE: does not work if the stoichiometry of all unbalanced reactants are not 1
# make_unique_unbalanced_mets_I = boolean,
# automatically detect mappings/metabolites that are not elementally balanced and makes unbalanced product mappings/metabolites to balance each elementally unbalanced reactant mapping/metabolite
if make_lumped_unbalanced_met_I:
#TODO: check that all unbalanced reactants have a stoichiometry of 1
balance_met = self.reactionMapping['rxn_id'] + '_' + 'balance_c' + '.balance';
reactants_mappings = []; #list of a list
products_mappings = []; #list
# extract out reactants and products mappings
for imm in self.reactionMapping['reactants_metaboliteMappings']:
reactant_mapping=[];
reactant_mapping = imm.convert_stringMapping2ArrayMapping();
reactants_mappings.append(reactant_mapping);
for imm in self.reactionMapping['products_metaboliteMappings']:
product_mapping=[];
product_mapping = imm.convert_stringMapping2ArrayMapping();
products_mappings.extend(product_mapping);
# find unbalanced reactant_mappings and
# make the product mapping, positions, and elements
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
product_cnt = 0;
for reactant_cnt,reactants_mapping in enumerate(reactants_mappings):
for element_cnt,reactant_mapping in enumerate(reactants_mapping):
if not reactant_mapping in products_mappings:
product_mapping.append(reactant_mapping);
product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_cnt][element_cnt]);
product_positions_tracked.append(product_cnt);
product_cnt += 1;
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_stoichiometry_tracked'].append(1);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
elif make_unique_unbalanced_mets_I:
products_mappings = []; #list
# extract out products mappings
for imm in self.reactionMapping['products_metaboliteMappings']:
product_mapping=[];
product_mapping = imm.convert_stringMapping2ArrayMapping();
products_mappings.extend(product_mapping);
# check each reactant mapping/metabolite
for reactant_pos,imm in enumerate(self.reactionMapping['reactants_metaboliteMappings']):
reactant_mapping=[];
reactant_mapping = imm.convert_stringMapping2ArrayMapping();
# find missing mappings
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
balance_met = None;
product_cnt = 0;
for mapping_pos,mapping in enumerate(reactant_mapping):
if mapping not in products_mappings:
balance_met = self.reactionMapping['rxn_id'] + '_' + self.reactionMapping['reactants_ids_tracked'][reactant_pos] + '_' + str(reactant_pos) + '.balance';
product_mapping.append(mapping);
#product_positions_tracked.append(self.reactionMapping['reactants_positions_tracked'][reactant_pos][mapping_pos]);
product_positions_tracked.append(product_cnt);
product_elements_tracked.append(self.reactionMapping['reactants_elements_tracked'][reactant_pos][mapping_pos]);
product_cnt += 1;
if balance_met:
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactionMapping['products_stoichiometry_tracked'].append(abs(self.reactionMapping['reactants_stoichiometry_tracked'][reactant_pos]));
# use user specifications
else:
# find the position of the tracked metabolite
if self.reactionMapping['reactants_ids_tracked'].index(unbalanced_met_I):
if unbalanced_met_position_I: unbalanced_met_pos = unbalanced_met_position_I;
else: unbalanced_met_pos = self.reactionMapping['reactants_ids_tracked'].index(unbalanced_met_I);
balance_met = self.reactionMapping['rxn_id'] + '_' + unbalanced_met_I + '_' + str(unbalanced_met_pos) + '.balance';
# extract out mapping, positions, and elements
reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][unbalanced_met_pos].convert_stringMapping2ArrayMapping();
reactant_positions_tracked = self.reactionMapping['reactants_positions_tracked'][unbalanced_met_pos];
reactant_elements_tracked = self.reactionMapping['reactants_elements_tracked'][unbalanced_met_pos];
# make the product mapping, positions, and elements
product_mapping = [];
product_positions_tracked = [];
product_elements_tracked = [];
if unbalanced_met_positions_tracked_I:
for pos_cnt,pos in enumerate(unbalanced_met_positions_tracked_I):
product_mapping.append(reactant_mapping[pos]);
product_positions_tracked.append(pos_cnt);
product_elements_tracked.append(reactant_elements_tracked[pos]);
else:
product_mapping=reactant_mapping
product_positions_tracked=reactant_positions_tracked
product_elements_tracked=reactant_elements_tracked
imm = stage02_isotopomer_metaboliteMapping(mapping_id_I=self.reactionMapping['mapping_id'],
met_id_I=balance_met,
met_elements_I=product_elements_tracked,
met_atompositions_I=product_positions_tracked,
met_symmetry_elements_I=[],
met_symmetry_atompositions_I=[],
used__I=True,
comment__I=None,
met_mapping_I=product_mapping,
base_met_ids_I=[],
base_met_elements_I=[],
base_met_atompositions_I=[],
base_met_symmetry_elements_I=[],
base_met_symmetry_atompositions_I=[],
base_met_indices_I=[]);
# add balance metabolite to the products
self.reactionMapping['products_ids_tracked'].append(balance_met);
self.reactionMapping['products_mapping'].append(imm.convert_arrayMapping2StringMapping());
self.reactionMapping['products_positions_tracked'].append(product_positions_tracked);
self.reactionMapping['products_elements_tracked'].append(product_elements_tracked);
self.reactionMapping['products_metaboliteMappings'].append(copy(imm.copy_metaboliteMapping()));
self.reactionMapping['products_stoichiometry_tracked'].append(1);
else:
print('unbalanced metabolite not found!')
def check_elementalBalance(self):
'''
1. Check that the number of elements tracked in the reactant matches the number of elements tracked
in the products
2. Check that the reactant positions tracked match the reactant elements tracked'''
#Output:
# reactants_positions_tracked_cnt
# products_positions_tracked_cnt
element_balance = True;
#check reactants
reactants_positions_tracked_cnt = 0;
for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
print('checking reactant ' + reactant);
# check that the reactant positions == reactant elements
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
continue;
reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
#check products
products_positions_tracked_cnt = 0;
for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
print('checking product ' + product);
# check that the product positions == product elements
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
continue;
products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
#record
if reactants_positions_tracked_cnt!=products_positions_tracked_cnt:
return reactants_positions_tracked_cnt,products_positions_tracked_cnt;
else:
return reactants_positions_tracked_cnt,products_positions_tracked_cnt;
def check_reactionMapping(self):
'''
1. Check that the number of elements tracked in the reactant matches the number of elements tracked
in the products
2. Check that the reactant positions tracked match the reactant elements tracked
3. Check that the mappings are 1-to-1
4. Check that the elements/positions/mappings are of the same length
5. Check that the stoichiometry and ids tracked are of the same length'''
#Output:
# reactants_positions_tracked_cnt
# products_positions_tracked_cnt
#checks:
reactants_ids_stoichiometry_check = True;
reactants_elements_positions_check = True;
reactants_elements_mapping_check = True;
reactants_positions_mapping_check = True;
products_ids_stoichiometry_check = True;
products_elements_positions_check = True;
products_elements_mapping_check = True;
products_positions_mapping_check = True;
element_balance_check = True;
mapping_check = True;
#check reactants
reactants_positions_tracked_cnt = 0;
reactants_elements_tracked_cnt = 0;
reactants_mappings_cnt = 0;
reactants_stoichiometry_cnt = 0;
reactants_ids_cnt = 0;
reactants_mappings = [];
# check that the reactant stoichiometry == reactant ids
if len(self.reactionMapping['reactants_ids_tracked'])!=len(self.reactionMapping['reactants_stoichiometry_tracked']):
print('inconsistent reactants_stoichiometry_tracked and reactants_ids_tracked');
reactants_ids_stoichiometry_check = False;
reactants_ids_cnt += len(self.reactionMapping['reactants_ids_tracked']);
reactants_stoichiometry_cnt += len(self.reactionMapping['reactants_stoichiometry_tracked']);
# check elemental balance
for reactant_cnt,reactant in enumerate(self.reactionMapping['reactants_ids_tracked']):
print('checking reactant elemental balance ' + reactant);
reactant_mapping=[];
reactant_mapping = self.reactionMapping['reactants_metaboliteMappings'][reactant_cnt].convert_stringMapping2ArrayMapping();
# check that the reactant positions == reactant elements
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
reactants_elements_positions_check = False;
# check that the reactant positions == reactant mapping
if len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt])!=len(reactant_mapping):
print('inconsistent reactants_positions and reactants_mapping');
reactants_elements_mapping_check = False;
# check that the reactant elements == reactant mapping
if len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt])!=len(reactant_mapping):
print('inconsistent reactants_elements and reactants_mapping');
reactants_positions_mapping_check = False;
reactants_positions_tracked_cnt += len(self.reactionMapping['reactants_positions_tracked'][reactant_cnt]);
reactants_elements_tracked_cnt += len(self.reactionMapping['reactants_elements_tracked'][reactant_cnt]);
reactants_mappings_cnt += len(reactant_mapping);
reactants_mappings.append(reactant_mapping);
#check products
products_positions_tracked_cnt = 0;
products_elements_tracked_cnt = 0;
products_mappings_cnt = 0;
products_stoichiometry_cnt = 0;
products_ids_cnt = 0;
products_mappings = [];
# check that the product stoichiometry == product ids
if len(self.reactionMapping['products_ids_tracked'])!=len(self.reactionMapping['products_stoichiometry_tracked']):
print('inconsistent products_stoichiometry_tracked and products_ids_tracked');
products_ids_stoichiometry_check = False;
products_ids_cnt += len(self.reactionMapping['products_ids_tracked']);
products_stoichiometry_cnt += len(self.reactionMapping['products_stoichiometry_tracked']);
# check elemental balance
for product_cnt,product in enumerate(self.reactionMapping['products_ids_tracked']):
print('checking product elemental balance ' + product);
product_mapping=[];
product_mapping = self.reactionMapping['products_metaboliteMappings'][product_cnt].convert_stringMapping2ArrayMapping();
# check that the product positions == product elements
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(self.reactionMapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
products_elements_positions_check = False;
# check that the product positions == product mapping
if len(self.reactionMapping['products_positions_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_positions and products_mapping');
products_elements_mapping_check = False;
# check that the product elements == product mapping
if len(self.reactionMapping['products_elements_tracked'][product_cnt])!=len(product_mapping):
print('inconsistent products_elements and products_mapping');
products_positions_mapping_check = False;
products_positions_tracked_cnt += len(self.reactionMapping['products_positions_tracked'][product_cnt]);
products_elements_tracked_cnt += len(self.reactionMapping['products_elements_tracked'][product_cnt]);
products_mappings_cnt += len(product_mapping);
products_mappings.append(product_mapping);
#check elemental balance
if reactants_positions_tracked_cnt != products_positions_tracked_cnt:
print('the length of reactants_positions_tracked does not match the length of products_positions_tracked');
element_balance_check = False;
if reactants_elements_tracked_cnt != products_elements_tracked_cnt:
print('reactants_elements_tracked does not match the length of products_elements_tracked');
element_balance_check = False;
if reactants_mappings_cnt != products_mappings_cnt:
print('the length of reactants_mapping does not match the length of products_mapping');
element_balance_check = False;
#check 1-to-1 mapping
reactants_mappings_list = [];
for reactants_mapping in reactants_mappings:
reactants_mappings_list.extend(reactants_mapping);
# check for duplicate reactant mappings
reactants_mappings_unique = list(set(reactants_mappings_list));
if len(reactants_mappings_list)!=len(reactants_mappings_unique):
print('duplicate reactants_mappings found');
mapping_check = False;
products_mappings_list = [];
for products_mapping in products_mappings:
products_mappings_list.extend(products_mapping);
# check for duplicate product mappings
products_mappings_unique = list(set(products_mappings_list));
if len(products_mappings_list)!=len(products_mappings_unique):
print('duplicate products_mappings found');
mapping_check = False;
# check that each product mapping has a matching reactant mapping, and vice versa
for reactant_cnt,reactant in enumerate(reactants_mappings):
print('checking reactant mapping ' + self.reactionMapping['reactants_ids_tracked'][reactant_cnt]);
for mapping_cnt,mapping in enumerate(reactant):
if not mapping in products_mappings_list:
print('no mapping found for reactant mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
for product_cnt,product in enumerate(products_mappings):
print('checking product mapping ' + self.reactionMapping['products_ids_tracked'][product_cnt]);
for mapping_cnt,mapping in enumerate(product):
if not mapping in reactants_mappings_list:
print('no mapping found for product mapping ' + mapping + ' and position ' + str(mapping_cnt));
mapping_check = False;
if not element_balance_check or not mapping_check:
print('check reaction mapping');
return reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
element_balance_check,mapping_check;
def clear_elementsAndPositions(self):
'''Clear the reactants/products elements/positions'''
self.reactionMapping['reactants_elements_tracked']=None;
self.reactionMapping['reactants_positions_tracked']=None;
self.reactionMapping['products_elements_tracked']=None;
self.reactionMapping['products_positions_tracked']=None;
class stage02_isotopomer_mappingUtilities():
    def __init__(self):
        """Initialize the mapping utilities with a fresh isotopomer query interface."""
        # database query object used by all methods of this class
        self.stage02_isotopomer_query = stage02_isotopomer_query();
def make_missingMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
'''Make atom mapping metabolites from atom mapping reactions, QC atom mapping reactions;
and create a new set of metabolite mappings that correspond to the current reaction mappings that need to be QC/QA'd'''
#Input:
# experiment_id_I = experiment_id
# model_id_I = model_id
# mapping_id_rxns_I = reaction mapping id (#default atomMappingMetabolite mapping id to add new metabolites to)
# mapping_id_mets_I = existing metabolite mappings to use when making the new metabolite mappings
# mapping_id_new_I = name of mapping id for the new metabolite mappings
#Output:
# default: new metabolite mappings will be added for the mapping id of the reactions
# existing metabolite mappings will not be added
# mapping_id_new_I != None: new metabolite mappings will be added for the mapping id specified
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
#get mapping ids
if mapping_id_rxns_I and mapping_id_mets_I:
mapping_ids_rxns=mapping_id_rxns_I;
mapping_ids_mets=mapping_id_mets_I;
elif mapping_id_rxns_I:
mapping_ids_rxns=mapping_id_rxns_I;
else:
mapping_ids_rxns=[];
mapping_ids_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
# get the metabolite mappings
if mapping_id_rxns_I and mapping_id_mets_I:
mappings=self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactionsAndAtomMappingMetabolites(mapping_id_rxns,mapping_ids_mets[mapping_cnt]);
else:
mappings = self.stage02_isotopomer_query.get_atomMappingMetabolites_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns);
# remove duplicates
duplicate_ind = [];
for d1_cnt,d1 in enumerate(mappings):
for d2_cnt in range(d1_cnt+1,len(mappings)):
if d1['mapping_id'] == mappings[d2_cnt]['mapping_id'] and \
d1['met_id'] == mappings[d2_cnt]['met_id'] and \
d1['met_elements'] == mappings[d2_cnt]['met_elements'] and \
d1['met_atompositions'] == mappings[d2_cnt]['met_atompositions'] and \
d1['met_symmetry_elements'] == mappings[d2_cnt]['met_symmetry_elements'] and \
d1['met_symmetry_atompositions'] == mappings[d2_cnt]['met_symmetry_atompositions']:
duplicate_ind.append(d2_cnt);
duplicate_ind_unique=list(set(duplicate_ind));
# copy out unique metabolites
data_O = [];
for d1_cnt,d1 in enumerate(mappings):
if d1_cnt in duplicate_ind_unique:
continue;
else:
if mapping_id_new_I: d1['mapping_id']=mapping_id_new_I; # change to the new mapping
data_O.append(d1);
met_ids = [x['met_id'] for x in data_O];
met_ids_unique = list(set(met_ids));
data_mets_cnt = {};
for met in met_ids_unique:
data_mets_cnt[met] = 0;
for d in data_O:
data_mets_cnt[d['met_id']] += 1;
# add data to the database
if mapping_id_new_I:
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_O);
else:
data_add_O = [];
for d in data_O:
# check to see if the metabolite is already in the database
mapping_row = {};
mapping_row = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,d['met_id']);
if not mapping_row: data_add_O.append(d);
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingMetabolites(data_add_O);
def make_missingReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_rxns_I=[],mapping_id_mets_I=[],mapping_id_new_I=None):
'''Update missing or incomplete reaction mappings for the current mapping from the matching metabolite mappings,
and optionally, from the previous reaction mappings'''
#Note: prior to running, remove all reaction mappings that are not used.
imm = stage02_isotopomer_metaboliteMapping();
data_O = [];
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
#get all reactions in the model:
reactions = [];
reactions = self.stage02_isotopomer_query.get_rows_modelID_dataStage02IsotopomerModelReactions(model_id);
#get mapping ids
if mapping_id_rxns_I and mapping_id_mets_I:
mapping_ids_rxns=mapping_id_rxns_I;
mapping_ids_mets=mapping_id_mets_I;
elif mapping_id_rxns_I:
mapping_ids_rxns=mapping_id_rxns_I;
else:
mapping_rxns=[];
mapping_rxns=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id_rxns in enumerate(mapping_ids_rxns):
missing_reactions_O = [];
missing_metabolites_O = [];
for reaction_cnt,reaction in enumerate(reactions):
#get the current reaction mappings
mapping_rxns = [];
mapping_rxns = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
#if mapping_rxns: # atom mapping for the reaction already exists and is used
# continue;
if mapping_id_new_I:
mapping_id_current = mapping_id_new_I;
else:
mapping_id_current = mapping_id_rxns;
data_tmp={'mapping_id':mapping_id_current,
'rxn_id':reaction['rxn_id'],
'rxn_description':None,
'reactants_stoichiometry_tracked':[],
'products_stoichiometry_tracked':[],
'reactants_ids_tracked':[],
'products_ids_tracked':[],
'reactants_mapping':[],
'products_mapping':[],
'rxn_equation':reaction['equation'],
'products_elements_tracked':[],
'products_positions_tracked':[],
'reactants_elements_tracked':[],
'reactants_positions_tracked':[],
'used_':True,
'comment_':''};
#check if the reactants or products are tracked
tracked_reactants = [];
for reactant in reaction['reactants_ids']:
tracked_reactant = {};
if mapping_id_mets_I:
tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],reactant);
else:
tracked_reactant = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,reactant);
if tracked_reactant:
tracked_reactants.append(tracked_reactant);
tracked_products = [];
for product in reaction['products_ids']:
tracked_product = {};
if mapping_id_mets_I:
tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_ids_mets[mapping_cnt],product);
else:
tracked_product = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id_rxns,product);
if tracked_product:
tracked_products.append(tracked_product);
if tracked_reactants or tracked_products:
#check if the reaction is missing or is missing a tracked metabolite
tracked_reaction = {};
tracked_reaction = self.stage02_isotopomer_query.get_row_mappingIDAndRxnID_dataStage02IsotopomerAtomMappingReactions(mapping_id_rxns,reaction['rxn_id']);
if tracked_reaction:
missing_reactants = [];
# get the stoichiometry for each reactant
tracked_reaction_reactant_ids_stoich = {};
for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
tracked_reaction_reactant_ids_stoich[tracked_reactant_id] = 0;
for tracked_reactant_id_cnt,tracked_reactant_id in enumerate(tracked_reaction['reactants_ids_tracked']):
tracked_reaction_reactant_ids_stoich[tracked_reactant_id] += abs(tracked_reaction['reactants_stoichiometry_tracked'][tracked_reactant_id_cnt]);
#copy existing data
data_tmp['reactants_ids_tracked'].extend(tracked_reaction['reactants_ids_tracked']);
data_tmp['reactants_stoichiometry_tracked'].extend(tracked_reaction['reactants_stoichiometry_tracked']);
data_tmp['reactants_mapping'].extend(tracked_reaction['reactants_mapping']);
data_tmp['reactants_elements_tracked'].extend(tracked_reaction['reactants_elements_tracked']);
data_tmp['reactants_positions_tracked'].extend(tracked_reaction['reactants_positions_tracked']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
for tracked_reactant in tracked_reactants:
if tracked_reactant['met_id'] in tracked_reaction['reactants_ids_tracked']:
# check for matching stoichiometry
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = abs(reaction['reactants_stoichiometry'][met_id_cnt]);
break;
unbalanced_stoich = reaction_stoich - tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']];
if tracked_reaction_reactant_ids_stoich[tracked_reactant['met_id']] != reaction_stoich:
for stoich_cnt in range(int(unbalanced_stoich)):
missing_reactants.append(tracked_reactant);
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(0);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},stoich_cnt)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_reactant['met_id']+',';
else:
missing_reactants.append(tracked_reactant);
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
break;
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_reactant['met_id']+',';
missing_products = [];
# get the stoichiometry for each product
tracked_reaction_product_ids_stoich = {};
for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
tracked_reaction_product_ids_stoich[tracked_product_id] = 0;
for tracked_product_id_cnt,tracked_product_id in enumerate(tracked_reaction['products_ids_tracked']):
tracked_reaction_product_ids_stoich[tracked_product_id] += abs(tracked_reaction['products_stoichiometry_tracked'][tracked_product_id_cnt]);
#copy existing data
data_tmp['products_ids_tracked'].extend(tracked_reaction['products_ids_tracked']);
data_tmp['products_stoichiometry_tracked'].extend(tracked_reaction['products_stoichiometry_tracked']);
data_tmp['products_mapping'].extend(tracked_reaction['products_mapping']);
data_tmp['products_elements_tracked'].extend(tracked_reaction['products_elements_tracked']);
data_tmp['products_positions_tracked'].extend(tracked_reaction['products_positions_tracked']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
for tracked_product in tracked_products:
if tracked_product['met_id'] in tracked_reaction['products_ids_tracked']:
# check for matching stoichiometry
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
unbalanced_stoich = reaction_stoich - tracked_reaction_product_ids_stoich[tracked_product['met_id']];
if tracked_reaction_product_ids_stoich[tracked_product['met_id']] != reaction_stoich:
for stoich_cnt in range(int(unbalanced_stoich)):
missing_products.append(tracked_product);
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(0);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},stoich_cnt)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_product['met_id']+',';
else:
missing_products.append(tracked_product);
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=tracked_reaction['rxn_description'];
data_tmp['used_']=False;
data_tmp['comment_']+=tracked_product['met_id']+',';
if missing_reactants or missing_products:
tmp = {};
tmp = tracked_reaction;
tmp.update({'missing_reactants':missing_reactants});
tmp.update({'missing_products':missing_products});
tmp.update({'equation':reaction['equation']})
missing_metabolites_O.append(tmp);
else:
tmp = {};
tmp = reaction;
tmp.update({'tracked_reactants':tracked_reactants});
tmp.update({'tracked_products':tracked_products});
missing_reactions_O.append(reaction);
for tracked_reactant in tracked_reactants:
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['reactants_ids']):
if met_id == tracked_reactant['met_id']:
reaction_stoich = reaction['reactants_stoichiometry'][met_id_cnt];
break;
#add missing data
data_tmp['reactants_ids_tracked'].append(tracked_reactant['met_id']);
data_tmp['reactants_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_reactant['met_id']:tracked_reactant['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['reactants_mapping'].append(new_mapping);
#data_tmp['reactants_mapping'].append('');
data_tmp['reactants_elements_tracked'].append(tracked_reactant['met_elements']);
data_tmp['reactants_positions_tracked'].append(tracked_reactant['met_atompositions']);
data_tmp['rxn_description']=None;
data_tmp['used_']=False;
data_tmp['comment_']=reaction['rxn_id'];
for tracked_product in tracked_products:
reaction_stoich = 0;
for met_id_cnt,met_id in enumerate(reaction['products_ids']):
if met_id == tracked_product['met_id']:
reaction_stoich = abs(reaction['products_stoichiometry'][met_id_cnt]);
break;
#add missing data
data_tmp['products_ids_tracked'].append(tracked_product['met_id']);
data_tmp['products_stoichiometry_tracked'].append(reaction_stoich);
imm.make_trackedMetabolite(mapping_id_rxns,model_id,{tracked_product['met_id']:tracked_product['met_elements'][0]},0)
new_mapping = imm.convert_arrayMapping2StringMapping();
imm.clear_metaboliteMapping();
data_tmp['products_mapping'].append(new_mapping);
#data_tmp['products_mapping'].append('');
data_tmp['products_elements_tracked'].append(tracked_product['met_elements']);
data_tmp['products_positions_tracked'].append(tracked_product['met_atompositions']);
data_tmp['rxn_description']=None;
data_tmp['used_']=False;
data_tmp['comment_']=reaction['rxn_id'];
data_O.append(data_tmp);
#self.print_missingReactionMappings(missing_reactions_O,missing_metabolites_O);
return missing_reactions_O,missing_metabolites_O;
#add data to the database:
self.stage02_isotopomer_query.add_data_dataStage02IsotopomerAtomMappingReactions(data_O);
def print_missingReactionMappings(self,missing_reactions_I,missing_metabolites_I):
'''print missing reaction mappings to the screen'''
#missing reactions
script = '';
for missing_reaction in missing_reactions_I:
script+= missing_reaction['rxn_id']+'\t'+missing_reaction['equation']+'\t'+str(missing_reaction['reactants_ids'])+'\t'+str(missing_reaction['products_ids'])+'\t';
for tracked_reactant in missing_reaction['tracked_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_reaction['tracked_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
#missing metabolites
script = '';
for missing_metabolite in missing_metabolites_I:
script+= missing_metabolite['rxn_id']+'\t'+missing_metabolite['equation']+'\t'+str(missing_metabolite['reactants_ids_tracked'])+'\t'+str(missing_metabolite['products_ids_tracked'])+'\t';
for tracked_reactant in missing_metabolite['missing_reactants']:
script+= tracked_reactant['met_id']+',';
script+= '\t'
for tracked_product in missing_metabolite['missing_products']:
script+= tracked_product['met_id']+',';
script+='\n'
print(script)
    def find_inconsistentMetaboliteMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
        '''Find inconsistencies in the atom mapping by comparing the metabolite information in
        atomMappingMetabolites table to the atom mapping in the atomMappingReactions table

        Args:
            experiment_id_I: experiment id used to look up the models/mappings
            model_id_I: optional list of model ids (defaults to all models of the experiment)
            mapping_id_I: optional list of mapping ids (defaults to all mappings of each model)
        '''
        #Output:
        #   data_O = row of atomMappingReactions filled only with the inconsistent metabolite mapping information
        #   missing_mets_O = metabolites that are tracked in atomMappingReactions, but are not present in atomMappingMetabolites
        data_O = [];
        missing_mets_O = [];
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            print('checking model_id ' + model_id);
            #get mapping ids
            if mapping_id_I:
                mapping_ids=mapping_id_I;
            else:
                mapping_ids=[];
                mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id in enumerate(mapping_ids):
                print('checking mapping_id ' + mapping_id);
                # get the reaction mapping
                reaction_mappings = [];
                reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
                for reaction_cnt,reaction_mapping in enumerate(reaction_mappings):
                    print('checking reaction ' + reaction_mapping['rxn_id']);
                    #debug:
                    if reaction_mapping['rxn_id'] == 'COFACTOR_3':
                        print('check');
                    #check reactants
                    # rxn_tmp is a template row that accumulates ONLY the inconsistent
                    # parts of this reaction's mapping; it is appended to data_O at the
                    # end if any reactant or product was flagged
                    rxn_tmp = {};
                    rxn_tmp['mapping_id']=mapping_id
                    rxn_tmp['rxn_id']=reaction_mapping['rxn_id']
                    rxn_tmp['rxn_description']=reaction_mapping['rxn_description']
                    rxn_tmp['reactants_stoichiometry_tracked']=[]
                    rxn_tmp['products_stoichiometry_tracked']=[]
                    rxn_tmp['reactants_ids_tracked']=[]
                    rxn_tmp['products_ids_tracked']=[]
                    rxn_tmp['reactants_elements_tracked']=[]
                    rxn_tmp['products_elements_tracked']=[]
                    rxn_tmp['reactants_positions_tracked']=[]
                    rxn_tmp['products_positions_tracked']=[]
                    rxn_tmp['reactants_mapping']=[]
                    rxn_tmp['products_mapping']=[]
                    rxn_tmp['rxn_equation']=None
                    rxn_tmp['used_']=True
                    rxn_tmp['comment_']='Inconsistent metabolites found';
                    rxn_tmp['reactants_metaboliteMappings']=[]
                    rxn_tmp['products_metaboliteMappings']=[]
                    # NOTE(review): bad_reactant is initialized once and never reset inside
                    # the reactant loop, so once any reactant is flagged every subsequent
                    # reactant of this reaction is also recorded — confirm this is intended
                    bad_reactant = False;
                    for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']):
                        print('checking reactant ' + reactant);
                        # get the metabolite mapping
                        metabolite_mapping = {};
                        metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,reactant);
                        if not metabolite_mapping:
                            # tracked in atomMappingReactions but absent from atomMappingMetabolites
                            print('metabolite mapping not found')
                            missing_mets_O.append(reactant);
                            continue;
                        # check the reaction mapping
                        # strip '[...]' bracket notation so the mapping length can be
                        # compared against the number of tracked atom positions
                        reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt];
                        if '[' in reaction_mapping['reactants_mapping'][reactant_cnt]:
                            reactants_mapping = reaction_mapping['reactants_mapping'][reactant_cnt].split('][');
                            reactants_mapping = [m.replace('[','') for m in reactants_mapping];
                            reactants_mapping = [m.replace(']','') for m in reactants_mapping];
                        if len(metabolite_mapping['met_atompositions']) != len(reactants_mapping):
                            rxn_tmp['reactants_metaboliteMappings'].append(reaction_mapping['reactants_mapping'][reactant_cnt]);
                            print('bad reactants_metaboliteMappings');
                            bad_reactant = True;
                        # check the reaction elements tracked
                        if metabolite_mapping['met_atompositions'] != reaction_mapping['reactants_positions_tracked'][reactant_cnt]:
                            rxn_tmp['reactants_positions_tracked'].append(reaction_mapping['reactants_positions_tracked'][reactant_cnt]);
                            print('bad reactants_positions_tracked');
                            bad_reactant = True;
                        # check the reaction positions tracked
                        if metabolite_mapping['met_elements'] != reaction_mapping['reactants_elements_tracked'][reactant_cnt]:
                            rxn_tmp['reactants_elements_tracked'].append(reaction_mapping['reactants_elements_tracked'][reactant_cnt]);
                            print('bad reactants_elements_tracked');
                            bad_reactant = True;
                        if bad_reactant:
                            rxn_tmp['reactants_ids_tracked'].append(reactant);
                            rxn_tmp['reactants_stoichiometry_tracked'].append(reaction_mapping['reactants_stoichiometry_tracked'][reactant_cnt]);
                    #check products
                    bad_product = False;
                    for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']):
                        print('checking product ' + product);
                        # get the metabolite mapping
                        metabolite_mapping = {};
                        metabolite_mapping = self.stage02_isotopomer_query.get_rows_mappingIDAndMetID_dataStage02IsotopomerAtomMappingMetabolites(mapping_id,product);
                        if not metabolite_mapping:
                            print('metabolite mapping not found')
                            missing_mets_O.append(product);
                            continue;
                        # check the reaction mapping
                        products_mapping = reaction_mapping['products_mapping'][product_cnt];
                        if '[' in reaction_mapping['products_mapping'][product_cnt]:
                            products_mapping = reaction_mapping['products_mapping'][product_cnt].split('][');
                            products_mapping = [m.replace('[','') for m in products_mapping];
                            products_mapping = [m.replace(']','') for m in products_mapping];
                        if len(metabolite_mapping['met_atompositions']) != len(products_mapping):
                            rxn_tmp['products_metaboliteMappings'].append(reaction_mapping['products_mapping'][product_cnt]);
                            print('bad products_metaboliteMappings');
                            bad_product = True;
                        # check the reaction elements tracked
                        if metabolite_mapping['met_atompositions'] != reaction_mapping['products_positions_tracked'][product_cnt]:
                            rxn_tmp['products_positions_tracked'].append(reaction_mapping['products_positions_tracked'][product_cnt]);
                            print('bad products_positions_tracked');
                            bad_product = True;
                        # check the reaction positions tracked
                        if metabolite_mapping['met_elements'] != reaction_mapping['products_elements_tracked'][product_cnt]:
                            rxn_tmp['products_elements_tracked'].append(reaction_mapping['products_elements_tracked'][product_cnt]);
                            print('bad products_elements_tracked');
                            bad_product = True;
                        if bad_product:
                            rxn_tmp['products_ids_tracked'].append(product);
                            rxn_tmp['products_stoichiometry_tracked'].append(reaction_mapping['products_stoichiometry_tracked'][product_cnt]);
                    #record
                    if bad_reactant or bad_product:
                        data_O.append(rxn_tmp);
        return data_O,missing_mets_O;
def find_unbalancedReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
'''Find reactions mappings that are not elementally balanced'''
#Output:
# unbalanced_rxns_O = {rxn_id:{'n_products_elements_tracked':products_positions_tracked_cnt,
# 'n_reactants_elements_tracked':reactants_positions_tracked_cnt},...}
unbalanced_rxns_O = {};
#get model ids:
if model_id_I:
model_ids = model_id_I;
else:
model_ids = [];
model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
for model_id in model_ids:
print('checking model_id ' + model_id);
#get mapping ids
if mapping_id_I:
mapping_ids=mapping_id_I;
else:
mapping_ids=[];
mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
for mapping_cnt,mapping_id in enumerate(mapping_ids):
print('checking mapping_id ' + mapping_id);
# get the reaction mapping
reaction_mappings = [];
reaction_mappings = self.stage02_isotopomer_query.get_rows_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
for reaction_cnt,reaction_mapping in enumerate(reaction_mappings):
print('checking reaction ' + reaction_mapping['rxn_id']);
#check reactants
reactants_positions_tracked_cnt = 0;
for reactant_cnt,reactant in enumerate(reaction_mapping['reactants_ids_tracked']):
print('checking reactant ' + reactant);
# check that the reactant positions == reactant elements
if len(reaction_mapping['reactants_positions_tracked'][reactant_cnt])!=len(reaction_mapping['reactants_elements_tracked'][reactant_cnt]):
print('inconsistent reactants_positions and reactants_elements');
continue;
reactants_positions_tracked_cnt += len(reaction_mapping['reactants_positions_tracked'][reactant_cnt]);
#check products
products_positions_tracked_cnt = 0;
for product_cnt,product in enumerate(reaction_mapping['products_ids_tracked']):
print('checking product ' + product);
# check that the product positions == product elements
if len(reaction_mapping['products_positions_tracked'][product_cnt])!=len(reaction_mapping['products_elements_tracked'][product_cnt]):
print('inconsistent products_positions and products_elements');
continue;
products_positions_tracked_cnt += len(reaction_mapping['products_positions_tracked'][product_cnt]);
#record
if reactants_positions_tracked_cnt!=products_positions_tracked_cnt:
unbalanced_rxns_O[reaction_mapping['rxn_id']] = {'n_products_elements_tracked':products_positions_tracked_cnt,
'n_reactants_elements_tracked':reactants_positions_tracked_cnt};
#unbalanced_rxns_O.append(reaction_mapping);
return unbalanced_rxns_O;
    def find_inconsistentReactionMappings(self,experiment_id_I,model_id_I=[],mapping_id_I=[]):
        '''Find inconsistencies in the reaction mapping

        Runs the per-reaction consistency checks provided by
        stage02_isotopomer_reactionMapping for every mapping of every model of
        the experiment.

        Args:
            experiment_id_I: experiment id used to look up the models/mappings
            model_id_I: optional list of model ids (defaults to all models of the experiment)
            mapping_id_I: optional list of mapping ids (defaults to all mappings of each model)
        '''
        #Output:
        #   unbalanced_rxns_O = {rxn_id:{'n_products_elements_tracked':products_positions_tracked_cnt,
        #                      'n_reactants_elements_tracked':reactants_positions_tracked_cnt},...}
        irm = stage02_isotopomer_reactionMapping();
        #get model ids:
        if model_id_I:
            model_ids = model_id_I;
        else:
            model_ids = [];
            model_ids = self.stage02_isotopomer_query.get_modelID_experimentID_dataStage02IsotopomerSimulation(experiment_id_I);
        for model_id in model_ids:
            print('checking model_id ' + model_id);
            #get mapping ids
            if mapping_id_I:
                mapping_ids=mapping_id_I;
            else:
                mapping_ids=[];
                mapping_ids=self.stage02_isotopomer_query.get_mappingID_experimentIDAndModelID_dataStage02IsotopomerSimulation(experiment_id_I,model_id);
            for mapping_cnt,mapping_id in enumerate(mapping_ids):
                print('checking mapping_id ' + mapping_id);
                # get the reaction ids
                reaction_ids = [];
                reaction_ids = self.stage02_isotopomer_query.get_rxnIDs_mappingID_dataStage02IsotopomerAtomMappingReactions(mapping_id);
                for reaction_cnt,reaction_id in enumerate(reaction_ids):
                    print('checking reaction ' + reaction_id);
                    #check each reaction
                    irm.get_reactionMapping(mapping_id,reaction_id);
                    # NOTE(review): the check flags below are unpacked but never stored,
                    # printed, or returned (and the method returns None despite the
                    # documented unbalanced_rxns_O output) — confirm whether recording
                    # the results was intended
                    reactants_ids_stoichiometry_check,reactants_elements_positions_check,reactants_elements_mapping_check,reactants_positions_mapping_check,\
                        products_ids_stoichiometry_check,products_elements_positions_check,products_elements_mapping_check,products_positions_mapping_check,\
                        element_balance_check,mapping_check = irm.check_reactionMapping();
                    #clear reaction
                    irm.clear_reactionMapping();
class isotopomer_netRxns():
    '''Static definitions of lumped ("net") reactions for isotopomer modeling.

    Each entry maps a net-reaction name to the ordered list of component
    reaction ids and their relative stoichiometries (negative values indicate
    the component reaction runs in reverse within the lump).
    '''
    def __init__(self):
        # combined lookup of all net-reaction definitions
        self.isotopomer_rxns_net = {};
        self.isotopomer_rxns_net = self.define_netRxns();
    def define_netRxns(self):
        '''Merge all net-reaction definition sets into a single dictionary.'''
        isotopomer_rxns_net = {};
        isotopomer_rxns_net.update(self.define_netRxns_iDM2014_reversible());
        isotopomer_rxns_net.update(self.define_netRxns_RL2013_reversible());
        return isotopomer_rxns_net
    def define_netRxns_iDM2014_reversible(self):
        '''Net-reaction definitions taken from the iDM2014 reversible model.'''
        isotopomer_rxns_net = {
            'ptrc_to_4abut_1':{'reactions':['PTRCTA','ABUTD'],
                            'stoichiometry':[1,1]},
            'ptrc_to_4abut_2':{'reactions':['GGPTRCS','GGPTRCO','GGGABADr','GGGABAH'],
                            'stoichiometry':[1,1,1,1]},
            'glu_DASH_L_to_acg5p':{'reactions':['ACGS','ACGK'],
                            'stoichiometry':[1,1]},
            '2obut_and_pyr_to_3mop':{'reactions':['ACHBS','KARA2','DHAD2'],
                            'stoichiometry':[1,1,1]},
            'pyr_to_23dhmb':{'reactions':['ACLS','KARA1'],
                            'stoichiometry':[1,-1]},
            #'met_DASH_L_and_ptrc_to_spmd_and_5mta':{'reactions':['METAT','ADMDC','SPMS'],
            #                'stoichiometry':[1,1,1]}, #cannot be lumped
            'chor_and_prpp_to_3ig3p':{'reactions':['ANS','ANPRT','PRAIi','IGPS'],
                            'stoichiometry':[1,1,1,1]},
            'hom_DASH_L_and_cyst_DASH_L_to_pyr_hcys_DASH_L':{'reactions':['HSST','SHSL1','CYSTL'],
                            'stoichiometry':[1,1,1]},
            'e4p_and_pep_to_3dhq':{'reactions':['DDPA','DHQS'],
                            'stoichiometry':[1,1]},
            'aspsa_to_sl2a6o':{'reactions':['DHDPS','DHDPRy','THDPS'],
                            'stoichiometry':[1,1,1]},
            'glu_DASH_L_to_glu5sa':{'reactions':['GLU5K','G5SD'],
                            'stoichiometry':[1,1]},
            'g1p_to_glycogen':{'reactions':['GLGC','GLCS1'],
                            'stoichiometry':[1,1]},
            'thr_DASH_L_to_gly':{'reactions':['THRD','GLYAT'],
                            'stoichiometry':[1,-1]}, #need to remove deadend mets: athr-L: ATHRDHr, ATHRDHr_reverse; aact: AACTOOR, AOBUTDs
            'dhap_to_lac_DASH_D':{'reactions':['MGSA','LGTHL','GLYOX'],
                            'stoichiometry':[1,1,1]},
            'hom_DASH_L_to_thr_DASH_L':{'reactions':['HSK','THRS'],
                            'stoichiometry':[1,1]},
            '3pg_to_ser_DASH_L':{'reactions':['PGCD','PSERT','PSP_L'],
                            'stoichiometry':[1,1,1]},
            'prpp_to_his_DASH_L':{'reactions':['ATPPRT','PRATPP','PRAMPC','PRMICI','IG3PS','IGPDH','HSTPT','HISTP','HISTD'],
                            'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'UMPSYN_aerobic':{'reactions':['ASPCT','DHORTS','DHORD2','ORPT','OMPDC'],
                            'stoichiometry':[1,-1,1,-1,1]},
            #'UMPSYN_anaerobic':{'reactions':['ASPCT','DHORTS','DHORD5','ORPT','OMPDC'],
            #                'stoichiometry':[1,-1,1,-1,1]},
            'IMPSYN_1':{'reactions':['GLUPRT','PRAGSr','PRFGS','PRAIS'],
                            'stoichiometry':[1,1,1,1]},
            'IMPSYN_2':{'reactions':['AIRC2','AIRC3','PRASCSi','ADSL2r'],
                            'stoichiometry':[1,-1,1,1]},
            'IMPSYN_3':{'reactions':['AICART','IMPC'],
                            'stoichiometry':[1,-1]},
            'imp_to_gmp':{'reactions':['IMPD','GMPS2'],
                            'stoichiometry':[1,1]},
            'imp_to_amp':{'reactions':['ADSS','ADSL1r'],
                            'stoichiometry':[1,1]},
            #'utp_to_dump_anaerobic':{'reactions':['RNTR4c2','DUTPDP'],
            #                'stoichiometry':[1,1]},
            'udp_to_dump_aerobic':{'reactions':['RNDR4','NDPK6','DUTPDP'],
                            'stoichiometry':[1,1,1]},
            #'dtmp_to_dttp':{'reactions':['DTMPK','NDPK4'],
            #                'stoichiometry':[1,1]}, #cannot be lumped
            'COASYN':{'reactions':['ASP1DC','MOHMT','DPR','PANTS','PNTK','PPNCL2','PPCDC','PTPATi','DPCOAK'],
                            'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'FADSYN_1':{'reactions':['GTPCII2','DHPPDA2','APRAUR','PMDPHT','RBFSb'],
                            'stoichiometry':[1,1,1,1,1]},
            'FADSYN_2':{'reactions':['RBFSa','DB4PS'],
                            'stoichiometry':[1,1]},
            'FADSYN_3':{'reactions':['RBFK','FMNAT'],
                            'stoichiometry':[1,1]},
            'NADSYN_aerobic':{'reactions':['ASPO6','QULNS','NNDPR','NNATr','NADS1','NADK'],
                            'stoichiometry':[1,1,1,1,1,1]},
            'NADSYN_anaerobic':{'reactions':['ASPO5','QULNS','NNDPR','NNATr','NADS1','NADK'],
                            'stoichiometry':[1,1,1,1,1,1]},
            #'NADSALVAGE':{'reactions':['NADPPPS','NADN','NNAM','NAMNPP','NMNN','NMNDA','NMNAT','NADDP','ADPRDP'],
            #                'stoichiometry':[1,1,1,1,1,1,1,1,1]}, #cannot be lumped
            'THFSYN':{'reactions':['GTPCI','DNTPPA','DNMPPA','DHNPA2r','HPPK2','ADCS','ADCL','DHPS2','DHFS'],
                            'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'GTHSYN':{'reactions':['GLUCYS','GTHS'],
                            'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_1':{'reactions':['DASYN181','AGPAT181','G3PAT181'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_2':{'reactions':['PSSA181','PSD181'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_3':{'reactions':['PGSA160','PGPP160'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_4':{'reactions':['DASYN161','AGPAT161','G3PAT161'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_5':{'reactions':['PGSA181','PGPP181'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_6':{'reactions':['PSD161','PSSA161'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_7':{'reactions':['PSSA160','PSD160'],'stoichiometry':[1,1]},
            'GLYCPHOSPHOLIPID_8':{'reactions':['DASYN160','AGPAT160','G3PAT160'],'stoichiometry':[1,1,1]},
            'GLYCPHOSPHOLIPID_9':{'reactions':['PGSA161','PGPP161'],'stoichiometry':[1,1]},
            'MOLYBDOPTERIN_1':{'reactions':['MPTAT','MPTS','CPMPS'],'stoichiometry':[1,1,1]},
            'MOLYBDOPTERIN_2':{'reactions':['MOCDS','MOGDS'],'stoichiometry':[1,1]},
            'MOLYBDOPTERIN_3':{'reactions':['MOADSUx','MPTSS'],'stoichiometry':[1,1]},
            'COFACTOR_1':{'reactions':['GLUTRR','G1SAT','GLUTRS'],'stoichiometry':[1,1,1]},
            'COFACTOR_2':{'reactions':['DHNAOT4','UPPDC1','DHNCOAT','DHNCOAS','SEPHCHCS','SUCBZS','SUCBZL','PPPGO3','FCLT','CPPPGO','SHCHCS3'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
            'COFACTOR_3':{'reactions':['TYRL','AMMQLT8','HEMEOS','UPP3MT','SHCHD2','SHCHF','ENTCS','CBLAT'],'stoichiometry':[1,1,1,1,1,1,1,1]},
            'VITB6':{'reactions':['E4PD','PERD','OHPBAT','PDX5PS','PDX5PO2'],'stoichiometry':[1,1,1,1,1]},
            #'THIAMIN':{'reactions':['AMPMS2','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]}, # original pathway without correction
            'THIAMIN':{'reactions':['AMPMS3','PMPK','THZPSN3','TMPPP','TMPK'],'stoichiometry':[1,1,1,1,1]},
            'COFACTOR_4':{'reactions':['I4FE4ST','I4FE4SR','I2FE2SS2'],'stoichiometry':[1,1,1]},
            'COFACTOR_5':{'reactions':['BMOGDS1','BMOGDS2','BMOCOS'],'stoichiometry':[1,1,1]},
            'COFACTOR_6':{'reactions':['DMPPS','GRTT','DMATT'],'stoichiometry':[1,1,1]},
            'COFACTOR_7':{'reactions':['MECDPS','DXPRIi','MEPCT','CDPMEK','MECDPDH5'],'stoichiometry':[1,1,1,1,1]},
            'COFACTOR_8':{'reactions':['LIPOS','LIPOCT'],'stoichiometry':[1,1]},
            'COFACTOR_9':{'reactions':['OMMBLHX','OMPHHX','OPHHX','HBZOPT','DMQMT','CHRPL','OMBZLM','OPHBDC','OHPHM'],'stoichiometry':[1,1,1,1,1,1,1,1,1]},
            'COFACTOR_10':{'reactions':['SERASr','DHBD','UPP3S','HMBS','ICHORT','DHBS'],'stoichiometry':[1,1,1,1,1,1]},
            'COFACTOR_11':{'reactions':['PMEACPE','EGMEACPR','DBTS','AOXSr2','I2FE2SR','OPMEACPD','MALCOAMT','AMAOTr','OPMEACPS','OPMEACPR','OGMEACPD','OGMEACPR','OGMEACPS','EPMEACPR','BTS5'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_1':{'reactions':['UAMAGS','UAPGR','UAGPT3','PAPPT3','GLUR','UAGCVT','UAMAS','UDCPDP','UGMDDS','UAAGDS'],'stoichiometry':[1,1,1,1,-1,1,1,1,1,1]},
            'CELLENV_2':{'reactions':['3HAD181','3OAR181','3OAS181','EAR181x'],'stoichiometry':[1,1,1,1]},
            'CELLENV_3':{'reactions':['3HAD160','3OAR160','EAR160x','3OAS160'],'stoichiometry':[1,1,1,1]},
            'CELLENV_4':{'reactions':['EAR120x','3OAR120','3HAD120','3OAS120','EAR100x'],'stoichiometry':[1,1,1,1,1]},
            'CELLENV_5':{'reactions':['G1PACT','UAGDP','PGAMT','GF6PTA'],'stoichiometry':[1,1,-1,1]},
            'CELLENV_6':{'reactions':['3OAR40','EAR40x','3OAS60','3OAR60','3HAD80','3OAS80','3OAR80','EAR60x','3HAD60','EAR80x','3HAD40'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_7':{'reactions':['3HAD161','EAR161x','3OAS161','3OAR161','3OAS141','3HAD141','3OAR121','EAR121x','3HAD121','EAR141x','T2DECAI','3OAR141','3OAS121'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'CELLENV_8':{'reactions':['TDPGDH','TDPDRR','TDPDRE','G1PTT'],'stoichiometry':[1,1,1,1]},
            'CELLENV_9':{'reactions':['3OAS140','3OAR140'],'stoichiometry':[1,1]},
            'CELLENV_10':{'reactions':['3HAD140','EAR140x'],'stoichiometry':[1,1]},
            'CELLENV_11':{'reactions':['3OAR100','3HAD100','3OAS100'],'stoichiometry':[1,1,1]},
            'LIPOPOLYSACCHARIDE_1':{'reactions':['COLIPAabcpp','COLIPAabctex','EDTXS1','EDTXS2','GALT1','GLCTR1','GLCTR2','GLCTR3','HEPK1','HEPK2','HEPT1','HEPT2','HEPT3','HEPT4','LPADSS','MOAT','MOAT2','MOAT3C','RHAT1','TDSK','USHD'],'stoichiometry':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]},
            'LIPOPOLYSACCHARIDE_2':{'reactions':['AGMHE','GMHEPAT','GMHEPK','GMHEPPA','S7PI'],'stoichiometry':[1,1,1,1,1]},
            'LIPOPOLYSACCHARIDE_3':{'reactions':['U23GAAT','UHGADA','UAGAAT'],'stoichiometry':[1,1,1]},
            'LIPOPOLYSACCHARIDE_4':{'reactions':['KDOPP','KDOCT2','KDOPS'],'stoichiometry':[1,1,1]},
            'ASTPathway':{'reactions':['AST','SADH','SGDS','SGSAD','SOTA'],'stoichiometry':[1,1,1,1,1]}
            };
        return isotopomer_rxns_net
    def define_netRxns_RL2013_reversible(self):
        '''Net-reaction definitions taken from the RL2013 reversible model.'''
        isotopomer_rxns_net = {
            'PTAr_ACKr_ACS':{'reactions':['PTAr','ACKr','ACS'],
                            'stoichiometry':[1,-1,-1]}, #acetate secretion
            'ACONTa_ACONTb':{'reactions':['ACONTa','ACONTb'],
                            'stoichiometry':[1,1]},
            'G6PDH2r_PGL':{'reactions':['G6PDH2r','PGL'],
                            'stoichiometry':[1,1]},
            'GAPD_PGK':{'reactions':['GAPD','PGK'], #glycolysis
                            'stoichiometry':[1,-1]},
            'PGM':{'reactions':['PGM','ENO'], #glycolysis
                            'stoichiometry':[-1,1]},
            'SUCCOAS':{'reactions':['SUCOAS'], #mispelling
                            'stoichiometry':[1]}
            #TODO: amino acid synthesis reactions
            };
        return isotopomer_rxns_net;
class isotopomer_fluxSplits():
    '''Static definitions of metabolic branch points (flux splits).'''
    def __init__(self):
        # branch-point label -> reactions competing for the branch metabolite
        self.isotopomer_splits = self.define_fluxSplits();
    def define_fluxSplits(self):
        '''Return the mapping of branch-point labels to competing reaction ids.'''
        return {
            'g6p_2_f6p_or_6pgc':['PGI','G6PDH2r'],
            '6pgc_2_2ddg6p_or_ru5p-D':['EDD','GND'],
            'pep_2_oaa_or_pyr':['PPC','PYK','GLCptspp'],
            'accoa_2_ac_or_cit':['PTAr','CS'],
            'icit_2_akg_or_glx':['ICDHyr','ICL'],
            'glc-D_2_g6p':['HEX1','GLCptspp'],
            'mal-L_2_oaa_or_pyr':['ME1','ME2','MDH'],
            };
|
[
"dmccloskey87@gmail.com"
] |
dmccloskey87@gmail.com
|
4f2cdd1eb56bda921db71669d39b4bbdaf4062e4
|
82dafd9b89abdf334420e50f9d7562984aed8a7d
|
/cifar10_models/senet.py
|
a6f47305812f4ead441c3208f43d2a499c2c5841
|
[] |
no_license
|
mostafaelhoushi/tensor-decompositions
|
844aaed58abeb1e17923860a5e9aebed64465030
|
8c3186dfc4d5d2eb22b0a673e3eaf1bcaa872feb
|
refs/heads/master
| 2020-07-09T03:51:30.214582
| 2020-05-02T12:46:00
| 2020-05-02T12:46:00
| 203,867,675
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,086
|
py
|
'''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['senet18']
class BasicBlock(nn.Module):
    '''Basic residual block augmented with a squeeze-and-excitation (SE) gate.

    Args:
        in_planes: number of input channels.
        planes: number of output channels.
        stride: stride of the first 3x3 convolution (downsamples when > 1).
    '''
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # projection shortcut only when the shape changes
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers
        # (reduction ratio 16)
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)  # Use nn.Conv2d instead of nn.Linear
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to a 1x1 descriptor per channel
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid: F.sigmoid has been deprecated since PyTorch 0.4.1
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel by its learned gate (broadcast over H, W)
        out = out * w
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    '''Pre-activation residual block with an SE gate (BN/ReLU precede each conv).

    Args:
        in_planes: number of input channels.
        planes: number of output channels.
        stride: stride of the first 3x3 convolution (downsamples when > 1).
    '''
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # projection shortcut only when the shape changes; note the attribute
        # exists only in that case (forward tests with hasattr)
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers: 1x1 convs act as per-channel fully-connected layers
        # (reduction ratio 16)
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # shortcut taken from the pre-activated input when projecting
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze: global average pool to a 1x1 descriptor per channel
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # torch.sigmoid: F.sigmoid has been deprecated since PyTorch 0.4.1
        w = torch.sigmoid(self.fc2(w))
        # Excitation: rescale each channel by its learned gate
        out = out * w
        out += shortcut
        return out
class SENet(nn.Module):
    '''ResNet-style CIFAR backbone whose residual blocks carry SE gates.

    Args:
        block: residual block class, called as block(in_planes, planes, stride).
        num_blocks: list of four ints, blocks per stage.
        num_classes: size of the final classification layer (default 10).
    '''
    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # four stages; spatial resolution halves at stages 2-4
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        '''Stack num_blocks blocks; only the first block uses the given stride.'''
        stage = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, block_stride))
            self.in_planes = planes
        return nn.Sequential(*stage)

    def forward(self, x):
        features = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            features = stage(features)
        # global 4x4 average pool, flatten, then classify
        pooled = F.avg_pool2d(features, 4)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
def SENet18():
    '''Build an 18-layer SE-ResNet (pre-activation blocks, [2,2,2,2] stages, 10 classes).'''
    return SENet(PreActBlock, [2,2,2,2])
def senet18():
    '''Lower-case factory alias for SENet18 (the name exported via __all__).'''
    return SENet18()
def test():
    '''Smoke test: push one CIFAR-sized batch through SENet18 and print the output size.'''
    net = SENet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())
# test()
|
[
"m.elhoushi@ieee.org"
] |
m.elhoushi@ieee.org
|
fb7248f9ab1b81c3bee297715b6eed6deb7193f3
|
b2f6b65cba891f3a86e507d4dd312936517ab139
|
/utils/modelsize.py
|
213406ce9a9a0c028c54e6939f32b41239f2d85d
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leeesangwon/CGNet
|
2822d288355e8a535a780c4a6e850608467465dc
|
d07c0e84d252bed9cbc28e66da4b85bdcc4c6293
|
refs/heads/master
| 2020-04-14T04:48:48.532572
| 2019-05-09T13:08:26
| 2019-05-09T13:08:26
| 163,646,131
| 1
| 0
|
MIT
| 2019-05-09T13:08:28
| 2018-12-31T06:45:11
|
Python
|
UTF-8
|
Python
| false
| false
| 2,602
|
py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
class SizeEstimator(object):
    '''Estimate the in-memory size of a PyTorch model for a given input size.

    The estimate counts parameter storage, forward+backward activation
    storage, and the input itself, all at `bits` bits per element.
    '''
    def __init__(self, model, input_size=(1,1,32,32), bits=32):
        '''
        Estimates the size of PyTorch models in memory
        for a given input size

        Args:
            model: the nn.Module to measure.
            input_size: shape of the sample input fed through the model.
            bits: bits per element (e.g. 32 for float32, 16 for float16).
        '''
        self.model = model
        self.input_size = input_size
        # BUG FIX: was hard-coded to 32, silently ignoring the `bits` argument
        self.bits = bits
        return

    def get_parameter_sizes(self):
        '''Get sizes of all parameters in `model`'''
        mods = list(self.model.modules())
        sizes = []
        # NOTE(review): m.parameters() is recursive, so parameters of nested
        # containers are counted once per enclosing module — fine for flat
        # models, double-counts for nested ones; confirm intended usage.
        for i in range(1, len(mods)):
            m = mods[i]
            p = list(m.parameters())
            for j in range(len(p)):
                sizes.append(np.array(p[j].size()))
        self.param_sizes = sizes
        return

    def get_output_sizes(self):
        '''Run sample input through each layer to get output sizes'''
        # torch.no_grad() replaces the removed `volatile=True` Variable flag;
        # torch.zeros avoids feeding uninitialized memory through the model
        with torch.no_grad():
            input_ = torch.zeros(*self.input_size)
            mods = list(self.model.modules())
            out_sizes = []
            for i in range(1, len(mods)):
                m = mods[i]
                out = m(input_)
                out_sizes.append(np.array(out.size()))
                input_ = out
        self.out_sizes = out_sizes
        return

    def calc_param_bits(self):
        '''Calculate total number of bits to store `model` parameters'''
        total_bits = 0
        for i in range(len(self.param_sizes)):
            s = self.param_sizes[i]
            bits = np.prod(np.array(s))*self.bits
            total_bits += bits
        self.param_bits = total_bits
        return

    def calc_forward_backward_bits(self):
        '''Calculate bits to store forward and backward pass'''
        total_bits = 0
        for i in range(len(self.out_sizes)):
            s = self.out_sizes[i]
            bits = np.prod(np.array(s))*self.bits
            total_bits += bits
        # multiply by 2 for both forward AND backward
        self.forward_backward_bits = (total_bits*2)
        return

    def calc_input_bits(self):
        '''Calculate bits to store input'''
        self.input_bits = np.prod(np.array(self.input_size))*self.bits
        return

    def estimate_size(self):
        '''Estimate model size in memory in megabytes and bits'''
        self.get_parameter_sizes()
        self.get_output_sizes()
        self.calc_param_bits()
        self.calc_forward_backward_bits()
        self.calc_input_bits()
        total = self.param_bits + self.forward_backward_bits + self.input_bits
        total_megabytes = (total/8)/(1024**2)
        return total_megabytes, total
|
[
"874314714@qq.com"
] |
874314714@qq.com
|
46b305d71e12ec7393424848fdb3b864a16ff25c
|
c2a168ec9e91415eeadd53ba6042e614c3e8460c
|
/benchmark_features/hpopt_1/hpop_test_1/ht_13.py
|
c6733fb7f930bc4ee0b82563d4b43470ae436f78
|
[] |
no_license
|
LiYanChalmers/BoschProductionLine
|
530098a9de0d08332511b24a31cdd4b4ec5473fb
|
de864e55be0e8cd174ccacb06afc77e3dc9ec42a
|
refs/heads/master
| 2020-03-21T20:29:14.134812
| 2018-09-03T08:10:08
| 2018-09-03T08:10:08
| 139,010,159
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
# -*- coding: utf-8 -*-
"""
Template for CV parameter search
Tasks:
    1. CV
    2. Train model
    3. Predict on test set
    4. Save
        a. CV results
        b. models trained in CV
        c. model trained on the whole train set
        d. predictions on test set
To-do:
    1. Use models in CV to predict on test set, and save the predictions
        a. Rewrite the CV function
        b. Overhead of prediction should be small
        c. RAM requirement should be small if #columns is not too large
        d. In some cases, may need many columns, RAM requirement may be high.
        So not implementing this idea now.
"""
# project helpers (xgb, np, pd, cross_val_predict_skf_rm_xgb, mcc_eval,
# matthews_corrcoef, save_pickle are all expected to come from bosch_helper)
import sys
sys.path.insert(0, 'bosch_helper')
from bosch_helper import *
#%% Set parameter
# hyper-parameter set #13 of the search grid
param_id = 13
random_state = 90859
param = {'subsample': 0.95, 'silent': 1, 'objective': 'binary:logistic', 'nthread': 20, 'min_child_weight': 5.5, 'max_depth': 15, 'lambda': 4, 'eta': 0.025, 'colsample_bytree': 0.5, 'booster': 'gbtree', 'base_score': 0.0058, 'alpha': 0}
np.random.seed(random_state)
#%% Load data
# NOTE(review): assumes numeric_b1_b8_nf149_1.hdf exists in the working
# directory with 'x' indexed by a train/test level — confirm data layout
x = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'x')
y_train = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'y_train')
x_train = x.loc['train']
x_test = x.loc['test']
#%%
# repeated stratified k-fold CV (5 folds x 3 repeats) with xgboost
cv_results, clfs, running_time = \
    cross_val_predict_skf_rm_xgb(param, x_train, y_train,
                                 num_boost_round=80,
                                 n_splits=5,
                                 n_repeats=3,
                                 random_state=np.random.randint(10**6),
                                 verbose_eval=True)
results = {'clfs_cv': clfs, 'results_cv': cv_results, 'running_time_cv': running_time}
#%% Train on model
# retrain on the full training set (fewer rounds than CV: 60 vs 80)
dtrain = xgb.DMatrix(x_train, label=y_train)
param['seed'] = np.random.randint(10**6)
clf = xgb.train(param, dtrain,
                num_boost_round=60,
                feval=mcc_eval, evals=[(dtrain, 'train')])
y_train_pred = clf.predict(dtrain)
# Find best threshold
# grid-search the probability cutoff that maximizes MCC on the training set
thresholds = np.linspace(0.01, 0.99, 400)
mcc = np.array([matthews_corrcoef(y_train, y_train_pred>thr) for thr in thresholds])
best_threshold = thresholds[mcc.argmax()]
results['best_threshold_train'] = best_threshold
results['mcc_max_train'] = mcc.max()
results['clf_train'] = clf
#%% Predict on test set
dtest = xgb.DMatrix(x_test)
y_test_pred = clf.predict(dtest)
y_test_pred_int = (y_test_pred>best_threshold).astype(int)
# write the Kaggle submission and persist all artifacts
sub = pd.read_csv("sample_submission.csv.zip", index_col=0)
sub["Response"] = y_test_pred_int
sub.to_csv('ht_13.csv.gz', compression='gzip')
results['y_test_pred_prob'] = y_test_pred
results['y_test_pred_int'] = y_test_pred_int
save_pickle(results, 'ht_13.pickle')
|
[
"li.yan.chalmers@gmail.com"
] |
li.yan.chalmers@gmail.com
|
6d27c8039a8ce6ca14e65e11999fb3c5304f2563
|
ef4a4c8de95516700134a45800238de9298e1485
|
/zadacha3.py
|
ccb6d7317053767af297787dfcc42f5ddf4e9f3a
|
[] |
no_license
|
nikolaj74-hub/lessons
|
a45d67d380982d245f5950fe6eef3041c7ffbd2e
|
54437b8e8063668017d7e29612c0623adb8fce94
|
refs/heads/master
| 2023-01-23T19:11:18.680790
| 2020-12-04T13:46:02
| 2020-12-04T13:46:02
| 311,939,032
| 1
| 0
| null | 2020-12-04T13:42:39
| 2020-11-11T10:38:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,624
|
py
|
# Реализовать базовый класс Worker (работник), в котором определить атрибуты: name,
# surname, position (должность), income (доход). Последний атрибут должен быть
# защищенным и ссылаться на словарь, содержащий элементы: оклад и премия, например,
# {"wage": wage, "bonus": bonus}. Создать класс Position (должность) на базе класса Worker.
# В классе Position реализовать методы получения полного имени сотрудника (get_full_name) и
# дохода с учетом премии (get_total_income). Проверить работу примера на реальных данных
# (создать экземпляры класса Position, передать данные, проверить значения атрибутов,
# вызвать методы экземпляров
class Worker:
    """Base employee record.

    Income is kept as a protected-by-convention mapping with keys
    "wage" and "bonus" (attribute is spelled 'incom' in this file).
    """

    def __init__(self, n, sn, pos, w, b):
        self.name = n
        self.surname = sn
        self.position = pos
        self.incom = {"wage": w, "bonus": b}


class Position(Worker):
    """Worker with convenience printers for full name and total income."""

    def get_full_name(self):
        # Prints "<name> <surname>".
        full = " ".join((self.name, self.surname))
        print(full)

    def get_full_incom(self):
        # Prints wage + bonus using the original message format.
        total = sum(self.incom.values())
        print(f'доход ={total} тугр.')
# Smoke test: build one Position, inspect its attributes, and exercise
# both printing methods.
a = Position('коля', 'трофимов', 'слесарь', 30000, 300)
print(a.name)
print(a.incom)
print(a.surname)
print(a.position)
a.get_full_name()
a.get_full_incom()
|
[
"noreply@github.com"
] |
noreply@github.com
|
d2f27c55bbc9eed109b72828c5be2aad86fb4cd3
|
3cd680e0372f942affeb948eedca8e08d9bfb743
|
/22.py
|
c9a9f60726386d6ffe5ecf4bcdc7f5f02fe04839
|
[] |
no_license
|
ug2454/PythonPractice
|
cb507e380b32ecba14b355a3bd60769a4682b4ab
|
cbf7211e00d46f166246d5932661a6f110cc1cf0
|
refs/heads/master
| 2022-11-09T03:52:57.971095
| 2020-06-14T12:11:51
| 2020-06-14T12:11:51
| 272,194,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
# NOTE: 'max' is a local module (max.py) that shadows the builtin max();
# max.max(...) below calls the module's own function, not the builtin.
import max

numbers = [19, 20, 30]
print(max.max(numbers))
|
[
"u.garg14@gmail.com"
] |
u.garg14@gmail.com
|
3d6c10f42425778b851063b600ddb7ceddf3622d
|
161e4fad71b23ac5514f8cc8c04b97ff29039cf2
|
/Array/Buy_Sell_Stock.py
|
7ca3171b5ee36527ea4e438f7ffb002bbdda2c3b
|
[] |
no_license
|
yash872/PyDsa
|
726d43a0730e9143593327f180fab3eb3367d281
|
a3046231c466f2ec5cae94129d2c15d21a082b86
|
refs/heads/main
| 2023-03-06T12:12:49.731899
| 2021-02-04T17:14:28
| 2021-02-04T17:14:28
| 332,211,139
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 978
|
py
|
'''
Best Time to Buy and Sell Stock
You are given an array prices where prices[i] is the price of a given stock on the ith day.
You want to maximize your profit by choosing a single day to buy one stock and choosing a different day in the future to sell that stock.
Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.
Example 1:
Input: prices = [7,1,5,3,6,4]
Output: 5
Explanation: Buy on day 2 (price = 1) and sell on day 5 (price = 6), profit = 6-1 = 5.
Note that buying on day 2 and selling on day 1 is not allowed because you must buy before you sell.
'''
#------------------------------
# Time-> O(N) | Space-> O(1)
#------------------------------
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Return the best single buy-then-sell profit, or 0 if none.

        Single pass: track the cheapest price seen so far and the best
        spread obtainable against it. O(n) time, O(1) space.
        """
        best = 0
        cheapest = float('inf')
        for current in prices:
            if current < cheapest:
                cheapest = current
            elif current - cheapest > best:
                best = current - cheapest
        return best
|
[
"noreply@github.com"
] |
noreply@github.com
|
5a3a47716a461cf0fbff4da09be385c1328fc34e
|
66d915e0d9c0016d5bbb22946539b81866fecb45
|
/Soma de numeros1.py
|
75775660ac4254c3804a653dfe04dfded9325d39
|
[
"MIT"
] |
permissive
|
SricardoSdSouza/Curso-da-USP
|
62e00a820b218cce24fb46ef89debd8f786ce66a
|
6198c509c52bf6132f904cded2e12ae941f2b973
|
refs/heads/main
| 2023-06-02T00:19:53.006210
| 2021-06-14T19:57:30
| 2021-06-14T19:57:30
| 376,927,156
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
import math  # NOTE(review): unused in this script

# Read a positive integer from stdin and print the sum of its decimal digits.
numero = int(input('numero: '))
n = int(numero)  # keep the original value for the output message
if numero > 0:
    soma = 0
    while numero != 0:
        # Peel off the last digit and accumulate it.
        resto = numero % 10
        numero = (numero - resto) // 10
        soma = soma + resto
    print("A soma dos números(", n, ")é = ", soma)
else:
    print('Número invalido...')
|
[
"SricardoSdSouza@yahoo.com.br"
] |
SricardoSdSouza@yahoo.com.br
|
c177f0da14bb7731c15a9e25ad35b2bb78f5ca63
|
3d2192385e65889d20b74742755f5369d0d09161
|
/stock_colis/models/__init__.py
|
da8dece232489928427446f10dfd1d1af8ea259d
|
[] |
no_license
|
FIDINGSARL/audoune
|
9ba746a9d7424a41f8775a6e30f42f2a97224edf
|
39cecd44497d5fa227cc594a6bf5807eb14976d3
|
refs/heads/main
| 2023-06-18T09:49:13.778878
| 2021-06-30T15:06:51
| 2021-06-30T15:06:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 72
|
py
|
# -*- coding: utf-8 -*-
from . import stock_colis, stock_colis_request
|
[
"macbook@MacBook-Pro-de-MacBook.local"
] |
macbook@MacBook-Pro-de-MacBook.local
|
18d5a691ca86297e0db6536e331fc046f0aedd4b
|
9d53da8fbd6d6760fb652e84687cf73ef1f3034d
|
/model/EventPointNetpp/nets.py
|
b5fdb7fc10cad171466eb6ce22481815099f0d63
|
[] |
no_license
|
HowoongJun/localfeature
|
8a944256738e7f10f5e0564c499bf88afaf006ba
|
0d17fca75d2f67c33652710250c3d0f07d7c8970
|
refs/heads/main
| 2023-08-27T19:57:10.071631
| 2021-10-28T06:53:30
| 2021-10-28T06:53:30
| 340,907,081
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,301
|
py
|
###
#
# @Brief nets.py
# @Details EventPointNetPP network
# @Org Robot Learning Lab(https://rllab.snu.ac.kr), Seoul National University
# @Author Howoong Jun (howoong.jun@rllab.snu.ac.kr)
# @Date Sep. 01, 2021
# @Version v0.1
#
###
import torch
class CEventPointNetPP(torch.nn.Module):
    """VGG-style shared encoder with two heads: 65-channel keypoint
    logits and L2-normalized 128-d descriptors."""

    def __init__(self):
        super(CEventPointNetPP, self).__init__()
        # Layer creation order matches the original exactly so parameter
        # names and default random-initialization order are preserved.
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv1_1 = torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = torch.nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = torch.nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convDsc1 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convDsc2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.convKp1 = torch.nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.convKp2 = torch.nn.Conv2d(256, 65, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        """Return (keypoint logits, L2-normalized descriptors)."""
        # Shared encoder: three conv pairs each followed by 2x2 pooling,
        # then a final un-pooled pair (overall spatial stride 8).
        out = self.pool(self.relu(self.conv1_2(self.relu(self.conv1_1(x)))))
        out = self.pool(self.relu(self.conv2_2(self.relu(self.conv2_1(out)))))
        out = self.pool(self.relu(self.conv3_2(self.relu(self.conv3_1(out)))))
        out = self.relu(self.conv4_2(self.relu(self.conv4_1(out))))
        # Keypoint head: raw logits, no activation on the last conv.
        kpt = self.convKp2(self.relu(self.convKp1(out)))
        # Descriptor head, unit-normalized along the channel dimension.
        desc = self.convDsc2(self.relu(self.convDsc1(out)))
        desc = desc.div(torch.norm(desc, p=2, dim=1).unsqueeze(1))
        return kpt, desc
|
[
"prestoxic@gmail.com"
] |
prestoxic@gmail.com
|
11ffda3f83c07b96012a29d6f0df3a67e7760664
|
c15d1e6e8396278aaf495a8f6949514791b6b2cb
|
/clonality.py
|
5dea01b243347bfdddede61957f680797167d117
|
[] |
no_license
|
KnightsDiagnosticsLab/PeakFinder
|
c27615ac1179010e273511d4d4663f1966a06745
|
cc9ea1e0e6b0d7c1eacde482fa72c965049f2e09
|
refs/heads/master
| 2020-09-04T17:12:55.374919
| 2020-01-29T01:07:24
| 2020-01-29T01:07:24
| 219,824,905
| 1
| 0
| null | 2020-01-28T19:22:52
| 2019-11-05T18:36:57
|
Python
|
UTF-8
|
Python
| false
| false
| 27,235
|
py
|
#!/usr/bin/env python3
# Importing Packages
import os
import sys
import re
import pandas as pd
import numpy as np
from scipy.signal import find_peaks, peak_prominences, peak_widths
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from itertools import combinations
from outliers import smirnov_grubbs as grubbs
from bokeh.io import output_file, show, save
from bokeh.layouts import column
from bokeh.plotting import figure
from bokeh.models import BoxAnnotation, Label, Range1d, WheelZoomTool, ResetTool, PanTool, LegendItem, Legend
from bokeh.core.validation.warnings import FIXED_SIZING_MODE
from bokeh.core.validation import silence
import easygui
from convert_fsa_to_csv import convert_folder
# Pandas console display tuning for interactive debugging sessions.
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 50)
# Bokeh hover tooltip: cursor position, x to one decimal, y as integer.
TOOLTIPS = [("(x,y)", "($x{1.1}, $y{int})")]
silence(FIXED_SIZING_MODE, True)  # suppress bokeh fixed-sizing-mode warnings

# Assay channel name -> trace color. '_repeat' entries are the second
# replicate run of the same assay/channel.
channels_of_interest = {
    'IGH-A_channel_1': 'blue',
    'IGH-B_channel_1': 'blue',
    'IGH-C_channel_2': 'green',
    'IGK-A_channel_1': 'blue',
    'IGK-B_channel_1': 'blue',
    'TCRB-A_channel_1': 'blue',
    'TCRB-A_channel_2': 'green',
    'TCRB-B_channel_1': 'blue',
    'TCRB-C_channel_1': 'blue',
    'TCRB-C_channel_2': 'green',
    'TCRB-C_channel_3': 'orange',
    'TCRG-A_channel_1': 'blue',
    'TCRG-A_channel_2': 'green',
    'TCRG-B_channel_1': 'blue',
    'TCRG-B_channel_2': 'green',
    'SCL_channel_1': 'black',
    'IGH-A_channel_1_repeat': 'blue',
    'IGH-B_channel_1_repeat': 'blue',
    'IGH-C_channel_2_repeat': 'green',
    'IGK-A_channel_1_repeat': 'blue',
    'IGK-B_channel_1_repeat': 'blue',
    'TCRB-A_channel_1_repeat': 'blue',
    'TCRB-A_channel_2_repeat': 'green',
    'TCRB-B_channel_1_repeat': 'blue',
    'TCRB-C_channel_1_repeat': 'blue',
    'TCRB-C_channel_2_repeat': 'green',
    'TCRG-A_channel_1_repeat': 'blue',
    'TCRG-A_channel_2_repeat': 'green',
    'TCRG-B_channel_1_repeat': 'blue',
    'TCRG-B_channel_2_repeat': 'green',
    'SCL_channel_1_repeat': 'black'
}

# Regions of interest per channel: list of (start, end, label, color)
# tuples in fitted fragment-size units.
roi_clonality = {
    'IGH-A_channel_1': [(310, 360, 'FR1-JH', 'blue')],
    'IGH-B_channel_1': [(250, 295, 'FR2-JH', 'blue')],
    'IGH-C_channel_2': [(100, 170, 'FR3-JH', 'blue')],
    'IGK-A_channel_1': [(120, 160, 'Vκ-Jκ-1', 'blue'), (190, 210, 'Vκ-Jκ-2', 'green'), (260, 300, 'Vκ-Jκ-3', 'red')],
    'IGK-B_channel_1': [(210, 250, 'Vκ-Kde-1', 'blue'), (270, 300, 'Vκ-Kde-2', 'green'), (350, 390, 'Vκ-Kde-3', 'red')],
    'TCRB-A_channel_1': [(240, 285, 'Vβ_Jβ_Jβ2.X', 'blue')],
    'TCRB-A_channel_2': [(240, 285, 'Vβ_Jβ_Jβ1.X', 'blue')],
    'TCRB-B_channel_1': [(240, 285, 'Vβ_Jβ2', 'blue')],
    'TCRB-C_channel_1': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRB-C_channel_2': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRG-A_channel_1': [(175, 195, 'Vγ10_Jγ1.1_2.1', 'blue'), (230, 255, 'Vγ1-8_Jγ1.1_2.1', 'green')],
    'TCRG-A_channel_2': [(145, 175, 'Vγ10_Jγ1.3_2.3', 'blue'), (195, 230, 'Vγ1-8_Jγ1.3_2.3', 'green')],
    'TCRG-B_channel_1': [(110, 140, 'Vγ11_Jγ1.1_2.1', 'blue'), (195, 220, 'Vγ9_Jγ1.1_2.1', 'green')],
    'TCRG-B_channel_2': [(80, 110, 'Vγ11_Jγ2.1_2.3', 'blue'), (160, 195, 'Vγ9_Jγ1.3_2.3', 'green')],
    'IGH-A_channel_1_repeat': [(310, 360, 'FR1-JH', 'blue')],
    'IGH-B_channel_1_repeat': [(250, 295, 'FR2-JH', 'blue')],
    'IGH-C_channel_2_repeat': [(100, 170, 'FR3-JH', 'blue')],
    'IGK-A_channel_1_repeat': [(120, 160, 'Vκ-Jκ-1', 'blue'), (190, 210, 'Vκ-Jκ-2', 'green'), (260, 300, 'Vκ-Jκ-3', 'red')],
    'IGK-B_channel_1_repeat': [(210, 250, 'Vκ-Kde-1', 'blue'), (270, 300, 'Vκ-Kde-2', 'green'), (350, 390, 'Vκ-Kde-3', 'red')],
    'TCRB-A_channel_1_repeat': [(240, 285, 'Vβ_Jβ_Jβ2.X', 'blue')],
    'TCRB-A_channel_2_repeat': [(240, 285, 'Vβ_Jβ_Jβ1.X', 'blue')],
    'TCRB-B_channel_1_repeat': [(240, 285, 'Vβ_Jβ2', 'blue')],
    'TCRB-C_channel_1_repeat': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRB-C_channel_2_repeat': [(170, 210, 'Dβ_Jβ_Dβ2', 'blue'), (285, 325, 'Dβ_Jβ_Dβ1', 'green')],
    'TCRG-A_channel_1_repeat': [(175, 195, 'Vγ10_Jγ1.1_2.1', 'blue'), (230, 255, 'Vγ1-8_Jγ1.1_2.1', 'green')],
    'TCRG-A_channel_2_repeat': [(145, 175, 'Vγ10_Jγ1.3_2.3', 'blue'), (195, 230, 'Vγ1-8_Jγ1.3_2.3', 'green')],
    'TCRG-B_channel_1_repeat': [(110, 140, 'Vγ11_Jγ1.1_2.1', 'blue'), (195, 220, 'Vγ9_Jγ1.1_2.1', 'green')],
    'TCRG-B_channel_2_repeat': [(80, 110, 'Vγ11_Jγ2.1_2.3', 'blue'), (160, 195, 'Vγ9_Jγ1.3_2.3', 'green')],
}

# Default draw color per raw instrument channel.
channel_colors = {
    'channel_1': 'blue',
    'channel_2': 'green',
    'channel_3': 'purple',
    'channel_4': 'red',
    'channel_5': 'darkgoldenrod',
    'SCL': 'black'
}
def pretty_name(c, t):
if 'channel' in c:
channel = re.findall(r'channel_\d', c)[0]
if 'repeat' in c:
pc = '_'.join([t, channel, 'repeat'])
else:
pc = '_'.join([t, channel])
else:
pc = c
return pc
def organize_clonality_files(path):
    """Group the CSV files in *path* into Case objects keyed by case name.

    A case name looks like '<d>KD-<d>M<d>' with an optional '-R' repeat
    suffix; each case gets a dict mapping assay name -> list of its files.
    Returns {case_name: Case}.
    """
    tests = [
        'IGH-A', 'IGH-B', 'IGH-C', 'IGK-A', 'IGK-B',
        'TCRB-A', 'TCRB-B', 'TCRB-C', 'TCRG-A', 'TCRG-B',
        'SCL'
    ]
    # Construct the case list from all CSV files in the directory.
    csv_list = [f for f in os.listdir(path) if f.endswith('.csv')]
    case_names_as_llt = [
        re.findall(
            r'(\d+KD-\d+M\d+)(-R)*',
            x) for x in csv_list]  # 'llt' is 'list of lists of tuple'
    case_names_as_ll = [list(lt[0]) for lt in case_names_as_llt if len(
        lt) > 0]  # 'll' is 'list of lists'
    # Finally we have a set of unique case-name strings.
    case_names = {''.join(x) for x in case_names_as_ll}
    # Map case name -> {test name -> matching files}.
    cd = {case_name: {t: [f for f in csv_list if case_name in f and t in f]
                      for t in tests} for case_name in case_names}
    cases = {case_name: Case() for case_name in case_names}
    for case_name, c in cases.items():
        c.name = case_name
        c.files = cd[case_name]
    return cases
class Case(object):
    """Mutable record holding everything computed for one clonality case:
    raw traces, size-standard ladders, and the various peak indices the
    pipeline fills in as it runs."""

    def __init__(self):
        # Scalar fields.
        self.name = None
        self.allelic_ladder = None
        # ROX-500 size-standard values used for this case.
        self.rox500 = []
        # Per-channel dictionaries, populated by the processing pipeline.
        for attr in ('files', 'ladder', 'index_of_peaks_to_annotate',
                     'index_of_artifactual_peaks', 'index_of_replicate_peaks',
                     'plot_labels', 'widths', 'abberant_peaks',
                     'some_peaks', 'some_upside_down_peaks'):
            setattr(self, attr, {})
def gather_case_data(case, case_name, path):
    """Read every CSV for a case into one wide DataFrame on case.df.

    Columns are renamed via pretty_name and only assay (TCR*/IG*) and
    size-standard (SCL*) columns are kept. Returns the mutated case.
    """
    df = pd.DataFrame()
    for t, files in case.files.items():
        for f in files:
            df_t = pd.read_csv(os.path.join(path, f))
            df_t.columns = [pretty_name(c, t) for c in df_t.columns]
            # Drop everything that is not a trace of interest.
            columns_to_drop = [c for c in df_t.columns if not (
                c.startswith('TCR') or c.startswith('IG') or c.startswith('SCL'))]
            df_t = df_t.drop(columns_to_drop, axis=1)
            df = pd.concat([df, df_t], axis=1, sort=False)
    df.name = case_name
    case.df = df
    return case
def local_southern(case, order=2):
    """Map scan index to fragment size with the Local Southern method.

    For each size-standard channel ladder, fits two overlapping
    polynomials (each through three consecutive ladder points) around
    every interval and averages them, appending an 'x_fitted_<channel>'
    column to case.df.
    """
    for ch_ss, ladder in case.ladder.items():
        x_fitted = np.array([])
        for i in range(2, len(ladder) - 1):
            # Polynomial through ladder points i-2..i.
            x1 = ladder[i - 2:i + 1]
            y1 = case.rox500[i - 2:i + 1]
            polyx1 = np.poly1d(np.polyfit(x1, y1, deg=order))
            # Polynomial through ladder points i-1..i+1.
            x2 = ladder[i - 1:i + 2]
            y2 = case.rox500[i - 1:i + 2]
            polyx2 = np.poly1d(np.polyfit(x2, y2, deg=order))
            # First and last intervals extend to the edges of the data.
            if i == 2:
                x = range(case.df.index.tolist()[0], ladder[i])
            elif i == len(ladder) - 2:
                x = range(ladder[i - 1], case.df.index.tolist()[-1] + 1)
            else:
                x = range(ladder[i - 1], ladder[i])
            # Average of the two local fits over this interval.
            y = np.average(np.array([polyx1(x), polyx2(x)]), axis=0)
            x_fitted = np.concatenate((x_fitted, y), axis=0)
        x_df = pd.DataFrame(x_fitted)
        col_name = '_'.join(['x_fitted', ch_ss])
        x_df.columns = [col_name]
        case.df = pd.concat([case.df, x_df], axis=1, sort=False)
    return case
def pick_peak_one(case):
    """Locate the SCL anchor peak and all peaks to its right.

    Sets case.peak_one to the farther (on x) of the two tallest peaks
    within scan range 1500-2300, and case.peaks to every peak from
    peak_one onward. Skips cases whose SCL column length mismatches.
    """
    case.ladder_success = False
    scldf = case.df['SCL_channel_1']
    # Goal is the farther (on x axis) of the two tallest peaks; this
    # scan range was determined by looking at 250+ cases.
    mask = scldf.index.isin(range(1500, 2300))
    min_dist = 20
    if mask.size == scldf.size:
        peaks_x, _ = find_peaks(scldf.where(mask, 0), distance=min_dist)
        peaks_2tallest = sorted(
            [(x, scldf[x]) for x in peaks_x], key=lambda coor: coor[1], reverse=True)[:2]
        peak_farther_of_2tallest = sorted(
            peaks_2tallest, key=lambda coor: coor[0], reverse=True)[0]
        case.peak_one = peak_farther_of_2tallest
        # Collect every peak from peak_one to the end of the trace.
        mask = scldf.index.isin(range(case.peak_one[0], scldf.size))
        peaks_x, _ = find_peaks(scldf.where(mask, 0), distance=min_dist)
        case.peaks = [(x, scldf[x]) for x in sorted(peaks_x, reverse=False)]
    else:
        # Size mismatch usually means two files were loaded into one column.
        print(
            '\tSkipping {} due to size mismatch, likely due to multiple files being added to the same column in the case DataFrame column'.format(
                case.name))
        for f in case.files['SCL']:
            print('\t\t{}'.format(f))
    return case
def make_decay_curve(case):
    """Search for an exponential decay threshold giving a good SCL fit.

    Steepens a decay curve anchored at case.peak_one in 0.1 steps; each
    step keeps only peaks above the curve (via evaluate_SCL) and stops
    once the ladder-fit residual drops to <= 10. Stores the final
    steepness on case.decay_value.
    """
    a = case.peak_one[1]
    b = 0.5
    x_decay = np.array(range(case.peak_one[0], len(case.df.index.tolist())))
    i = 0
    while i < 20:
        i += 0.1
        # Exponential decay from peak_one; larger i -> steeper fall-off.
        y_decay = a * b**(i * (x_decay - case.peak_one[0]) / case.peak_one[0])
        decay = pd.Series(data=y_decay, index=x_decay)
        decay.name = 'decay'
        # Store/refresh the decay curve as a df column (kept for plotting).
        if decay.name not in case.df.columns:
            case.df = pd.concat([case.df, decay], axis=1, sort=False)
        else:
            case.df[decay.name] = decay
        case = evaluate_SCL(case, decay)
        if case.residual <= 10:
            case.ladder_success = True
            break
    case.decay_value = i
    return case
def evaluate_SCL(case, decay):
    """Pick the 4-peak SCL ladder with the best linear fit to 100..400.

    Considers every 3-combination of peaks above the decay curve,
    prepends peak_one, and keeps the combination whose linear fit to
    [100, 200, 300, 400] has the smallest residual. Stores the result
    on case.ladder_SCL / case.residual.
    """
    qualifying_peaks = [(x, y) for x, y in case.peaks if y > decay[x]]
    combos = [list(c) for c in combinations(qualifying_peaks, 3)]
    combos.sort(key=lambda coor: coor[0])
    case.ladder_SCL = [400, 100, 300, 200]  # placeholder; replaced by any real fit
    case.residual = 1000000  # sentinel: effectively +infinity
    for combo in combos:
        ladder_SCL = [case.peak_one[0]] + [x for x, y in combo]
        poly_current, res_current, rank, singular_values, rcond = np.polyfit(
            ladder_SCL, [100, 200, 300, 400], 1, full=True)
        res_current = res_current[0]
        if res_current < case.residual:
            case.residual = res_current
            case.ladder_SCL = ladder_SCL
    return case
def build_ladder(df, size_standard, label_name):
    """Choose the peak positions that best match a ROX size standard.

    Tries every len(size_standard)-combination of candidate peaks,
    ranks combinations by R^2 of a linear fit against the standard,
    and returns the best one whose peak heights contain no Grubbs
    outlier.
    """
    choices, std = reduce_choices(df, label_name)
    ss = np.array(size_standard)
    if len(choices) < len(size_standard):
        print(
            '\tWARNING: len(choices) = {}, k = {}'.format(
                len(choices),
                len(size_standard)))
    X = np.array([sorted(list(c))
                  for c in combinations(choices, len(size_standard))])
    # Vectorized linear fits: one column of X.T per candidate ladder.
    pfit_zx = np.polyfit(ss, X.T, deg=1, full=True)
    residuals_zx = pfit_zx[1]
    X_mean = np.expand_dims(np.mean(X, axis=1), axis=1)
    R_sq_zx = 1.0 - (np.square(residuals_zx) / np.sum(np.square(X - X_mean)))
    # Walk candidates from best R^2 downward until the ladder heights
    # pass a Grubbs outlier test (nothing removed).
    ranked_R_sq, indices = np.unique(R_sq_zx, return_index=True)
    indices = indices.tolist()
    indices.reverse()
    for i in indices:
        ladder = X[i]
        Y = df[ladder]
        Ygrubb = grubbs.test(Y.tolist(), alpha=0.05)
        if len(Y) == len(Ygrubb):
            break
    return ladder
def reduce_choices(ds, label_name):
    """Pick candidate size-standard peak positions from a raw trace.

    Parameters
    ----------
    ds : pandas.Series
        Raw fluorescence trace (index = scan position).
    label_name : str
        Title used for the diagnostic plot if peak-finding fails.

    Returns
    -------
    (list, float)
        Candidate peak x positions to the right of the tallest peak,
        and the standard deviation of the first-pass candidate heights.
    """
    band_width = 2.0  # acceptance half-band around the trend line, in std units
    try:
        # First pass: modest peaks only (20-1000 RFU) so that the huge
        # primer/anchor peak never enters the candidate pool.
        candidates, _ = find_peaks(ds, height=[20, 1000], distance=30, width=2)
    except Exception:
        # Show the offending trace for debugging, then propagate.
        # BUGFIX: the original bare `except:` swallowed everything and
        # then crashed below on an undefined variable.
        p = figure(tools='pan,wheel_zoom,reset', tooltips=TOOLTIPS, title=label_name)
        p.line(ds.index.to_list(), ds, line_width=0.5, color='blue')
        show(p)
        raise
    # The tallest peak overall marks the left edge of the usable region.
    all_peaks, _ = find_peaks(ds)
    coor = [(x, ds[x]) for x in all_peaks]
    tallest = sorted(coor, key=lambda c: c[1])[-1]
    choices_x = [x for x in candidates if x > tallest[0]]
    choices_y = [ds[x] for x in choices_x]
    # Fit a trend line through candidate heights; accept only peaks whose
    # height falls within +/- band_width * std of that line.
    trend = np.poly1d(np.polyfit(choices_x, choices_y, deg=1))
    std = np.std(choices_y)
    xs = ds.index.to_list()  # hoisted: original computed this twice
    std2_below = trend(xs) - band_width * std
    std2_above = trend(xs) + band_width * std
    peaks_x, _ = find_peaks(
        ds, height=[std2_below, std2_above], prominence=20, width=2)
    choices_x = [x for x in peaks_x if x > tallest[0]]
    return choices_x, std
def size_standard(case, ch_ss_num=4):
    """Build a ROX-500 ladder for every size-standard channel in case.df.

    ch_ss_num selects which instrument channel carries the size standard
    (channel_4 by default). Ladders are stored in case.ladder keyed by
    channel name; the standard's values are kept on case.rox500.
    """
    # Alternative ROX-500 subsets kept for reference; only the 75-400
    # variant is currently used.
    rox500_16 = [
        35, 50, 75, 100, 139, 150, 160, 200,
        250, 300, 340, 350, 400, 450, 490, 500]
    rox500_14 = [
        35, 50, 75, 100, 139, 150, 160, 200,
        250, 300, 340, 350, 400, 450]
    rox500_13 = [50, 75, 100, 139, 150, 160, 200, 250, 300, 340, 350, 400, 450]
    rox500_75_400 = [75, 100, 139, 150, 160, 200, 250, 300, 340, 350, 400]
    rox500_75_450 = [75, 100, 139, 150, 160, 200, 250, 300, 340, 350, 400, 450]
    rox500 = rox500_75_400
    case.rox500 = rox500[:]
    ch_ss = 'channel_' + str(ch_ss_num)
    # Every raw column on the size-standard channel (skip fitted columns).
    ladder_channels = [
        ch for ch in case.df.columns if ch_ss in ch and 'x_fitted' not in ch]
    for ch in ladder_channels:
        label_name = '_'.join([case.name, ch])
        case.ladder[ch] = build_ladder(case.df[ch], rox500, label_name)
    return case
def baseline_correction_simple(case, ch_list=None, ch_ss_num=4):
    """Subtract a linear baseline (fit on scans >= 1000) from each channel.

    Channels on the size-standard channel number are skipped. Mutates
    case.df in place and returns the case.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        # Restrict to channels actually present in the DataFrame.
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        peaks_i, props = find_peaks(case.df[ch], prominence=50)  # NOTE(review): result unused
        I = case.df.index.to_list()
        I_1k = I[1000:]
        x_baseline = case.df[ch][I_1k].to_list()
        # Linear fit over the tail of the trace approximates the baseline.
        polyxy = np.poly1d(np.polyfit(I_1k, x_baseline, deg=1))
        case.df[ch] = case.df[ch] - polyxy(case.df.index.to_list())
    return case
def baseline_correction_upside_down(
        case,
        ch_list=None,
        ch_ss_num=4,
        iterations=3,
        prominence=1,
        distance=20):
    """Baseline-correct by splining through valleys of the inverted trace.

    Runs `iterations` spline-subtraction rounds on the inverted signal,
    flips it back, and records peaks that only exist after correction in
    case.abberant_peaks[ch]. Mutates case.df in place.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        peaks_start, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)
        df = case.df[ch] * -1
        # NOTE(review): peaks_start is immediately recomputed on the
        # INVERTED trace, overwriting the line above — possibly a bug;
        # confirm which baseline was intended for 'abberant' detection.
        peaks_start, _ = find_peaks(
            df, prominence=prominence, distance=distance)
        all_your_base = set()  # NOTE(review): never used in this function
        for i in range(0, iterations):
            # Valleys of the original trace = peaks of the inverted one.
            bases, props = find_peaks(
                df, prominence=prominence, distance=distance)
            spl = InterpolatedUnivariateSpline(
                bases, df[bases], bbox=[bases[0], bases[int(len(bases) / 2)]])
            spl_df = pd.Series(spl(case.df.index.tolist()))
            df = df - spl_df
        case.df[ch] = df * -1
        peaks_finish, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)
        # Peaks introduced by the correction itself.
        abberant_peaks = set(peaks_finish) - set(peaks_start)
        case.abberant_peaks[ch] = abberant_peaks
    return case
def baseline_correction_advanced(
        case,
        ch_list=None,
        ch_ss_num=4,
        iterations=3,
        prominence=1,
        distance=20):
    """Iteratively subtract a spline through peak bases from each channel.

    Each round: find peaks, spline through their left/right bases (which
    approximate the baseline under the peaks), subtract. Skips the
    size-standard channel. Mutates case.df in place.
    """
    if ch_list is None:
        ch_list = case.df.columns.to_list()
    else:
        ch_list = list(set(case.df.columns.to_list()) & set(ch_list))
    ch_ss = 'channel_' + str(ch_ss_num)
    ch_list = [ch for ch in ch_list if ch_ss not in ch]
    for ch in ch_list:
        peaks_start, _ = find_peaks(
            case.df[ch], prominence=prominence, distance=distance)  # NOTE(review): unused
        all_your_base = set()
        for i in range(0, iterations):
            peaks_current, props = find_peaks(
                case.df[ch], prominence=prominence, distance=distance)
            # Left/right bases bracket each detected peak.
            bases = set(np.concatenate(
                [props['left_bases'], props['right_bases']]))
            all_your_base = all_your_base | bases
            bases = sorted(list(bases))
            # ext=1 -> the spline evaluates to 0 outside the base range.
            spl = InterpolatedUnivariateSpline(
                bases, case.df[ch][bases], ext=1)
            spl_df = pd.Series(spl(case.df.index.tolist()))
            case.df[ch] = case.df[ch] - spl_df
    return case
def index_of_peaks_to_annotate(case):
    """Record, per channel, up to 5 tallest peaks inside each ROI.

    Populates case.index_of_peaks_to_annotate[ch] with DataFrame indices
    of prominent peaks (prominence >= 100, height >= 300) whose fitted
    fragment size falls inside one of the channel's regions of interest.
    """
    for ch in case.df.columns:
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
        if ch in roi_clonality.keys():
            peaks_x, _ = find_peaks(case.df[ch], prominence=100, height=300)
            peaks_in_all_roi = []
            for x_start, x_end, _, _ in roi_clonality[ch]:
                peaks_in_current_roi = [
                    x for x in peaks_x if case.df[x_col_name][x] >= x_start and case.df[x_col_name][x] <= x_end]
                # Sort by height (descending) and keep at most 5 per ROI.
                peaks_y = case.df[ch][peaks_in_current_roi].to_list()
                peaks_in_current_roi = [x for y, x in sorted(
                    zip(peaks_y, peaks_in_current_roi), reverse=True)]
                if len(peaks_in_current_roi) > 5:
                    peaks_in_all_roi.extend(peaks_in_current_roi[0:5])
                else:
                    peaks_in_all_roi.extend(peaks_in_current_roi)
            case.index_of_peaks_to_annotate[ch] = peaks_in_all_roi[:]
    return case
def find_artifactual_peaks(case):
    """Record peaks >= 500 RFU on each (normally empty) channel-3 trace.

    Only indices within the corresponding channel-4 ladder span are
    kept, stored in case.index_of_artifactual_peaks keyed by channel.
    """
    for ch in case.df.columns:
        if 'channel_3' not in ch or 'SCL' in ch:
            continue
        ch_4 = re.sub(r'channel_\d', 'channel_4', ch)
        label_name = case.name + '_' + ch  # unused; kept for parity with original
        ladder = case.ladder[ch_4]
        lo, hi = ladder[0], ladder[-1]
        found, _ = find_peaks(case.df[ch], height=500)
        case.index_of_artifactual_peaks[ch] = [i for i in found if lo <= i <= hi]
    return case
def plot_scl(case, ch, plot_dict, w, h):
    """Add a line plot of the SCL channel to plot_dict (keyed by ch)."""
    if ch in channels_of_interest.keys() and 'SCL' in ch:
        ch_num = re.findall(r'channel_\d', ch)[0]
        label_name = case.name + '_' + ch
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)  # NOTE(review): unused here
        x = case.df[ch].index.to_list()
        y = case.df[ch].to_list()
        # x axis is raw scan index; range starts at 1000 to skip the front.
        p = figure(
            tools='pan,wheel_zoom,reset',
            title=label_name,
            x_axis_label='fragment size',
            y_axis_label='RFU',
            width=w,
            height=h,
            x_range=(
                1000,
                max(x)),
            tooltips=TOOLTIPS)
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        plot_dict[ch] = p
    return plot_dict
def plot_channels_of_interest(case, ch, plot_dict, w, h, ch_ss_num=4):
    """Add a fragment-size vs RFU line plot for an assay channel."""
    if ch in channels_of_interest.keys() and 'SCL' not in ch:
        ch_num = re.findall(r'channel_\d', ch)[0]
        label_name = case.name + '_' + ch
        # x axis uses the fitted fragment sizes from the ladder channel.
        x_col_name = 'x_fitted_' + \
            re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
        p = figure(
            tools='pan,wheel_zoom,reset',
            title=label_name,
            x_axis_label='fragment size',
            y_axis_label='RFU',
            width=w,
            height=h,
            x_range=(
                75,
                400),
            tooltips=TOOLTIPS)
        x = case.df[x_col_name].to_list()
        y = case.df[ch].to_list()
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        plot_dict[ch] = p
    return plot_dict
def highlight_roi_clonality(case, ch, plot_dict, w, h):
    """Shade each region of interest on ch's plot and add a legend."""
    if ch in roi_clonality.keys():
        p = plot_dict[ch]
        legends = []
        for x_left, x_right, roi_name, roi_color in roi_clonality[ch]:
            # Near-invisible dot supplies a colored glyph for the legend.
            dummy_dot = p.line([0, 0], [1, 1], line_width=20,
                               color=roi_color, alpha=0.10)
            roi = BoxAnnotation(
                left=x_left,
                right=x_right,
                fill_color=roi_color,
                fill_alpha=0.05)
            p.add_layout(roi)
            legends.append(LegendItem(label=roi_name, renderers=[dummy_dot]))
        p.add_layout(Legend(items=legends, location='top_right'))
        plot_dict[ch] = p
    return plot_dict
def plot_peaks_of_interest(
        case,
        ch,
        plot_dict,
        w,
        h,
        replicate_only,
        ch_ss_num=4):
    """Annotate fitted fragment sizes above selected peaks on ch's plot.

    replicate_only selects replicate-confirmed peaks instead of all
    annotatable peaks. Also rescales the y range to fit the labels.
    """
    if ch in roi_clonality.keys():
        x_col_name = 'x_fitted_' + \
            re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
        p = plot_dict[ch]
        if replicate_only:
            peaks_index = case.index_of_replicate_peaks[ch]
        else:
            peaks_index = case.index_of_peaks_to_annotate[ch]
        x_peaks = case.df[x_col_name][peaks_index].to_list()
        y_peaks = case.df[ch][peaks_index].to_list()
        p.y_range.start = -100
        # Leave 30% headroom above the tallest labeled peak.
        if len(y_peaks) > 0:
            p.y_range.end = 1.3 * max(y_peaks)
        else:
            p.y_range.end = 1000
        for x, y in zip(x_peaks, y_peaks):
            mytext = Label(
                angle=1,
                x=x,
                y=int(y),
                text='{:.1f}'.format(x),
                x_offset=0,
                y_offset=2,
                text_font_size='8pt')
            p.add_layout(mytext)
    return plot_dict
def plot_size_standard(case, ch, plot_dict, w, h, ch_ss_num=4):
    """Plot the size-standard trace for ch's channel with ladder labels."""
    ch_ss = re.sub(r'channel_\d', 'channel_' + str(ch_ss_num), ch)
    ch_num = re.findall(r'channel_\d', ch)[0]
    if ch_ss in case.ladder.keys():
        label_name = case.name + '_' + ch_ss
        x = case.df[ch_ss].index.to_list()
        y = case.df[ch_ss].to_list()
        x_ladder = case.ladder[ch_ss]
        y_ladder = case.df[ch_ss][x_ladder].to_list()
        p = figure(tools='pan,wheel_zoom,reset',
                   title=label_name,
                   x_axis_label='size standard',
                   y_axis_label='RFU',
                   width=w,
                   height=int(h / 2.0),
                   x_range=(0,
                            max(x)),
                   y_range=(-200,
                            max(y_ladder) + 200),
                   tooltips=TOOLTIPS)
        p.line(x, y, line_width=0.5, color=channel_colors.get(ch_num, 'blue'))
        p.ygrid.visible = False
        # Mark each ladder peak with an 'x' glyph and its ROX-500 size.
        p.x(x_ladder, y_ladder)
        for x, y, label in zip(x_ladder, y_ladder, case.rox500):
            mytext = Label(
                angle=1,
                x=x,
                y=y,
                text=str(label),
                x_offset=0,
                y_offset=2,
                text_font_size='8pt')
            p.add_layout(mytext)
        plot_dict[ch_ss] = p
    return plot_dict
def plot_empty_channel_3(case, ch, plot_dict, w, h):
    """Plot the (normally empty) channel 3 and label artifactual peaks."""
    if ch in channels_of_interest.keys() and 'SCL' not in ch:
        ch_3 = re.sub(r'channel_\d', 'channel_3', ch)
        label_name = case.name + '_' + ch_3
        x = case.df[ch_3].index.to_list()
        y = case.df[ch_3].to_list()
        x_ladder = case.index_of_artifactual_peaks[ch_3]
        y_ladder = case.df[ch_3][x_ladder].to_list()
        # Fix the y range only when there are artifact peaks to scale to.
        if len(y_ladder) > 0:
            p = figure(tools='pan,wheel_zoom,reset',
                       title=label_name,
                       x_axis_label='channel of artifactual peaks',
                       y_axis_label='RFU',
                       width=w,
                       height=int(h / 2.0),
                       x_range=(0,
                                max(x)),
                       y_range=(-200,
                                1.5 * max(y_ladder)),
                       tooltips=TOOLTIPS)
        else:
            p = figure(
                tools='pan,wheel_zoom,reset',
                title=label_name,
                x_axis_label='channel of artifactual peaks',
                y_axis_label='RFU',
                width=w,
                height=int(
                    h / 2.0),
                x_range=(
                    0,
                    max(x)),
                tooltips=TOOLTIPS)
        p.line(
            x,
            y,
            line_width=0.5,
            color=channel_colors.get(
                'channel_3',
                'blue'))
        p.ygrid.visible = False
        # Label each artifact peak with its fitted fragment size.
        x_col_name = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', ch)
        x_fitted = case.df[x_col_name][x_ladder].to_list()
        for x, y, label in zip(x_ladder, y_ladder, x_fitted):
            mytext = Label(angle=1, x=x, y=y, text='{:.1f}'.format(
                label), x_offset=0, y_offset=2, text_font_size='8pt')
            p.add_layout(mytext)
        plot_dict[ch_3] = p
    return plot_dict
def sync_axes(plot_dict):
    """Share toolbars across all figures and link each channel's axes
    with its '_repeat' counterpart so both zoom/pan together.

    The figure with the alphabetically first key donates its tool set;
    for every channel/repeat pair the figure with the larger y-extent
    donates its ranges to the other.  Returns the same dict.
    """
    reference = plot_dict[min(plot_dict)]  # first figure in key order
    reference.toolbar.active_scroll = reference.select_one(WheelZoomTool)
    for name, fig in plot_dict.items():
        fig.tools = reference.tools
        fig.toolbar.logo = None
        partner = plot_dict.get(name + '_repeat')
        if partner is None:
            continue
        # Only link when both figures have an explicit y-extent.
        if fig.y_range.end is None or partner.y_range.end is None:
            continue
        if fig.y_range.end >= partner.y_range.end:
            partner.x_range = fig.x_range
            partner.y_range = fig.y_range
        else:
            fig.x_range = partner.x_range
            fig.y_range = partner.y_range
    return plot_dict
def plot_clonality_case(case, replicate_only, w=1100, h=350):
    """Render every figure for one case, link their axes, and write the
    assembled column to '<case name>.html' (also opened in the browser).

    Args:
        case: per-sample object with ``df`` and ``name``.
        replicate_only: forwarded to plot_peaks_of_interest to restrict
            annotation to replicated peaks.
        w, h: base figure width/height in pixels.
    """
    silence(FIXED_SIZING_MODE, True)
    figures = {}
    for column_name in sorted(case.df.columns):
        figures = plot_scl(case, column_name, figures, w, h)
        figures = plot_channels_of_interest(case, column_name, figures, w, h)
        figures = highlight_roi_clonality(case, column_name, figures, w, h)
        figures = plot_empty_channel_3(case, column_name, figures, w, h)
        figures = plot_size_standard(case, column_name, figures, w, h)
        figures = plot_peaks_of_interest(
            case, column_name, figures, w, h, replicate_only)
    figures = sync_axes(figures)
    # Display order: SCL figures first, then channel + repeat figures
    # followed by their size standards.
    scl_keys = sorted(key for key in figures if 'SCL' in key)
    other_keys = sorted(key for key in figures if 'SCL' not in key)
    ordered_keys = scl_keys + other_keys
    plots = column([figures[key] for key in ordered_keys], sizing_mode='fixed')
    case_html = case.name + '.html'
    output_file(case_html)
    show(plots)
    save(plots)
    print('Saved {}'.format(case_html))
# Module-level debug flag; presumably toggles extra diagnostics
# elsewhere in the file -- TODO confirm it is still read anywhere.
debug = False
def replicate_peaks(case):
    """Mark peaks that occur in both a channel and its '_repeat' run.

    Two peaks replicate each other when their fitted positions (read
    from the corresponding 'x_fitted_...channel_4...' columns) differ by
    less than 1.0.  The sorted indices are written into
    ``case.index_of_replicate_peaks`` for both the channel and its
    repeat; channels with no entry get an empty list.

    Returns the (mutated) case object.
    """
    for channel in case.index_of_peaks_to_annotate.keys():
        # Ensure every annotated channel at least has an empty result.
        if channel not in case.index_of_replicate_peaks.keys():
            case.index_of_replicate_peaks[channel] = []
        if 'repeat' in channel:
            continue  # pairs are handled from the non-repeat side
        repeat_channel = channel + '_repeat'
        fitted_col = 'x_fitted_' + re.sub(r'channel_\d', 'channel_4', channel)
        fitted_col_repeat = ('x_fitted_'
                             + re.sub(r'channel_\d', 'channel_4',
                                      repeat_channel))
        matched = set()
        matched_repeat = set()
        for i in case.index_of_peaks_to_annotate[channel]:
            position = case.df[fitted_col][i]
            for j in case.index_of_peaks_to_annotate[repeat_channel]:
                if abs(position - case.df[fitted_col_repeat][j]) < 1.0:
                    matched.add(i)
                    matched_repeat.add(j)
        case.index_of_replicate_peaks[channel] = sorted(matched)
        case.index_of_replicate_peaks[repeat_channel] = sorted(matched_repeat)
    return case
def main():
    """Process every clonality case in a user-selected folder.

    Prompts for a directory via easygui, converts the raw files, then
    runs the full pipeline (size standard, baseline correction, peak
    calling, replicate matching) for each case and writes one HTML plot
    per case into that directory.
    """
    owd = os.getcwd()  # original working directory (currently unused)
    # path = os.path.abspath(sys.argv[1])
    path = easygui.diropenbox()
    os.chdir(path)
    convert_folder(path)
    cases = organize_clonality_files(path)
    # output_path = os.path.join(path, '/plots')
    # if not os.path.exists(output_path): os.mkdir(output_path)
    for case_name in sorted(cases.keys()):
        case = cases[case_name]
        print('Processing {}'.format(case_name))
        case = gather_case_data(case, case_name, path)
        # channel 4 carries the ROX500 size standard ladder.
        case = size_standard(case, ch_ss_num=4)
        case = find_artifactual_peaks(case)
        # case = baseline_correction_simple(case)
        case = baseline_correction_advanced(
            case, ch_list=channels_of_interest.keys(), distance=10)
        # case = pick_peak_one(case)
        # case = make_decay_curve(case)
        case = local_southern(case)
        case = index_of_peaks_to_annotate(case)
        case = replicate_peaks(case)
        plot_clonality_case(case, replicate_only=False, w=1050, h=350)
        # except:
        # print('Failed on {}'.format(case))
if __name__ == '__main__':
    main()
|
[
"43114068+DocSupport@users.noreply.github.com"
] |
43114068+DocSupport@users.noreply.github.com
|
864f6c8e44747b438bdd00945bd88e7a810108db
|
6cd4d2923292004390a1b23dc26d0a7a4a7df223
|
/DjangoRedis/manage.py
|
9a25fcc6109e9b625d9a5bb7fcfab9c54f637263
|
[] |
no_license
|
Lyle101/docker_redis
|
4cc85b6c5c5784c3d032d129810ce49a0e4b09cc
|
f3b9db02ce65794d84220286c805ba799c0e79dd
|
refs/heads/master
| 2020-04-09T07:11:30.999829
| 2018-12-03T08:07:50
| 2018-12-03T08:07:50
| 160,144,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
#!/usr/bin/env python
# Django's standard command-line entry point ("manage.py").
# NOTE(review): the settings module is 'DjangoPrj.settings' while the
# file lives under DjangoRedis/ -- confirm the project package name.
import os
import sys
if __name__ == '__main__':
    # Point Django at the project settings unless already configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoPrj.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the requested management command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
|
[
"Chris.Lyle101@gmail.com"
] |
Chris.Lyle101@gmail.com
|
ecb41fb56f8890f13f0b34b3b3a1c309800192b5
|
a4957a563bbd3ce322e0cd0fec8e0a37650b5092
|
/calculatorv2.py
|
289ec6ac8e829cd174995e3ee1cb013560bce9ea
|
[] |
no_license
|
CodingFluent/Simple-CalculatorV2-Py
|
66632717a94d0b27a5c1994b6d5eaf062ee793f7
|
3af99215b4eb8b40cabdc840172506825e27f4e0
|
refs/heads/master
| 2022-12-10T19:00:53.607598
| 2020-08-31T06:01:54
| 2020-08-31T06:01:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# Simple interactive calculator: read two numbers and an operator, then
# print the result.  input() already returns str, so the original str()
# wrappers were redundant; float() still raises ValueError on
# non-numeric input (unchanged behaviour).
a = float(input("Enter First Number => "))
op = input("Enter Operation (+, -, *, /, %) => ")
b = float(input("Enter Second Number => "))
if op == "+":
    # 'result' instead of the original 'sum', which shadowed the builtin.
    result = a + b
    total = f"The sum of {a} + {b} is {result}"
elif op == "-":
    result = a - b
    total = f"The difference of {a} - {b} is {result}"
elif op == "*":
    result = a * b
    total = f"The multiplication of {a} * {b} is {result}"
elif op == "/":
    # Bug fix: guard against ZeroDivisionError instead of crashing.
    if b != 0:
        total = f"The division of {a} / {b} is {a / b}"
    else:
        total = "Cannot divide by zero."
elif op == "%":
    # Bug fix: modulo by zero also raises ZeroDivisionError.
    if b != 0:
        total = f"The module of {a} % {b} is {a % b}"
    else:
        total = "Cannot divide by zero."
else:
    total = "Please Enter an Valid Operation......."
print(total)
|
[
"noreply@github.com"
] |
noreply@github.com
|
badbe251c1d5142ea01e96e916591f5b6330a6ca
|
202b1b82a2b7a70250415ba5d9bd1f6b277a6e84
|
/share/qt/extract_strings_qt.py
|
acf54d0b19bbf49be33497e58552501d9f56933d
|
[
"MIT"
] |
permissive
|
cmkcoin/cmkcore
|
92cc4dcaf63b1d282ea2c2aa15ede822c9c7b0e7
|
5c2a3222ef901d1c6d9315177ba79e3f5094f2a6
|
refs/heads/master
| 2020-03-15T04:26:42.979962
| 2019-10-19T03:55:45
| 2019-10-19T03:55:45
| 131,965,565
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,850
|
py
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
# Generated C++ file that will hold the translatable strings.
OUT_CPP="qt/dashstrings.cpp"
# The empty msgid entry produced by xgettext's PO header; skipped below.
EMPTY=['""']
def parse_po(text):
    """Parse the 'po' format produced by xgettext.

    Returns a list of (msgid, msgstr) tuples, where each element is the
    list of quoted string fragments making up that entry (continuation
    lines starting with '"' are appended to the current fragment list).
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            # A new msgid closes the previous (msgid, msgstr) pair.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    if reading_str:
        # Flush the final entry.
        entries.append((current_id, current_str))
    return entries
# Run xgettext over the files given on the command line and convert the
# extracted messages into a generated C++ stringdef file for Qt linguist.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
# NOTE(review): under Python 3, `out` is bytes and parse_po's
# text.split('\n') would fail -- this script presumably targets
# Python 2; confirm before running under Python 3.
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *dash_strings[] = {\n')
# Sort by msgid so the generated file is deterministic.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the empty header entry emitted by xgettext.
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("cmk-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
[
"cmkdev@vps.cmk.io"
] |
cmkdev@vps.cmk.io
|
8da0a0f25fb1f42f41d710abf1ca39dc617b67dc
|
5e4a1d08d199722fde585723d06644e9999c144e
|
/input.py
|
af348c0972728af30a24ce077b2d8f0d4bcd81bf
|
[] |
no_license
|
JustDoIT83/CTI110
|
ca30948cd5dc4e30103a4adfb681f5090363462d
|
3817c2b935eb166f0086026f0cf73c7e96b2bb8d
|
refs/heads/master
| 2020-04-02T10:14:06.081690
| 2018-10-23T13:25:33
| 2018-10-23T13:25:33
| 154,330,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
# Prompt the user for their name, age, and income (all read as strings;
# no numeric conversion or validation is performed).
name = input('What is your name?: ')
age = input('What is your age?: ')
income = input('What is your income?: ')
# Echo the collected data back to the user.
print('here is the data you entered')
print('Name:', name)
print('Age:', age)
print('Income:', income)
|
[
"noreply@github.com"
] |
noreply@github.com
|
9cc2c3e325d074bfd93da7cd26d488883eadd91a
|
dd83f3a356278cd5ede9efa5ab25a93e258ef6b7
|
/slowfast/models/vit_helper.py
|
afa96024b9244b5160c7ff9fba7708ce3beda16c
|
[
"Apache-2.0"
] |
permissive
|
XrosLiang/Motionformer
|
9debfcaed5c68cce27ec3d1f5ebc409ae81066c5
|
890bded4139dc4b17e344ea9c090bf2de4dd2678
|
refs/heads/main
| 2023-06-02T16:50:06.222720
| 2021-06-12T11:38:24
| 2021-06-12T11:38:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,425
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright 2020 Ross Wightman
# Modified Model definition
"""Video models."""
from einops import rearrange, repeat
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
from torch import einsum
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from torch.hub import load_state_dict_from_url
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.registry import register_model
from . import performer_helper
from . import orthoformer_helper
from . import nystrom_helper
# URLs of ImageNet-pretrained ViT checkpoints (timm releases) consumed
# by load_pretrained(); keyed by cfg.VIT.PRETRAINED_WEIGHTS.
default_cfgs = {
    'vit_1k': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth',
    'vit_1k_large': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth',
}
def qkv_attn(q, k, v):
    """Plain softmax attention: softmax(q @ k^T) @ v.

    No scaling is applied here; callers pre-scale q when needed.
    Shapes: q is (b, i, d), k and v are (b, j, d); returns (b, i, d).
    """
    scores = einsum('b i d, b j d -> b i j', q, k)
    weights = scores.softmax(dim=-1)
    return einsum('b i j, b j d -> b i d', weights, v)
class JointSpaceTimeAttention(nn.Module):
    """Standard multi-head self-attention applied jointly over all
    space-time tokens (CLS token included)."""

    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        # Single projection yields q, k and v in one matmul.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        # seq_len / num_frames / approx / num_landmarks exist only to
        # match the shared attention interface; joint attention ignores
        # them.
        batch, tokens, channels = x.shape
        projected = self.qkv(x).reshape(
            batch, tokens, 3, self.num_heads, channels // self.num_heads)
        q, k, v = projected.permute(2, 0, 3, 1, 4).unbind(0)
        # Scaled dot-product attention over every token pair.
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, channels)
        out = self.proj_drop(self.proj(out))
        return out, attn
class DividedAttention(nn.Module):
    """Attention restricted to one axis (space or time) via einops
    reshapes, as used by the divided space-time block.  The CLS token
    always attends to all patches regardless of axis.
    """
    def __init__(
        self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        # init to zeros
        # NOTE(review): proj.weight is filled with ones (not identity)
        # and qkv with zeros -- presumably so a fresh layer starts as a
        # no-op on attention; confirm against the training recipe.
        # NOTE(review): with the default qkv_bias=False, self.qkv.bias
        # is None and the fill below raises AttributeError -- this class
        # appears to assume qkv_bias=True at construction.
        self.qkv.weight.data.fill_(0)
        self.qkv.bias.data.fill_(0)
        self.proj.weight.data.fill_(1)
        self.proj.bias.data.fill_(0)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x, einops_from, einops_to, **einops_dims):
        """Attend along the axis described by the einops pattern pair.

        einops_from/einops_to (plus einops_dims) choose whether patch
        tokens are grouped by frame (spatial attention) or by position
        (temporal attention); the caller supplies the patterns.
        """
        # num of heads variable
        h = self.num_heads
        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(
            t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
        # Scale q
        q *= self.scale
        # Take out cls_q, cls_k, cls_v
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q, k, v)
        # rearrange across time or space
        q_, k_, v_ = map(
            lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims),
            (q_, k_, v_)
        )
        # expand CLS token keys and values across time or space and concat
        r = q_.shape[0] // cls_k.shape[0]
        cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))
        k_ = torch.cat((cls_k, k_), dim=1)
        v_ = torch.cat((cls_v, v_), dim=1)
        # attention
        out = qkv_attn(q_, k_, v_)
        # merge back time or space
        out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
        # concat back the cls token
        out = torch.cat((cls_out, out), dim=1)
        # merge back the heads
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        ## to out
        x = self.proj(out)
        x = self.proj_drop(x)
        return x
class TrajectoryAttention(nn.Module):
    """Trajectory attention (Motionformer): spatial attention first
    aggregates each query's trajectory across frames, then a second
    attention pools along the temporal axis.  Several approximations
    (nystrom / orthoformer / performer) replace the full spatial step.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5
        # qkv for the spatial step; proj_q/proj_kv for the temporal step.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj_q = nn.Linear(dim, dim, bias=qkv_bias)
        self.proj_kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        """x is (B, 1 + seq_len*num_frames, C) with CLS first; returns
        (output, temporal attention weights)."""
        B, N, C = x.shape
        P = seq_len
        F = num_frames
        h = self.num_heads
        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        # Reshape: 'b n (h d) -> (b h) n d'
        q, k, v = map(
            lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
        # remove CLS token from q, k, v
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(
            lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
        # let CLS token attend to key / values of all patches across time and space
        cls_out = qkv_attn(cls_q * self.scale, k, v)
        cls_out = rearrange(cls_out, f'(b h) f d -> b f (h d)', f=1, h=h)
        if approx == "nystrom":
            ## Shared spatial landmarks
            # NOTE(review): q_ was already flattened to '(b h) p d'
            # above; this 4-D pattern looks inconsistent -- verify the
            # nystrom path is exercised/correct.
            q_, k_, v_ = map(
                lambda t: rearrange(t, f'b h p d -> (b h) p d', h=h), (q_, k_, v_))
            x = nystrom_helper.nystrom_spatial_attn(
                q_, k_, v_,
                landmarks=num_landmarks,
                num_frames=F,
                inv_iters=6,
                use_spatial_landmarks=True
            )
            x = rearrange(x, f'(b h) p f d -> b h p f d', f=F, h=h)
        elif approx == "orthoformer":
            x = orthoformer_helper.orthoformer(
                q_, k_, v_,
                num_landmarks=num_landmarks,
                num_frames=F,
            )
        elif approx == "performer":
            # Form random projection matrices:
            m = 256 # r = 2m, m <= d
            d = self.head_dim
            # Derive a data-dependent seed so the projection is stable
            # for a given input.
            seed = torch.ceil(torch.abs(torch.sum(q_) * performer_helper.BIG_CONSTANT))
            seed = torch.tensor(seed)
            projection_matrix = performer_helper.create_projection_matrix(
                m, d, seed=seed, device=q_.device, dtype=q_.dtype)
            q_, k_ = map(lambda t: rearrange(t, f'b h p d -> b p h d'), (q_, k_))
            q_prime = performer_helper.softmax_kernel_transformation(
                q_,
                is_query=True,
                projection_matrix=projection_matrix
            )
            k_prime = performer_helper.softmax_kernel_transformation(
                k_,
                is_query=False,
                projection_matrix=projection_matrix
            )
            q_prime, k_prime = map(
                lambda t: rearrange(t, f'b p h r -> b h p r'), (q_prime, k_prime))
            k_prime = rearrange(k_prime, 'b h (f n) r -> b h f n r', f=F)
            v_ = rearrange(v_, 'b h (f n) d -> b h f n d', f=F)
            # Linear-attention products per frame, then normalise.
            kv = torch.einsum('b h f n r, b h f n d -> b h f r d', k_prime, v_)
            qkv = torch.einsum('b h p r, b h f r d -> b h p f d', q_prime, kv)
            normaliser = torch.einsum('b h f n r -> b h f r', k_prime)
            normaliser = torch.einsum('b h p r, b h f r -> b h p f', q_prime, normaliser)
            x = qkv / normaliser.unsqueeze(-1)
        else:
            # Using full attention
            q_dot_k = q_ @ k_.transpose(-2, -1)
            q_dot_k = rearrange(q_dot_k, 'b q (f n) -> b q f n', f=F)
            # Softmax within each frame (trajectory construction).
            space_attn = (self.scale * q_dot_k).softmax(dim=-1)
            attn = self.attn_drop(space_attn)
            v_ = rearrange(v_, 'b (f n) d -> b f n d', f=F, n=P)
            x = torch.einsum('b q f n, b f n d -> b q f d', attn, v_)
        # Temporal attention: query is the similarity-aggregated patch
        x = rearrange(x, '(b h) s f d -> b s f (h d)', b=B)
        # Take each token's own-frame trajectory point as the query.
        x_diag = rearrange(x, 'b (g n) f d -> b g n f d', g=F)
        x_diag = torch.diagonal(x_diag, dim1=-4, dim2=-2)
        x_diag = rearrange(x_diag, f'b n d f -> b (f n) d', f=F)
        q2 = self.proj_q(x_diag)
        k2, v2 = self.proj_kv(x).chunk(2, dim=-1)
        q2 = rearrange(q2, f'b s (h d) -> b h s d', h=h)
        # NOTE(review): v2 is computed but the einsum below attends over
        # x rather than v2 -- confirm this matches the paper's intent.
        x, k2, v2 = map(
            lambda t: rearrange(t, f'b s f (h d) -> b h s f d', f=F, h=h), (x, k2, v2))
        q2 *= self.scale
        attn = torch.einsum('b h s d, b h s f d -> b h s f', q2, k2)
        attn = attn.softmax(dim=-1)
        x = torch.einsum('b h s f, b h s f d -> b h s d', attn, x)
        x = rearrange(x, f'b h s d -> b s (h d)')
        # concat back the cls token
        x = torch.cat((cls_out, x), dim=1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn
def get_attention_module(
    attn_type='joint', dim=768, num_heads=12, qkv_bias=False,
    attn_drop=0., proj_drop=0.
):
    """Factory for the attention flavour used inside a Block.

    Args:
        attn_type: 'joint' for JointSpaceTimeAttention or 'trajectory'
            for TrajectoryAttention.
        dim, num_heads, qkv_bias, attn_drop, proj_drop: forwarded to the
            chosen attention module.

    Returns:
        The constructed attention nn.Module.

    Raises:
        ValueError: if attn_type is not 'joint' or 'trajectory'.
    """
    if attn_type == 'joint':
        return JointSpaceTimeAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    if attn_type == 'trajectory':
        return TrajectoryAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=proj_drop)
    # Bug fix: an unknown type previously fell through to `return attn`
    # and raised a confusing UnboundLocalError; fail explicitly instead.
    raise ValueError(f"Unsupported attention type: {attn_type!r}")
class Block(nn.Module):
    """Transformer encoder block: pre-norm attention (joint or
    trajectory) followed by a pre-norm MLP, each wrapped in a residual
    connection with stochastic depth (DropPath).
    """
    def __init__(
        self, dim=768, num_heads=12, attn_type='trajectory',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = get_attention_module(
            attn_type=attn_type, dim=dim, num_heads=num_heads,
            qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop
        )
        # Identity when drop_path == 0 keeps eval-time behaviour exact.
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        """Apply attention + MLP with residuals; returns tokens only."""
        x = x + self.drop_path(
            self.attn(
                self.norm1(x),
                seq_len=seq_len,
                num_frames=num_frames,
                approx=approx,
                num_landmarks=num_landmarks
            )[0]  # attention modules return (out, attn); keep only out
        )
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class DividedSpaceTimeBlock(nn.Module):
    """TimeSformer-style block: temporal attention, then spatial
    attention, then an MLP, each with its own residual connection.
    The einops patterns below group patch tokens either by frame
    (spatial) or by spatial position (temporal).
    """
    def __init__(
        self, dim=768, num_heads=12, attn_type='divided',
        mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
        drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm
    ):
        super().__init__()
        # Patterns: '(f n)' tokens split per-frame for space, per-position
        # for time.
        self.einops_from_space = 'b (f n) d'
        self.einops_to_space = '(b f) n d'
        self.einops_from_time = 'b (f n) d'
        self.einops_to_time = '(b n) f d'
        self.norm1 = norm_layer(dim)
        self.attn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.timeattn = DividedAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias,
            attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim,
            act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
    def forward(self, x, seq_len=196, num_frames=8, approx='none', num_landmarks=128):
        """approx/num_landmarks are accepted only for interface parity
        with Block; divided attention ignores them."""
        time_output = self.timeattn(self.norm3(x),
                        self.einops_from_time, self.einops_to_time, n=seq_len)
        # NOTE(review): the temporal residual skips drop_path, unlike
        # the spatial and MLP branches -- confirm this is intentional.
        time_residual = x + time_output
        space_output = self.attn(self.norm1(time_residual),
                        self.einops_from_space, self.einops_to_space, f=num_frames)
        space_residual = time_residual + self.drop_path(space_output)
        x = space_residual
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
class Mlp(nn.Module):
    """Two-layer feed-forward block (linear -> activation -> dropout ->
    linear -> dropout), as used inside transformer blocks.

    hidden_features and out_features default to in_features when not
    given.
    """

    def __init__(
        self, in_features, hidden_features=None,
        out_features=None, act_layer=nn.GELU, drop=0.
    ):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # The same dropout module is applied after both layers.
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class PatchEmbed(nn.Module):
    """Image to Patch Embedding.

    Splits an image into non-overlapping patches via a strided Conv2d
    and flattens them into a (B, num_patches, embed_dim) sequence.
    """
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = img_size if isinstance(img_size, tuple) else to_2tuple(img_size)
        # Bug fix: the original assigned img_size here when patch_size
        # was already a tuple, breaking tuple patch sizes entirely.
        patch_size = patch_size if isinstance(patch_size, tuple) else to_2tuple(patch_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        # (B, C, H, W) -> (B, embed_dim, H/ph, W/pw) -> (B, num_patches, embed_dim)
        B, C, H, W = x.shape
        x = self.proj(x).flatten(2).transpose(1, 2)
        return x
class PatchEmbed3D(nn.Module):
    """Video to Patch Embedding.

    Embeds a clip with a strided 3D convolution over (time, height,
    width); optionally flattens to a (B, num_patches, embed_dim)
    sequence.
    """
    def __init__(
        self, img_size=224, temporal_resolution=4, in_chans=3,
        patch_size=16, z_block_size=2, embed_dim=768, flatten=True
    ):
        super().__init__()
        self.height = img_size // patch_size
        self.width = img_size // patch_size
        self.frames = temporal_resolution // z_block_size
        self.num_patches = self.frames * self.height * self.width
        # Kernel == stride makes the patches non-overlapping.
        kernel = (z_block_size, patch_size, patch_size)
        self.proj = nn.Conv3d(in_chans, embed_dim,
                              kernel_size=kernel,
                              stride=kernel)
        self.flatten = flatten

    def forward(self, x):
        B, C, T, H, W = x.shape
        embedded = self.proj(x)
        if self.flatten:
            embedded = embedded.flatten(2).transpose(1, 2)
        return embedded
class HeadMLP(nn.Module):
    """Classification head: a dropout + linear probe when n_hidden is
    None, otherwise a one-hidden-layer MLP with batch norm, ReLU and
    dropout."""

    def __init__(self, n_input, n_classes, n_hidden=512, p=0.1):
        super(HeadMLP, self).__init__()
        self.n_input = n_input
        self.n_classes = n_classes
        self.n_hidden = n_hidden
        if n_hidden is None:
            # Linear classifier.
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_classes, bias=True),
            ]
        else:
            # Simple MLP classifier.
            layers = [
                nn.Dropout(p=p),
                nn.Linear(n_input, n_hidden, bias=True),
                nn.BatchNorm1d(n_hidden),
                nn.ReLU(inplace=True),
                nn.Dropout(p=p),
                nn.Linear(n_hidden, n_classes, bias=True),
            ]
        self.block_forward = nn.Sequential(*layers)
        print(f"Dropout-NLP: {p}")

    def forward(self, x):
        return self.block_forward(x)
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
def adapt_input_conv(in_chans, conv_weight, agg='sum'):
    """Adapt a pretrained 3-channel conv1 weight to `in_chans` inputs.

    For in_chans == 1 the RGB filters are summed (or averaged); for
    other channel counts the filters are tiled and rescaled so the
    expected activation magnitude is preserved.  The result keeps the
    weight's original dtype.

    Raises:
        NotImplementedError: when the source weight does not have 3
            input channels and in_chans is neither 1 nor 3.
    """
    original_dtype = conv_weight.dtype
    conv_weight = conv_weight.float()  # do the arithmetic in float32
    out_ch, in_ch, kh, kw = conv_weight.shape
    if in_chans == 1:
        if in_ch > 3:
            # For models with space2depth stems: fold groups of 3.
            assert in_ch % 3 == 0
            conv_weight = conv_weight.reshape(out_ch, in_ch // 3, 3, kh, kw)
            conv_weight = conv_weight.sum(dim=2, keepdim=False)
        elif agg == 'sum':
            print("Summing conv1 weights")
            conv_weight = conv_weight.sum(dim=1, keepdim=True)
        else:
            print("Averaging conv1 weights")
            conv_weight = conv_weight.mean(dim=1, keepdim=True)
    elif in_chans != 3:
        if in_ch != 3:
            raise NotImplementedError('Weight format not supported by conversion.')
        elif agg == 'sum':
            print("Summing conv1 weights")
            # Tile the RGB filters, truncate to in_chans, and rescale so
            # the summed response stays comparable.
            repeat = int(math.ceil(in_chans / 3))
            conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
            conv_weight *= (3 / float(in_chans))
        else:
            print("Averaging conv1 weights")
            conv_weight = conv_weight.mean(dim=1, keepdim=True)
            conv_weight = conv_weight.repeat(1, in_chans, 1, 1)
    return conv_weight.to(original_dtype)
def load_pretrained(
    model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True, progress=False
):
    """Load ImageNet-pretrained ViT weights (from default_cfgs) into model.

    Downloads the checkpoint named by cfg.VIT.PRETRAINED_WEIGHTS, adapts
    the input conv for non-RGB inputs, drops/offsets the classifier head
    when class counts differ, then copies every shape-matching tensor
    into the model in place, printing what could not be loaded.

    Args:
        model: target nn.Module.
        cfg: config object exposing VIT.PRETRAINED_WEIGHTS and a dict-like
            .get('label_offset', 0).
        num_classes: class count of `model`'s head.
        in_chans: input channel count of `model`.
        filter_fn: optional state-dict transform (e.g. _conv_filter).
        strict, progress: kept for interface compatibility; `strict` is
            only mutated locally and `progress` is unused here.
    """
    # Load state dict.  Bug fix: the original wrote `assert(f"...")`,
    # which asserts a non-empty string and therefore never fails.
    assert cfg.VIT.PRETRAINED_WEIGHTS in default_cfgs, (
        f"{cfg.VIT.PRETRAINED_WEIGHTS} not in [vit_1k, vit_1k_large]")
    state_dict = torch.hub.load_state_dict_from_url(url=default_cfgs[cfg.VIT.PRETRAINED_WEIGHTS])
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    input_convs = 'patch_embed.proj'
    if input_convs is not None and in_chans != 3:
        if isinstance(input_convs, str):
            input_convs = (input_convs,)
        for input_conv_name in input_convs:
            weight_name = input_conv_name + '.weight'
            try:
                state_dict[weight_name] = adapt_input_conv(
                    in_chans, state_dict[weight_name], agg='avg')
                print(
                    f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)')
            except NotImplementedError as e:
                # Unconvertible stem: drop it and fall back to random init.
                del state_dict[weight_name]
                strict = False
                print(
                    f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.')
    classifier_name = 'head'
    label_offset = cfg.get('label_offset', 0)
    pretrain_classes = 1000
    if num_classes != pretrain_classes:
        # Completely discard the fully connected head if the model's
        # num_classes doesn't match the pretrained weights.
        del state_dict[classifier_name + '.weight']
        del state_dict[classifier_name + '.bias']
        strict = False
    elif label_offset > 0:
        # Special case for pretrained weights with an extra background
        # class: slice it off the classifier.
        classifier_weight = state_dict[classifier_name + '.weight']
        state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:]
        classifier_bias = state_dict[classifier_name + '.bias']
        state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:]
    # Copy every tensor whose (module.-stripped) name and shape match.
    loaded_state = state_dict
    self_state = model.state_dict()
    all_names = set(self_state.keys())
    saved_names = set([])
    for name, param in loaded_state.items():
        if 'module.' in name:
            name = name.replace('module.', '')
        if name in self_state.keys() and param.shape == self_state[name].shape:
            saved_names.add(name)
            self_state[name].copy_(param)
        else:
            print(f"didnt load: {name} of shape: {param.shape}")
    print("Missing Keys:")
    print(all_names - saved_names)
|
[
"mandelapatrick@devfair0297.h2.fair"
] |
mandelapatrick@devfair0297.h2.fair
|
3ff18915969da0e6505bd95f4d68b34cfdb72eb5
|
e2cb95d74ff13247a706a4a949e22fb397efe7b7
|
/A2 - Digital Makeup Transfer/src/faceWarp.py
|
9a20045a0b4934f6294b0a14c9d6558b1da7a672
|
[] |
no_license
|
Aditi-Singla/Digital-Image-Analysis
|
945beb48bfbd1f7bb75d76059d5faafcfe88881f
|
8fc08ee86c5a168e3dc6d3b22c4be5bf2195458d
|
refs/heads/master
| 2020-04-01T00:36:28.232484
| 2018-07-18T18:45:20
| 2018-07-18T18:45:20
| 152,704,480
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,948
|
py
|
#!/usr/bin/env python
import numpy as np
import cv2
import sys
import scipy.spatial
# Read points from text file
def readPoints(path):
    """Read facial landmark points from a text file.

    Each line must contain an 'x y' pair separated by whitespace.
    Returns a list of (x, y) tuples of np.float32 values, in file order.
    """
    with open(path) as handle:
        return [
            (np.float32(sx), np.float32(sy))
            for sx, sy in (line.split() for line in handle)
        ]
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
# Apply affine transform calculated using srcTri and dstTri to src and
# output an image of size.
def applyAffineTransform(src, srcTri, dstTri, size) :
    """Warp `src` with the affine map taking srcTri onto dstTri.

    Args:
        src: source image patch (numpy array).
        srcTri, dstTri: corresponding triangles as 3-point sequences.
        size: (width, height) of the output patch.

    Returns:
        The warped patch, same dtype as `src`, using border reflection
        to avoid edge artifacts.
    """
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform( np.float32(srcTri), np.float32(dstTri) )
    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine( src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )
    return dst
def warpTriangle(img1, img, t1, t) :
    """Warp triangle `t1` of `img1` onto triangle `t` of `img` in place.

    The triangle is warped inside its bounding rectangle and blended
    into `img` through a filled-triangle mask, so only pixels inside
    the destination triangle change.
    """
    # Find bounding rectangle for each triangle
    r1 = cv2.boundingRect(np.float32([t1]))
    r = cv2.boundingRect(np.float32([t]))
    # Offset points by left top corner of the respective rectangles
    t1Rect = []
    tRect = []
    # Bug fix: the original used Python-2-only xrange(), which raises
    # NameError under Python 3; range() behaves identically here.
    for i in range(0, 3):
        tRect.append(((t[i][0] - r[0]),(t[i][1] - r[1])))
        t1Rect.append(((t1[i][0] - r1[0]),(t1[i][1] - r1[1])))
    # Get mask by filling triangle
    mask = np.zeros((r[3], r[2], 3), dtype = np.float32)
    cv2.fillConvexPoly(mask, np.int32(tRect), (1.0, 1.0, 1.0), 16, 0)
    # Apply warpImage to small rectangular patches
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r[2], r[3])
    warpImage = applyAffineTransform(img1Rect, t1Rect, tRect, size)
    # Alpha blend rectangular patches
    imgRect = warpImage
    # Copy triangular region of the rectangular patch to the output image
    img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * ( 1 - mask ) + imgRect * mask
# Entry point: warp the face in image 1 onto the geometry of image 2
# using Delaunay triangulation over pre-annotated landmark files
# ('<image>.txt'), writing the result to 'warped.jpg'.
if __name__ == '__main__' :
    filename1 = sys.argv[1]
    filename2 = sys.argv[2]
    # Read images
    img1 = cv2.imread(filename1);
    img2 = cv2.imread(filename2);
    # Convert Mat to float data type
    img1 = np.float32(img1)
    img2 = np.float32(img2)
    # Read array of corresponding points (one landmark file per image)
    points1 = readPoints(filename1 + '.txt')
    points2 = readPoints(filename2 + '.txt')
    # Triangulate on image 1's landmarks; the same vertex indices are
    # then applied to image 2's landmarks.
    tri = scipy.spatial.Delaunay(np.array(points1))
    # Allocate space for final output
    imgMorph = np.zeros(img2.shape, dtype = img2.dtype)
    # NOTE(review): np.uint8 of vertex indices overflows for >255
    # landmarks -- fine for typical 68-point sets, but confirm.
    np.savetxt('tri.txt', np.uint8(tri.vertices), fmt='%d')
    for l in tri.vertices :
        x = int(l[0])
        y = int(l[1])
        z = int(l[2])
        t1 = [points1[x], points1[y], points1[z]]
        t2 = [ points2[x], points2[y], points2[z] ]
        # Morph one triangle at a time.
        warpTriangle(img1, imgMorph, t1, t2)
    # Display Result
    cv2.imwrite('warped.jpg', np.uint8(imgMorph))
|
[
"aditisksingla@gmail.com"
] |
aditisksingla@gmail.com
|
016e33094e39966281d2775ad6be6442e4a27330
|
63e06ef221242c2c614750df02b4283989e13052
|
/projeto_da_roca/users/migrations/0002_auto_20210521_1213.py
|
b49e9079706612918bcb18961c11420541017361
|
[] |
no_license
|
amandacl/Da_Roca
|
97ada3b6abe6df25258a34f82954c07c597daae6
|
b6187d62b91f06e0afb523a84194ad12467a89b4
|
refs/heads/master
| 2023-06-21T11:59:14.891738
| 2021-06-02T02:13:02
| 2021-06-02T02:13:02
| 368,898,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
# Generated by Django 3.2.3 on 2021-05-21 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: loosen address.house_number, make
    user.cpf and user.email unique.

    NOTE(review): max_length on an IntegerField is ignored by Django
    (system check fields.W122); it likely belonged on a CharField.
    Migrations are historical records, so the kwarg is documented here
    rather than edited.
    """
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='address',
            name='house_number',
            field=models.IntegerField(blank=True, max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='cpf',
            # 11 digits matches the Brazilian CPF format.
            field=models.CharField(blank=True, max_length=11, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True),
        ),
    ]
|
[
"matheus.noronha@solyd.com.br"
] |
matheus.noronha@solyd.com.br
|
425be2dac09edaf397a3412fc17709976e67201f
|
de7a39129bf471d4d4be25c65174916a505146e6
|
/book/examples/weave_examples_simple.py
|
1dc25d425bcf85bc9a527aca248b38e6572a0caa
|
[] |
no_license
|
jdh2358/py4science
|
a6da01de9cb16709828bfd801bf7faf847f346bb
|
a56c742ec2e0a31c2251468d9947ebaf707346d7
|
refs/heads/master
| 2016-09-05T22:18:38.520426
| 2009-12-05T17:47:26
| 2009-12-05T17:47:26
| 1,418,846
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,214
|
py
|
"""Some simple examples of weave.inline use"""
from weave import inline,converters
import Numeric as nx
from pylab import rand
#-----------------------------------------------------------------------------
# Returning a scalar quantity computed from a Numeric array.
def trace(mat):
    """Return the trace of a matrix.

    The sum of the diagonal is computed in C via weave.inline; `mat`
    must be a 2-D Numeric array (this file targets Python 2 / the
    legacy Numeric + weave stack).
    """
    nrow,ncol = mat.shape
    # C snippet compiled by weave; nrow/ncol/mat are injected below.
    code = \
    """
    double tr=0.0;
    for(int i=0;i<nrow;++i)
        tr += mat(i,i);
    return_val = tr;
    """
    return inline(code,['mat','nrow','ncol'],
                  type_converters = converters.blitz)
# In-place operations on arrays in general work without any problems
# In-place operations on arrays in general work without any problems
def in_place_mult(num,mat):
    """In-place multiplication of a matrix by a scalar.

    Mutates `mat` directly (no return value); `mat` must be a 2-D
    Numeric array.  Runs as compiled C via weave.inline.
    """
    nrow,ncol = mat.shape
    code = \
    """
    for(int i=0;i<nrow;++i)
        for(int j=0;j<ncol;++j)
            mat(i,j) *= num;
    """
    inline(code,['num','mat','nrow','ncol'],
           type_converters = converters.blitz)
def main():
    """Demo driver: exercise trace() and in_place_mult() on small
    arrays.  Python 2 only (print statements, Numeric's nx.Float)."""
    zz = nx.zeros([10,10])
    print 'tr(zz)=',trace(zz)
    oo = nx.ones([4,4],nx.Float)
    print 'tr(oo)=',trace(oo)
    aa = rand(128,128)
    print 'tr(aa)=',trace(aa)
    print 'oo:',oo
    in_place_mult(3,oo)
    print '3*oo:',oo
if __name__=='__main__':
    main()
|
[
"jdh2358@gmail.com"
] |
jdh2358@gmail.com
|
2e9d8f40ea73bf3323400de1ac413068f242e213
|
313978a9a5a1f0824a6f2bfb948e1a4ec0225213
|
/4-iteração/lazy iterable e iterator.py
|
7337513a1d77423de94a8c51d7d35f8de1e0a3f6
|
[] |
no_license
|
wallacex19/python
|
71ae310a6a6ec2f1c8c80d4ad2bee7db2d391d13
|
99f11249fec5e001e10b2a155c2608e9b8b420ec
|
refs/heads/master
| 2023-04-08T22:41:34.259091
| 2021-04-23T20:58:18
| 2021-04-23T20:58:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,384
|
py
|
# A range object in Python 3 (xrange in Python 2) can be looped over
# like any other iterable:
for n in range(3):
    print(n)
# And since range is iterable, we can obtain an iterator from it:
iter(range(3))
# R:<range_iterator object at 0x7fe173542ed0>
# But range objects are not iterators themselves; we cannot call next()
# on a range object.
# NOTE(review): the next() call below raises TypeError and stops the
# script when run top-to-bottom -- presumably intended as an
# interactive/teaching demonstration of the error shown below.
next(range(3))
# R:Traceback (most recent call last):
#   File "<stdin>", line 1, in <module>
# TypeError: 'range' object is not an iterator
# And, unlike an iterator, we can loop over a range object without
# consuming it:
numbers = range(3)
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
# R:(0, 1, 2)
# If we did the same with an iterator, we'd get no elements the second
# time we looped:
numbers = iter(range(3))
tuple(numbers)
# R:(0, 1, 2)
tuple(numbers)
# R:()
# Unlike zip, enumerate or generator objects, range objects are not
# iterators.
## -- SO WHAT IS RANGE, THEN? --##
# The range object is "lazy" in a sense: it does not generate all the
# numbers it "contains" when we create it.  Instead, it hands us those
# numbers as we need them while looping over it.
#
# Here is a range object and a generator (which is a kind of iterator):
numbers = range(1_000_000)
square = (n**2 for n in numbers)
|
[
"pedromadureira000@gmail.com"
] |
pedromadureira000@gmail.com
|
defbb28049ad7d422477ecaaabdf790640d21b17
|
c5e6a4e0264409f4dc5db9993c8c0cc058d4365a
|
/8_juego_ahorcado.py
|
c36c4f69dcc49dcd6f1cc0a09e02d34d9823de2c
|
[] |
no_license
|
carlosafdz/programacion_python
|
05c91eb858ce12b9fd2e9e3fd4e902c66ea2ee2d
|
17b0db4dcf923d6de3fdfd9c9e78b1d1a50651ea
|
refs/heads/master
| 2023-05-24T20:32:22.614224
| 2020-03-21T18:26:30
| 2020-03-21T18:26:30
| 248,345,937
| 0
| 0
| null | 2023-05-22T23:22:23
| 2020-03-18T21:22:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,486
|
py
|
import random
IMAGENES = [
'''
+=======+
| |
|
|
|
|
======
''',
'''
+=======+
| |
O |
|
|
|
======
''',
'''
+=======+
| |
O |
| |
|
|
======
''',
'''
+=======+
| |
O |
/| |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
|
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ |
|
======
''',
'''
+=======+
| |
O |
/|\ |
/ \ |
|
======
''',
''' '''
]
# Word pool for the hangman game.
# NOTE(review): "pepel" looks like a typo for "papel" -- confirm before
# changing, since it is player-visible game data.
PALABRAS = ["lavadora", "secadora", "pepel", "computadora"]


def palabra_random():
    """Return a word chosen uniformly at random from PALABRAS.

    random.choice replaces the original randint/index construction; it is
    equivalent, cannot go out of bounds, and reads as intent.
    """
    return random.choice(PALABRAS)
def mostrar_tablero(palabra_escondida, intentos):
    """Print the current game board: the gallows drawing for this many
    failed attempts, a blank line, the partially guessed word, and a
    separator rule.
    """
    filas = (
        IMAGENES[intentos],
        '',
        palabra_escondida,
        "*---**---**---**---**---**---**---**---**---*",
    )
    for fila in filas:
        print(fila)
def main():
    """Run one interactive round of hangman (Spanish prompts)."""
    palabra = palabra_random()
    # One "_" placeholder per letter, revealed as letters are guessed.
    palabra_escondida = ["_"] * len(palabra)
    intentos = 0
    while True:
        mostrar_tablero(palabra_escondida, intentos)
        letra = input("escoge una letra: ")
        # Collect every index where the guessed letter occurs in the word.
        indice_letras = []
        for i in range(len(palabra)):
            if palabra[i] == letra:
                indice_letras.append(i)
        if len(indice_letras) == 0:
            # Miss: advance the gallows drawing; 7 misses loses the game.
            intentos = intentos + 1
            if intentos == 7:
                mostrar_tablero(palabra_escondida, intentos)
                print(f'Perdiste..... la palabra correcta era {palabra}')
                break
        else:
            # Hit: reveal the letter at every matching position.
            for i in indice_letras:
                palabra_escondida[i] = letra
            indice_letras = []
        # Win check: the player wins once no "_" placeholders remain
        # (list.index raises ValueError when "_" is absent).
        try:
            palabra_escondida.index("_")
        except ValueError:
            print(" ")
            print("ganaste!!!")
            break
def pruebas_tablero():
    """Visual smoke test: render the board once for every attempt count
    from 0 through 6 with a fixed placeholder word.
    """
    for intentos in range(7):
        mostrar_tablero("palabra", intentos)


if __name__ == "__main__":
    main()
    # pruebas_tablero()
|
[
"carlos.afdzf@hotmail.com"
] |
carlos.afdzf@hotmail.com
|
e05f09d686cf4fc1af26ff93dd112cabeaac5381
|
60e2b0f728bf7b497e241afdacffaa8ee9203213
|
/breast_cancer/breast_cancer_load.py
|
c7e6f6f48a1be53f7a7d856378b2b85efd42ffca
|
[] |
no_license
|
yamadayoshi/deep_learning
|
43897d59dc3f89ecd4820050b96acacbf653408e
|
78bbf5b12011a5d17375b50b75203251003cb3d0
|
refs/heads/master
| 2021-02-19T01:02:57.934801
| 2020-03-10T20:02:45
| 2020-03-10T20:02:45
| 245,260,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
import numpy as np
from keras.models import model_from_json

# Read the serialized model architecture (JSON produced by model.to_json()).
file = open('breast_model.json', 'r')
network = file.read()
file.close()

# Rebuild the network from the JSON description, then load trained weights.
model = model_from_json(network)
model.load_weights('breast_weights.h5')

# A single sample with 30 features (matching the breast-cancer dataset's
# feature count).  NOTE(review): the values look like placeholders -- the
# feature ordering/scaling presumably must match the training pipeline;
# confirm before relying on the prediction.
novo = np.array([[10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15, 10.2,5.6,155.0,15.4,18.5,75.5,15.9,79.4,56.9,15]])
# Binary decision at a 0.8 probability threshold (stricter than the usual
# 0.5 -- presumably intentional; confirm).
previsao = model.predict(novo) > 0.8
|
[
"andre.yamada@digiage.com"
] |
andre.yamada@digiage.com
|
c8fc1b630938f22c3762d543e169f25db756d2bd
|
fb23a842c99f9a5238a9c6dfb3ffa6eee5c3e47d
|
/Salt-api/python版示例/V2/diaoyong.py
|
f2f32adde67640cdb991d2d8e8fc1ff6f921dc29
|
[] |
no_license
|
nanzhushan/Saltstack
|
45a492855860a5664f1c0a2099935ae95a17d0de
|
d9fc85a7be1861b13e6de55de9b6951e405fffb7
|
refs/heads/master
| 2021-05-31T16:37:09.928023
| 2016-04-11T07:04:10
| 2016-04-11T07:04:10
| 39,339,839
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
#!/usr/bin/python
#coding:utf8
# Demo script (Python 2): run a shell command on all salt minions through
# the salt-api wrapper defined in saltapi.py.
from saltapi import *
#import saltapi

sapi = saltAPI()

# Example parameter sets for salt's LocalClient:
#params = {'client':'local', 'fun':'test.ping', 'tgt':'*'}
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hello'}
# 'arg1' can also be written as 'arg'
#params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'hostname'}
# Target every minion ('tgt': '*') and run two touch commands via cmd.run.
params = {'client':'local','tgt':'*', 'fun':'cmd.run', 'arg1':'touch /root/cc.txt;touch cc1.txt'}
test = sapi.saltCmd(params)
#test = sapi.saltCmd()
print test
|
[
"624867243@qq.com"
] |
624867243@qq.com
|
218046a18f59c8cc6a566f6a16807e74d5250298
|
a4e502e9487cf17c53f9f931ec0dbc12168fea52
|
/packages/pyre/platforms/PackageManager.py
|
0877270914d7a2f1326787f57abfbb1ac0125b31
|
[
"BSD-3-Clause"
] |
permissive
|
bryanvriel/pyre
|
bdc5dd59c46d53ff81f2ece532b9073ac3b65be1
|
179359634a7091979cced427b6133dd0ec4726ea
|
refs/heads/master
| 2021-09-28T00:10:26.454282
| 2018-11-11T16:42:07
| 2018-11-11T16:42:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,373
|
py
|
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# the framework
import pyre
# declaration
class PackageManager(pyre.protocol, family='pyre.platforms.packagers'):
    """
    Encapsulation of host specific information
    """

    # requirements
    @pyre.provides
    def prefix(self):
        """
        The package manager install location
        """

    @pyre.provides
    def installed(self):
        """
        Retrieve available information for all installed packages
        """

    @pyre.provides
    def packages(self, category):
        """
        Provide a sequence of package names that provide compatible installations for the given
        package {category}. If the package manager provides a way for the user to select a
        specific installation as the default, care should be taken to rank the sequence
        appropriately.
        """

    @pyre.provides
    def info(self, package):
        """
        Return information about the given {package}

        The type of information returned is determined by the package manager. This method
        should return success if and only if {package} is actually fully installed.
        """

    @pyre.provides
    def contents(self, package):
        """
        Generate a sequence of the contents of {package}

        The type of information returned is determined by the package manager. Typically, it
        contains the list of files that are installed by this package, but it may contain other
        filesystem entities as well. This method should return a non-empty sequence if and only
        if {package} is actually fully installed
        """

    @pyre.provides
    def configure(self, packageInstance):
        """
        Dispatch to the {packageInstance} configuration procedure that is specific to the
        particular implementation of this protocol
        """

    # framework obligations
    @classmethod
    def pyre_default(cls, **kwds):
        """
        Build the preferred host implementation
        """
        # the host should specify a sensible default; if there is nothing there, this is an
        # unmanaged system that relies on environment variables and standard locations
        from .Bare import Bare
        # return the support for unmanaged systems
        return Bare
# end of file
|
[
"michael.aivazis@orthologue.com"
] |
michael.aivazis@orthologue.com
|
acc5c7355bf61f8fbde46568884e95f5b124e22c
|
4cfb9d75361f3c7f50744878e645073e3a8fc8d4
|
/sinx+sinx fft.py
|
ab1696136d9a7c9f87d523e156f07203ab760d85
|
[] |
no_license
|
mychenyoke/gwwave1
|
ac99c982b5037e8afff42e3055de366ddd8543dd
|
7520846ab848ac2434db11ceb66a271d5ab68270
|
refs/heads/master
| 2020-03-18T13:47:02.888171
| 2018-05-28T15:18:36
| 2018-05-28T15:18:36
| 134,808,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
import numpy as np
import matplotlib.pyplot as plt

# Two angular frequencies (rad/sample) and the sampling rate used to label
# the FFT frequency axis.
omega1 = 0.1
omega2 = 0.2
sample_rate = 20

# 100-sample signals: a single sine and the sum of two sines.
a = np.arange(0, 100)
sina = np.sin(omega1 * a)
sinb = np.sin(omega2 * a) + np.sin(omega1 * a)

plt.figure(figsize=(10, 24))

plt.subplot(4, 1, 1)
plt.title("sinax")
plt.plot(a, sina)
plt.savefig("sinax")

plt.subplot(4, 1, 2)
plt.title("sinax+sinbx")
plt.plot(a, sinb)
plt.savefig("sinax+sinbx")

# Frequency bin centers for a length-100 signal at `sample_rate` Hz.
fft_frequency = np.fft.fftfreq(len(a), 1 / sample_rate)

fft_sina = np.fft.fft(sina)
# Print the magnitude spectrum (the dead `aa = []` initializer is gone;
# `aa` was immediately rebound to abs(fft_sina) anyway).
aa = abs(fft_sina)
for ab in aa:
    print(ab)

fft_sinb = np.fft.fft(sinb)

plt.subplot(4, 1, 3)
plt.title("FFT_Frequency_sinax")
plt.plot(fft_frequency, abs(fft_sina))
plt.savefig("FFT_Frequency_sinax")

plt.subplot(4, 1, 4)
plt.title("FFT_Frequency_sinax+sinbx")
# BUG FIX: the original plotted the complex FFT array directly, which
# silently discards the imaginary part; plot the magnitude, consistent
# with the subplot above.
plt.plot(fft_frequency, abs(fft_sinb))
plt.savefig("FFT_Frequency_sinax+sinbx")
|
[
"noreply@github.com"
] |
noreply@github.com
|
71c917f941655f147f642dba17548ed3889df18d
|
3328e95f5a8498ab366aec380f0e1822826ba7a9
|
/puppy.py
|
5ecb1fddd03ca00ec9d69d3d7ed91e3934b08270
|
[] |
no_license
|
Abhiram1214/opencv
|
6e9dd53cc08c54a8e1ce6f0c297fda451ddb7c31
|
653a9ccddbc188679bc9afe8f83d98a93b47cf3d
|
refs/heads/main
| 2022-12-26T12:03:41.308652
| 2020-10-11T12:01:53
| 2020-10-11T12:01:53
| 301,957,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import cv2
import numpy as np
'''
img = cv2.imread(r'C:\Users\cvenkatanagasatya\Pictures\Open CV\Computer-Vision-with-Python\DATA\puppy.jpg')
while True:
cv2.imshow('puppy', img)
#if we waited for milli second and we pressed the esc key
if cv2.waitKey(1) & 0xFF == 27:
break
cv2.destroyAllWindows()
'''
######################
#####Function#########
#####################
def draw_circle(event, x, y, flags, params):
    """Mouse callback: paint a filled green circle at the click position.

    NOTE(review): in the extracted original the `if` branch had no body
    (a syntax error as written); the standard OpenCV-tutorial body is
    restored here -- confirm against the upstream file.
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        # 100 px radius, BGR green, thickness -1 => filled circle on the
        # module-level image created below.
        cv2.circle(img, (x, y), 100, (0, 255, 0), -1)


cv2.namedWindow(winname='Images')  # window that will receive mouse events
cv2.setMouseCallback('Images', draw_circle)  # bind the callback to it
######################################
##### Showing images in OpenCV#########
#######################################
# Black 512x512 BGR canvas.
# NOTE(review): np.int8 is a *signed* dtype; OpenCV display code normally
# expects uint8 -- this may be a typo in the original material; confirm.
img = np.zeros((512, 512, 3), np.int8)
while True:
    cv2.imshow("Images", img)
    # 27 == ESC; waitKey returns -1 (masked by 0xFF) when no key is pressed.
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
noreply@github.com
|
820708161506216faa57b389f2f0890d60afef5d
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv3/lib/python3.8/site-packages/ansible/modules/cron.py
|
2424f5c065543ddd96be359b69a92e58495389fd
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 26,537
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# Copyright: (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# Copyright: (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# Copyright: (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# Copyright: (c) 2015, Luca Berruti <nadirio@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cron
short_description: Manage cron.d and crontab entries
description:
- Use this module to manage crontab and environment variables entries. This module allows
you to create environment variables and named crontab entries, update, or delete them.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- When environment variables are managed, no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.
- When using symbols such as %, they must be properly escaped.
version_added: "0.9"
options:
name:
description:
- Description of a crontab entry or, if env is set, the name of environment variable.
- Required if I(state=absent).
- Note that if name is not set and I(state=present), then a
new crontab entry will always be created, regardless of existing ones.
- This parameter will always be required in future releases.
type: str
user:
description:
- The specific user whose crontab should be modified.
- When unset, this parameter defaults to the current user.
type: str
job:
description:
- The command to execute or, if env is set, the value of environment variable.
- The command should not contain line breaks.
- Required if I(state=present).
type: str
aliases: [ value ]
state:
description:
- Whether to ensure the job or environment variable is present or absent.
type: str
choices: [ absent, present ]
default: present
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
- If this is a relative path, it is interpreted with respect to I(/etc/cron.d).
- If it is absolute, it will typically be C(/etc/crontab).
- Many linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
- To use the I(cron_file) parameter you must specify the I(user) as well.
type: str
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
type: bool
default: no
minute:
description:
- Minute when the job should run (C(0-59), C(*), C(*/2), and so on).
type: str
default: "*"
hour:
description:
- Hour when the job should run (C(0-23), C(*), C(*/2), and so on).
type: str
default: "*"
day:
description:
- Day of the month the job should run (C(1-31), C(*), C(*/2), and so on).
type: str
default: "*"
aliases: [ dom ]
month:
description:
- Month of the year the job should run (C(1-12), C(*), C(*/2), and so on).
type: str
default: "*"
weekday:
description:
- Day of the week that the job should run (C(0-6) for Sunday-Saturday, C(*), and so on).
type: str
default: "*"
aliases: [ dow ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use I(special_time).
version_added: "1.0"
type: bool
default: no
special_time:
description:
- Special time specification nickname.
type: str
choices: [ annually, daily, hourly, monthly, reboot, weekly, yearly ]
version_added: "1.3"
disabled:
description:
- If the job should be disabled (commented out) in the crontab.
- Only has effect if I(state=present).
type: bool
default: no
version_added: "2.0"
env:
description:
- If set, manages a crontab's environment variable.
- New variables are added on top of crontab.
- I(name) and I(value) parameters are the name and the value of environment variable.
type: bool
default: false
version_added: "2.1"
insertafter:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted after the declaration of specified environment variable.
type: str
version_added: "2.1"
insertbefore:
description:
- Used with I(state=present) and I(env).
- If specified, the environment variable will be inserted before the declaration of specified environment variable.
type: str
version_added: "2.1"
requirements:
- cron (or cronie on CentOS)
author:
- Dane Summers (@dsummersl)
- Mike Grozak (@rhaido)
- Patrick Callahan (@dirtyharrycallahan)
- Evan Kaufman (@EvanK)
- Luca Berruti (@lberruti)
notes:
- Supports C(check_mode).
'''
EXAMPLES = r'''
- name: Ensure a job that runs at 2 and 5 exists. Creates an entry like "0 5,2 * * ls -alh > /dev/null"
ansible.builtin.cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
- name: 'Ensure an old job is no longer present. Removes any job that is prefixed by "#Ansible: an old job" from the crontab'
ansible.builtin.cron:
name: "an old job"
state: absent
- name: Creates an entry like "@reboot /some/job.sh"
ansible.builtin.cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
- name: Creates an entry like "PATH=/opt/bin" on top of crontab
ansible.builtin.cron:
name: PATH
env: yes
job: /opt/bin
- name: Creates an entry like "APP_HOME=/srv/app" and insert it after PATH declaration
ansible.builtin.cron:
name: APP_HOME
env: yes
job: /srv/app
insertafter: PATH
- name: Creates a cron file under /etc/cron.d
ansible.builtin.cron:
name: yum autoupdate
weekday: "2"
minute: "0"
hour: "12"
user: root
job: "YUMINTERACTIVE=0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
- name: Removes a cron file from under /etc/cron.d
ansible.builtin.cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
- name: Removes "APP_HOME" environment variable from crontab
ansible.builtin.cron:
name: APP_HOME
env: yes
state: absent
'''
RETURN = r'''#'''
import os
import platform
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_bytes, to_native
from ansible.module_utils.six.moves import shlex_quote
class CronTabError(Exception):
    """Raised when the system crontab cannot be read or written."""
    pass
class CronTab(object):
    """
    CronTab object to write time based crontab file

    user      - the user of the crontab (defaults to current user)
    cron_file - a cron file under /etc/cron.d, or an absolute path
    """

    def __init__(self, module, user=None, cron_file=None):
        self.module = module
        self.user = user
        self.root = (os.getuid() == 0)
        self.lines = None
        # Marker comment used to tag and later locate Ansible-managed jobs.
        self.ansible = "#Ansible: "
        # Raw crontab text as it existed before any edits (used for diffs
        # and the trailing-newline check in main()).
        self.n_existing = ''
        self.cron_cmd = self.module.get_bin_path('crontab', required=True)

        if cron_file:
            if os.path.isabs(cron_file):
                self.cron_file = cron_file
                self.b_cron_file = to_bytes(cron_file, errors='surrogate_or_strict')
            else:
                # Relative names are interpreted under /etc/cron.d.
                self.cron_file = os.path.join('/etc/cron.d', cron_file)
                self.b_cron_file = os.path.join(b'/etc/cron.d', to_bytes(cron_file, errors='surrogate_or_strict'))
        else:
            self.cron_file = None

        self.read()

    def read(self):
        """Load the crontab (file or per-user) into self.lines."""
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.b_cron_file, 'rb')
                self.n_existing = to_native(f.read(), errors='surrogate_or_strict')
                self.lines = self.n_existing.splitlines()
                f.close()
            except IOError:
                # cron file does not exist
                return
            except Exception:
                raise CronTabError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead.  FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1:  # 1 can mean that there are no jobs.
                raise CronTabError("Unable to read crontab")

            self.n_existing = out

            lines = out.splitlines()
            count = 0
            for l in lines:
                # Skip (and strip from n_existing) up to three header lines
                # that some cron implementations prepend to `crontab -l`.
                if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match(r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match(r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                else:
                    pattern = re.escape(l) + '[\r\n]?'
                    self.n_existing = re.sub(pattern, '', self.n_existing, 1)
                count += 1

    def is_empty(self):
        """Return True when no crontab lines are loaded."""
        if len(self.lines) == 0:
            return True
        else:
            return False

    def write(self, backup_file=None):
        """
        Write the crontab to the system. Saves all information.
        """
        if backup_file:
            fileh = open(backup_file, 'wb')
        elif self.cron_file:
            fileh = open(self.b_cron_file, 'wb')
        else:
            # Per-user crontab: render to a temp file, then install it
            # via the `crontab` command below.
            filed, path = tempfile.mkstemp(prefix='crontab')
            os.chmod(path, int('0644', 8))
            fileh = os.fdopen(filed, 'wb')

        fileh.write(to_bytes(self.render()))
        fileh.close()

        # return if making a backup
        if backup_file:
            return

        # Add the entire crontab back to the user crontab
        if not self.cron_file:
            # quoting shell args for now but really this should be two non-shell calls.  FIXME
            (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
            os.unlink(path)

            if rc != 0:
                self.module.fail_json(msg=err)

        # set SELinux permissions
        if self.module.selinux_enabled() and self.cron_file:
            self.module.set_default_selinux_context(self.cron_file, False)

    def do_comment(self, name):
        """Build the '#Ansible: <name>' marker line for a job."""
        return "%s%s" % (self.ansible, name)

    def add_job(self, name, job):
        """Append a marker comment plus the job line."""
        # Add the comment
        self.lines.append(self.do_comment(name))

        # Add the job
        self.lines.append("%s" % (job))

    def update_job(self, name, job):
        """Replace the job tagged with `name` by `job`."""
        return self._update_job(name, job, self.do_add_job)

    def do_add_job(self, lines, comment, job):
        # Callback for _update_job: re-emit the marker and the new job.
        lines.append(comment)

        lines.append("%s" % (job))

    def remove_job(self, name):
        """Delete the job tagged with `name` (and its marker)."""
        return self._update_job(name, "", self.do_remove_job)

    def do_remove_job(self, lines, comment, job):
        # Callback for _update_job: emit nothing, dropping the job.
        return None

    def add_env(self, decl, insertafter=None, insertbefore=None):
        """Insert an environment declaration line, optionally positioned
        relative to another variable; fails the module if the anchor
        variable does not exist."""
        if not (insertafter or insertbefore):
            # Default position: top of the crontab.
            self.lines.insert(0, decl)
            return

        if insertafter:
            other_name = insertafter
        elif insertbefore:
            other_name = insertbefore

        other_decl = self.find_env(other_name)
        if len(other_decl) > 0:
            if insertafter:
                index = other_decl[0] + 1
            elif insertbefore:
                index = other_decl[0]
            self.lines.insert(index, decl)
            return

        self.module.fail_json(msg="Variable named '%s' not found." % other_name)

    def update_env(self, name, decl):
        """Replace the declaration of environment variable `name`."""
        return self._update_env(name, decl, self.do_add_env)

    def do_add_env(self, lines, decl):
        # Callback for _update_env: emit the replacement declaration.
        lines.append(decl)

    def remove_env(self, name):
        """Delete the declaration of environment variable `name`."""
        return self._update_env(name, '', self.do_remove_env)

    def do_remove_env(self, lines, decl):
        # Callback for _update_env: emit nothing, dropping the variable.
        return None

    def remove_job_file(self):
        """Delete the managed cron.d file; True on success, False when it
        did not exist."""
        try:
            os.unlink(self.cron_file)
            return True
        except OSError:
            # cron file does not exist
            return False
        except Exception:
            raise CronTabError("Unexpected error:", sys.exc_info()[0])

    def find_job(self, name, job=None):
        """Locate a job by its Ansible marker, or failing that by exact
        job-line match; may also adopt an unmarked matching line by
        inserting/updating a marker."""
        # attempt to find job by 'Ansible:' header comment
        comment = None
        for l in self.lines:
            if comment is not None:
                if comment == name:
                    return [comment, l]
                else:
                    comment = None
            elif re.match(r'%s' % self.ansible, l):
                comment = re.sub(r'%s' % self.ansible, '', l)

        # failing that, attempt to find job by exact match
        if job:
            for i, l in enumerate(self.lines):
                if l == job:
                    # NOTE(review): when i == 0, self.lines[i - 1] wraps to
                    # the *last* line -- presumably benign in practice, but
                    # worth confirming.
                    # if no leading ansible header, insert one
                    if not re.match(r'%s' % self.ansible, self.lines[i - 1]):
                        self.lines.insert(i, self.do_comment(name))
                        return [self.lines[i], l, True]
                    # if a leading blank ansible header AND job has a name, update header
                    elif name and self.lines[i - 1] == self.do_comment(None):
                        self.lines[i - 1] = self.do_comment(name)
                        return [self.lines[i - 1], l, True]

        return []

    def find_env(self, name):
        """Return [index, line] of the declaration of variable `name`, or
        [] when absent."""
        for index, l in enumerate(self.lines):
            if re.match(r'^%s=' % name, l):
                return [index, l]

        return []

    def get_cron_job(self, minute, hour, day, month, weekday, job, special, disabled):
        """Render one crontab line; cron.d files additionally carry the
        user column, and `disabled` comments the line out."""
        # normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
        job = job.strip('\r\n')

        if disabled:
            disable_prefix = '#'
        else:
            disable_prefix = ''

        if special:
            if self.cron_file:
                return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
            else:
                return "%s@%s %s" % (disable_prefix, special, job)
        else:
            if self.cron_file:
                return "%s%s %s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, self.user, job)
            else:
                return "%s%s %s %s %s %s %s" % (disable_prefix, minute, hour, day, month, weekday, job)

    def get_jobnames(self):
        """List the names of all Ansible-managed jobs (from marker lines)."""
        jobnames = []

        for l in self.lines:
            if re.match(r'%s' % self.ansible, l):
                jobnames.append(re.sub(r'%s' % self.ansible, '', l))

        return jobnames

    def get_envnames(self):
        """List the names of all environment variables declared in the
        crontab."""
        envnames = []

        for l in self.lines:
            if re.match(r'^\S+=', l):
                envnames.append(l.split('=')[0])

        return envnames

    def _update_job(self, name, job, addlinesfunction):
        """Rewrite self.lines, passing the marked job through
        `addlinesfunction`; returns True when the crontab ends up empty."""
        ansiblename = self.do_comment(name)
        newlines = []
        comment = None

        for l in self.lines:
            if comment is not None:
                addlinesfunction(newlines, comment, job)
                comment = None
            elif l == ansiblename:
                comment = l
            else:
                newlines.append(l)

        self.lines = newlines

        if len(newlines) == 0:
            return True
        else:
            return False  # TODO add some more error testing

    def _update_env(self, name, decl, addenvfunction):
        """Rewrite self.lines, passing the declaration of `name` through
        `addenvfunction`."""
        newlines = []

        for l in self.lines:
            if re.match(r'^%s=' % name, l):
                addenvfunction(newlines, decl)
            else:
                newlines.append(l)

        self.lines = newlines

    def render(self):
        """
        Render this crontab as it would be in the crontab.
        """
        crons = []
        for cron in self.lines:
            crons.append(cron)

        result = '\n'.join(crons)
        if result:
            # Ensure exactly one trailing newline (cron requires it).
            result = result.rstrip('\r\n') + '\n'
        return result

    def _read_user_execute(self):
        """
        Returns the command line for reading a crontab
        """
        user = ''
        if self.user:
            # Platform-specific invocations for listing another user's
            # crontab.
            if platform.system() == 'SunOS':
                return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
            elif platform.system() == 'AIX':
                return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
            elif platform.system() == 'HP-UX':
                return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)

        return "%s %s %s" % (self.cron_cmd, user, '-l')

    def _write_execute(self, path):
        """
        Return the command line for writing a crontab
        """
        user = ''
        if self.user:
            if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
                return "chown %s %s ; su '%s' -c '%s %s'" % (
                    shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
            elif pwd.getpwuid(os.getuid())[0] != self.user:
                user = '-u %s' % shlex_quote(self.user)

        return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
def main():
    """Module entry point: parse parameters, apply the requested crontab
    change (job or environment variable), and exit with results/diff."""
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    #   cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    #   cron: name="an old job" state=absent
    #
    # - name: sets env
    #   cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * * 5,2 * * ls -alh > /dev/null
    # # Ansible: do the job
    # * * 5,2 * * /some/dir/job.sh

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool', default=False),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )

    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'

    changed = False
    res_args = dict()
    warnings = list()

    if cron_file:
        # Some cron implementations ignore /etc/cron.d entries whose names
        # contain other characters, hence the warning.
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
                            ' solely of upper- and lower-case letters, digits, underscores, and hyphens')

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)

    module.debug('cron instantiated - name: "%s"' % name)

    if not name:
        module.deprecate(
            msg="The 'name' parameter will be required in future releases.",
            version='2.12', collection_name='ansible.builtin'
        )
    if reboot:
        module.deprecate(
            msg="The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
            version='2.12', collection_name='ansible.builtin'
        )

    if module._diff:
        diff = dict()
        diff['before'] = crontab.n_existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'

    # --- user input validation ---

    if env and not name:
        module.fail_json(msg="You must specify 'name' while working with environment variables (env=yes)")

    if (special_time or reboot) and \
       (True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
        module.fail_json(msg="You must specify time and date fields or special time.")

    # cannot support special_time on solaris
    if (special_time or reboot) and platform.system() == 'SunOS':
        module.fail_json(msg="Solaris does not support special_time=... or @reboot")

    if cron_file and do_install:
        if not user:
            module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")

    if job is None and do_install:
        module.fail_json(msg="You must specify 'job' to install a new cron job or variable")

    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")

    # The deprecated reboot flag maps onto special_time=reboot.
    if reboot:
        special_time = "reboot"

    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)

    # Removing a whole cron.d file is handled as a special case and exits
    # early.
    if crontab.cron_file and not do_install:
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)

    if env:
        # Environment-variable management path.
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)

        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        # Crontab-job management path.
        if do_install:
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break

            job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
            old_job = crontab.find_job(name, job)

            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            if len(old_job) > 2:
                # find_job adopted an unmarked line (3-element result).
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)

            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True

    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.n_existing != '':
        if not (crontab.n_existing.endswith('\r') or crontab.n_existing.endswith('\n')):
            changed = True

    res_args = dict(
        jobs=crontab.get_jobnames(),
        envs=crontab.get_envnames(),
        warnings=warnings,
        changed=changed
    )

    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff['after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'

            res_args['diff'] = diff

    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")


if __name__ == '__main__':
    main()
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
96eb58da2807780f7f78eb49453cd03e2e4a57bb
|
33f30925224a7db3e3bf6948c6c569ad850e9c76
|
/Server/bin/rst2xml.py
|
6a7fab179644d60c2959331900cdea30a7350337
|
[] |
no_license
|
duelle/CTT
|
2bc64fffaf4b2eb3976fedd7aea231a51da8fbe9
|
e2da2ab9c599833cc8409728b456a9e37825986b
|
refs/heads/master
| 2022-04-06T15:25:06.747919
| 2020-02-19T14:04:37
| 2020-02-19T14:04:37
| 237,939,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 642
|
py
|
#!/home/duelle/Repositories/git/RadonCTT/Server/bin/python

# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.

"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except locale.Error:
    # Best effort only: fall back to the default C locale when the user's
    # locale is unsupported.  (The original used a bare `except:`, which
    # also swallows KeyboardInterrupt/SystemExit; locale.Error is the only
    # exception setlocale raises.)
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

# Parse argv, read the reST source, and write Docutils XML to stdout.
publish_cmdline(writer_name='xml', description=description)
|
[
"duellmann@iste.uni-stuttgart.de"
] |
duellmann@iste.uni-stuttgart.de
|
d5408abdee9094c62381748340a424756eef3c8c
|
9d61daee8ec86d1c3b85ab577c4d0ffc5c4c4a7c
|
/code kata/summm.py
|
d8927006714e70f2f8448e2ce4032b3d9075ff48
|
[] |
no_license
|
Bhuvaneswaribai/guvi
|
ec3d2a922059859c778b78920d52936a44edbca8
|
ab6bb1193af49dbc431d5eb7ae19050d11aa622c
|
refs/heads/master
| 2020-06-03T00:11:14.636796
| 2019-07-04T11:30:00
| 2019-07-04T11:30:00
| 191,355,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
# Sum the integers on the second input line.
# First line: declared element count (read for format compliance but
# intentionally unused — the entire second line is summed regardless).
_count = int(input())
values = list(map(int, input().split()))
# Built-in sum() replaces the manual accumulation loop, which also
# shadowed the `sum` builtin in the original.
print(sum(values))
|
[
"noreply@github.com"
] |
noreply@github.com
|
55c5e4126f52501d3ab1f9cd4f9c49c47dc30d18
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp/ZXR10-MACPING-MIB.py
|
805cbd59b0fb3a90dcafa3b37ef03e6abdf405d0
|
[
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 12,798
|
py
|
#
# PySNMP MIB module ZXR10-MACPING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ZXR10-MACPING-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:42:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
iso, Bits, ModuleIdentity, Gauge32, Unsigned32, enterprises, IpAddress, Counter32, experimental, ObjectIdentity, MibIdentifier, NotificationType, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, mgmt, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "Bits", "ModuleIdentity", "Gauge32", "Unsigned32", "enterprises", "IpAddress", "Counter32", "experimental", "ObjectIdentity", "MibIdentifier", "NotificationType", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "mgmt", "Counter64")
TruthValue, DisplayString, RowStatus, MacAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "RowStatus", "MacAddress", "TextualConvention")
zxr10L2vpn, = mibBuilder.importSymbols("ZXR10-SMI", "zxr10L2vpn")
zxr10MacPingMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4))
# NOTE: this file is auto-generated by pysmi from the ZXR10-MACPING-MIB
# ASN.1 source; regenerate from the MIB rather than editing by hand.
class DisplayString(OctetString):
    # Plain octet string used for display text.  This local definition
    # shadows the SNMPv2-TC DisplayString imported above.
    pass
class OptionType(Integer32):
    # Enumerated integer restricted to ce(0) / pe(1).
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
    namedValues = NamedValues(("ce", 0), ("pe", 1))
zxr10MacPingTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1), )
if mibBuilder.loadTexts: zxr10MacPingTable.setStatus('current')
zxr10MacPingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacSerial"))
if mibBuilder.loadTexts: zxr10MacPingEntry.setStatus('current')
zxr10PingMacSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacSerial.setStatus('current')
zxr10PingMacDestMac = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 2), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacDestMac.setStatus('current')
zxr10PingMacControlOutEtherIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlOutEtherIf.setStatus('current')
zxr10PingMacIfOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("none", 0), ("option", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfOption.setStatus('current')
zxr10PingMacPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPacketCount.setStatus('current')
zxr10PingMacTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTimeOut.setStatus('current')
zxr10PingMacHops = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacHops.setStatus('current')
zxr10PingMacControlResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacControlResultType.setStatus('current')
zxr10PingMacTrapOncompletion = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 9), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacTrapOncompletion.setStatus('current')
zxr10PingMacRosStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("not-active", 1), ("start-ping", 2), ("ping-processing", 3), ("ping-completed", 4))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacRosStatus.setStatus('current')
zxr10PingMacEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacEntryOwner.setStatus('current')
zxr10PingMacIfPeOption = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 12), OptionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacIfPeOption.setStatus('current')
zxr10PingMacVfiName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 13), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacVfiName.setStatus('current')
zxr10PingMacPeerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 1, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: zxr10PingMacPeerAddress.setStatus('current')
zxr10PingMacResultTable = MibTable((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2), )
if mibBuilder.loadTexts: zxr10PingMacResultTable.setStatus('current')
zxr10pingMacResultEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1), ).setIndexNames((0, "ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"))
if mibBuilder.loadTexts: zxr10pingMacResultEntry.setStatus('current')
zxr10PingMacResultSerial = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSerial.setStatus('current')
zxr10PingMacResultSentPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultSentPkts.setStatus('current')
zxr10PingMacResultRcvPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRcvPkts.setStatus('current')
zxr10PingMacResultRoundTripMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMinTime.setStatus('current')
zxr10PingMacResultRoundTripMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripMaxTime.setStatus('current')
zxr10PingMacResultRoundTripAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundTripAvgTime.setStatus('current')
zxr10PingMacResultType = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("summary", 0), ("detail", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultType.setStatus('current')
zxr10PingMacExtResultDestIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestIfName.setStatus('current')
zxr10PingMacExtResultDestHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultDestHostName.setStatus('current')
zxr10PingMacExtResultSourceIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceIfName.setStatus('current')
zxr10PingMacExtResultSourceHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultSourceHostName.setStatus('current')
zxr10PingMacExtResultOutVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultOutVlanId.setStatus('current')
zxr10PingMacExtResultInVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacExtResultInVlanId.setStatus('current')
zxr10PingMacResultEntryOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultEntryOwner.setStatus('current')
zxr10PingMacResultRoundWobbleMinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMinTime.setStatus('current')
zxr10PingMacResultRoundWobbleMaxTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleMaxTime.setStatus('current')
zxr10PingMacResultRoundWobbleAvgTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: zxr10PingMacResultRoundWobbleAvgTime.setStatus('current')
macpingNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3))
macpingTrapResult = NotificationType((1, 3, 6, 1, 4, 1, 3902, 3, 104, 4, 3, 1)).setObjects(("ZXR10-MACPING-MIB", "zxr10PingMacResultSerial"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultSentPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRcvPkts"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMinTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripMaxTime"), ("ZXR10-MACPING-MIB", "zxr10PingMacResultRoundTripAvgTime"))
if mibBuilder.loadTexts: macpingTrapResult.setStatus('current')
mibBuilder.exportSymbols("ZXR10-MACPING-MIB", zxr10PingMacResultRoundTripAvgTime=zxr10PingMacResultRoundTripAvgTime, zxr10MacPingMIB=zxr10MacPingMIB, zxr10PingMacPeerAddress=zxr10PingMacPeerAddress, zxr10PingMacTimeOut=zxr10PingMacTimeOut, macpingNotifications=macpingNotifications, zxr10PingMacEntryOwner=zxr10PingMacEntryOwner, zxr10PingMacRosStatus=zxr10PingMacRosStatus, zxr10PingMacIfOption=zxr10PingMacIfOption, zxr10PingMacResultRoundWobbleAvgTime=zxr10PingMacResultRoundWobbleAvgTime, zxr10PingMacResultTable=zxr10PingMacResultTable, OptionType=OptionType, zxr10MacPingTable=zxr10MacPingTable, zxr10PingMacPacketCount=zxr10PingMacPacketCount, zxr10PingMacResultRcvPkts=zxr10PingMacResultRcvPkts, zxr10PingMacSerial=zxr10PingMacSerial, zxr10pingMacResultEntry=zxr10pingMacResultEntry, zxr10PingMacResultRoundWobbleMinTime=zxr10PingMacResultRoundWobbleMinTime, zxr10PingMacResultRoundTripMinTime=zxr10PingMacResultRoundTripMinTime, zxr10MacPingEntry=zxr10MacPingEntry, zxr10PingMacHops=zxr10PingMacHops, zxr10PingMacIfPeOption=zxr10PingMacIfPeOption, zxr10PingMacResultSerial=zxr10PingMacResultSerial, DisplayString=DisplayString, zxr10PingMacExtResultSourceHostName=zxr10PingMacExtResultSourceHostName, zxr10PingMacResultEntryOwner=zxr10PingMacResultEntryOwner, zxr10PingMacControlOutEtherIf=zxr10PingMacControlOutEtherIf, zxr10PingMacResultSentPkts=zxr10PingMacResultSentPkts, zxr10PingMacResultType=zxr10PingMacResultType, zxr10PingMacResultRoundWobbleMaxTime=zxr10PingMacResultRoundWobbleMaxTime, zxr10PingMacResultRoundTripMaxTime=zxr10PingMacResultRoundTripMaxTime, zxr10PingMacExtResultDestIfName=zxr10PingMacExtResultDestIfName, zxr10PingMacExtResultDestHostName=zxr10PingMacExtResultDestHostName, macpingTrapResult=macpingTrapResult, zxr10PingMacVfiName=zxr10PingMacVfiName, zxr10PingMacExtResultOutVlanId=zxr10PingMacExtResultOutVlanId, zxr10PingMacExtResultSourceIfName=zxr10PingMacExtResultSourceIfName, zxr10PingMacControlResultType=zxr10PingMacControlResultType, 
zxr10PingMacExtResultInVlanId=zxr10PingMacExtResultInVlanId, zxr10PingMacDestMac=zxr10PingMacDestMac, zxr10PingMacTrapOncompletion=zxr10PingMacTrapOncompletion)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
bcded7ca3347b631cb06ccb49aa49c5ef2291909
|
6cb18c62758bfbf783d3fabe851d1c4d9f323483
|
/setup.py
|
9319f44e05f51de89cc40224949e07be98a9e018
|
[
"MIT"
] |
permissive
|
bruinxiong/performer-pytorch
|
68e505ff5e59d35e339b23661feef377795fd2df
|
c368b5e4efd46f72e2abaa655dc813021f911014
|
refs/heads/main
| 2023-01-04T02:25:42.898296
| 2020-10-26T22:41:09
| 2020-10-26T22:41:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
from setuptools import setup, find_packages

# Distribution metadata for performer-pytorch.  The `examples` directory
# is excluded from the installed package.
setup(
    name='performer-pytorch',
    version='0.1.4',
    license='MIT',
    description='Performer - Pytorch',
    author='Phil Wang',
    author_email='lucidrains@gmail.com',
    url='https://github.com/lucidrains/performer-pytorch',
    packages=find_packages(exclude=['examples']),
    keywords=[
        'artificial intelligence',
        'attention mechanism',
        'efficient attention',
        'transformers',
    ],
    install_requires=[
        'pytorch-fast-transformers>=0.3.0',
        'torch>=1.6',
        'einops>=0.3',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
|
[
"lucidrains@gmail.com"
] |
lucidrains@gmail.com
|
1697ff12097d074fe9a08b7e8cfbf1ecd1348016
|
cca89a7bbe2da907a38eb00e9a083f57597273f0
|
/162. 寻找峰值/pythonCode.py
|
ecfc5d414241c3d0b4d2b4aac3531e9ced628696
|
[] |
no_license
|
xerprobe/LeetCodeAnswer
|
cc87941ef2a25c6aa1366e7a64480dbd72750670
|
ea1822870f15bdb1a828a63569368b7cd10c6ab8
|
refs/heads/master
| 2022-09-23T09:15:42.628793
| 2020-06-06T16:29:59
| 2020-06-06T16:29:59
| 270,215,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
from typing import List


class Solution:
    """LeetCode 162 — Find Peak Element.

    A peak is an element strictly greater than both neighbours.  The
    problem guarantees nums[i] != nums[i+1] and treats nums[-1] and
    nums[n] as -infinity, so at least one peak always exists and any
    peak index is an acceptable answer.  Must run in O(log n).
    https://leetcode-cn.com/problems/find-peak-element/
    """

    def findPeakElement(self, nums: List[int]) -> int:
        """Return the index of any peak element via binary search."""
        lo, hi = 0, len(nums) - 1
        # Invariant: [lo, hi] always contains a peak.  If mid sits on a
        # descending slope, a peak exists at mid or to its left;
        # otherwise the ascent guarantees a peak strictly to the right.
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[mid + 1]:
                hi = mid
            else:
                lo = mid + 1
        return lo
|
[
"changwenhao1@qq.com"
] |
changwenhao1@qq.com
|
68caed12611a8b789a1964a22fb49575eca70c7f
|
76d388b5d2e74ff0eda748c7868fadf0704cf700
|
/tensorpack/utils/develop.py
|
496de1dd245db766c3e4ba256ddb638d5e621b48
|
[
"Apache-2.0"
] |
permissive
|
jooyounghun/tensorpack
|
eebf0867e5a82ffd52660dccfbd34879b8d0f5af
|
90cdae380c40a1e91f627520c4a739bd6ee3f18b
|
refs/heads/master
| 2020-03-23T23:24:41.651089
| 2018-07-27T02:57:19
| 2018-07-27T02:57:19
| 142,232,523
| 1
| 0
|
Apache-2.0
| 2018-07-25T01:45:06
| 2018-07-25T01:45:05
| null |
UTF-8
|
Python
| false
| false
| 4,773
|
py
|
# -*- coding: utf-8 -*-
# File: develop.py
# Author: tensorpack contributors
""" Utilities for developers only.
These are not visible to users (not automatically imported). And should not
appeared in docs."""
import os
import functools
from datetime import datetime
import importlib
import types
import six
from . import logger
def create_dummy_class(klass, dependency):
    """Build a placeholder class that raises ImportError on any use.

    Used when an optional dependency of a class is unavailable: both
    instantiating the returned class and accessing any of its class
    attributes raise an ImportError naming the missing dependency.

    Args:
        klass (str): name of the class being stubbed out.
        dependency (str): name of the missing dependency.

    Returns:
        class: the dummy placeholder class.
    """
    message = "Cannot import '{}', therefore '{}' is not available".format(dependency, klass)

    class _DummyMetaClass(type):
        # Any class-level attribute access fails loudly.
        def __getattr__(cls, name):
            raise ImportError(message)

    @six.add_metaclass(_DummyMetaClass)
    class _Dummy(object):
        # Instantiation fails loudly as well.
        def __init__(self, *args, **kwargs):
            raise ImportError(message)

    return _Dummy
def create_dummy_func(func, dependency):
    """Build a placeholder function that raises ImportError when called.

    Used when an optional dependency of a function is unavailable.

    Args:
        func (str): name of the function being stubbed out.
        dependency (str or list[str]): name(s) of the missing dependency.

    Returns:
        function: the dummy placeholder function.
    """
    # Normalize a list/tuple of dependency names to one comma-joined string.
    if isinstance(dependency, (list, tuple)):
        dependency = ','.join(dependency)
    message = "Cannot import '{}', therefore '{}' is not available".format(dependency, func)

    def _dummy(*args, **kwargs):
        raise ImportError(message)

    return _dummy
def building_rtfd():
    """
    Returns:
        bool: whether tensorpack is being imported to generate docs now
        (READTHEDOCS == 'True', or DOC_BUILDING set to any value).
    """
    on_rtfd = os.environ.get('READTHEDOCS') == 'True'
    return on_rtfd or os.environ.get('DOC_BUILDING')
def log_deprecated(name="", text="", eos=""):
    """
    Log a deprecation warning via the package logger.

    Args:
        name (str): name of the deprecated item.
        text (str, optional): information about the deprecation.
        eos (str, optional): end of service date such as "YYYY-MM-DD".
    """
    assert name or text
    # Turn "YYYY-MM-DD" into a human-readable "after DD Mon" phrase.
    if eos:
        eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
    if not name:
        warn_msg = text
        if eos:
            warn_msg += " Legacy period ends %s" % eos
    elif eos:
        warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
    else:
        warn_msg = "%s was deprecated. %s" % (name, text)
    logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos=""):
    """
    Decorator factory: logs a deprecation warning each time the wrapped
    function is called, including the call-site location.

    Args:
        text, eos: same as :func:`log_deprecated`.
    Returns:
        a decorator which deprecates the function.
    Example:
        .. code-block:: python
            @deprecated("Explanation of what to do instead.", "2017-11-4")
            def foo(...):
                pass
    """
    def get_location():
        # Best-effort "file:line" of the caller, used to point the
        # warning at the deprecated usage rather than at this module.
        import inspect
        frame = inspect.currentframe()
        if frame:
            # Outermost frame in the stack = the original call site.
            callstack = inspect.getouterframes(frame)[-1]
            return '%s:%i' % (callstack[1], callstack[2])
        else:
            # currentframe() may return None on some interpreters;
            # fall back to inspect.stack() and skip the wrapper frames.
            stack = inspect.stack(0)
            entry = stack[2]
            return '%s:%i' % (entry[1], entry[2])
    def deprecated_inner(func):
        @functools.wraps(func)
        def new_func(*args, **kwargs):
            # Warn (with location), then delegate unchanged.
            name = "{} [{}]".format(func.__name__, get_location())
            log_deprecated(name, text, eos)
            return func(*args, **kwargs)
        return new_func
    return deprecated_inner
def HIDE_DOC(func):
    """Tag *func* with ``__HIDE_SPHINX_DOC__`` (presumably consulted by
    the doc build to skip it) and return it unchanged."""
    setattr(func, '__HIDE_SPHINX_DOC__', True)
    return func
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
    """Module proxy that defers the real import until first use.

    Stands in for a module in its parent's namespace; the underlying
    module is imported on the first attribute access (or dir()) and then
    substituted into the parent's globals.
    """

    def __init__(self, local_name, parent_module_globals, name):
        # Name under which the parent refers to us, and the parent's
        # globals dict we will patch once the real module is loaded.
        self._local_name = local_name
        self._parent_module_globals = parent_module_globals
        super(LazyLoader, self).__init__(name)

    def _load(self):
        """Import the real module, splice it into the parent namespace,
        and absorb its dict so later lookups bypass __getattr__."""
        module = importlib.import_module(self.__name__)
        self._parent_module_globals[self._local_name] = module
        # Copying the module dict makes future attribute hits fast even
        # for callers still holding a reference to this proxy.
        self.__dict__.update(module.__dict__)
        return module

    def __getattr__(self, item):
        return getattr(self._load(), item)

    def __dir__(self):
        return dir(self._load())
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
7ef2579880b9b7ec614ed66ecd323b2e3604e749
|
6eaca1b3ada96264bdad964652c19365f982025a
|
/QPainter/__init__.py
|
0a9a28d278c61ebd50c91b5166dc7748582e2115
|
[] |
no_license
|
RahulARanger/My_Qt-Py_Book
|
4c7e4dfc9a1d1ec8a587d3bbb722fc64f6de1008
|
396280e9110d11c9c297bf83f332411b98c98453
|
refs/heads/master
| 2023-08-15T01:42:33.415854
| 2021-10-01T19:44:50
| 2021-10-01T19:44:50
| 320,230,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import RashSetup.__RashModules__.Rash.ApplicationManager
from .MemeGen import *
class UTIL(TabWindow):
    """Tab window that hosts the meme generator under a "SpongeBob" tab.

    `TabWindow` and `MemeGenerator` come from the `MemeGen` star import
    above — confirm their contracts there.
    """
    def __init__(self, shared: dict):
        # `shared` must carry the running Rash application under the
        # "RASH" key (the annotation pins the expected type).
        Rash: RashSetup.__RashModules__.Rash.ApplicationManager.RashMain = shared["RASH"]
        super().__init__(Rash)
        self.Generator = MemeGenerator(self)
        self.easeAdd(self.Generator, "SpongeBob")
|
[
"saihanumarahul66@gmail.com"
] |
saihanumarahul66@gmail.com
|
98d80763957c0adf4a839f4d123400647c1b2d7f
|
950fd350aba8c7584b8f362b2e5079b5010a1f6a
|
/lib/Sockets.py
|
aeb577b91be8e75da756909611e728e080dff370
|
[] |
no_license
|
entr0pist/fakeircd
|
96814755b0b2041bc14db8f942680c47f5ea56b0
|
43a88be91aa6337e1eacaeadaa20dcdb2bccd3a2
|
refs/heads/master
| 2020-06-07T10:34:36.562878
| 2015-11-10T04:02:38
| 2015-11-10T04:02:38
| 42,418,758
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,121
|
py
|
from lib import config
from lib import linechat
from lib.User import User
class Sockets:
    """Registry of listening server sockets kept in sync with `config`.

    Wraps a single linechat.Serve() instance and spawns or removes
    listeners as the configured 'listen' entries change.
    """
    def __init__(self):
        self.server = linechat.Serve()
    def add_sock(self, sock):
        # Delegate registration of an already-created socket.
        self.server.add_sock(sock)
    def rm_sock(self, sock):
        self.server.rm_sock(sock)
    def serve(self):
        # Enter the underlying serve loop.
        self.server.serve()
    def spawn_all(self):
        # Create a listener for every configured bind address/port …
        for server in config.get(None, 'listen'):
            # Skip addresses we are already listening on.
            if self.server.sock_by_address(server['bind_address'], server['bind_port']):
                continue
            ssl = False
            if 'ssl' in server:
                ssl = server['ssl']
            s = linechat.Server(User, port=server['bind_port'],
                hostname=server['bind_address'], ssl=ssl)
            self.server.add_sock(s)
        # … then drop listeners no longer present in the config.
        for server in self.server.socks:
            try:
                sock = server.sock.getsockname()
            except:
                # NOTE(review): any failure here aborts the entire cleanup
                # pass (`return`, not `continue`) — confirm that is intended.
                return
            if not config.get_listen_by_host_port(sock):
                self.server.rm_sock_by_address(*sock)
    def shutdown_all(self):
        self.server.close_all()
# Module-level singleton used by the rest of the application.
sockets = Sockets()
|
[
"entr0pist@users.noreply.github.com"
] |
entr0pist@users.noreply.github.com
|
a4c71809c35378bb39dbbce97d55d2a122ab4dcd
|
f51c6d0cebb27c377ce9830deec4b727b9b2ee90
|
/AI/05_tictactoe/02grid_plot.py
|
b2fb6cbc7f65ddac4fc048c6664f6bdd82dfb227
|
[] |
no_license
|
dbbudd/Python-Experiments
|
1c3c1322583aaaf2016a2f2f3061e6d034c5d1c8
|
b6d294bf11a5c92b8578d16aa2f63cc27fc47b07
|
refs/heads/master
| 2020-04-17T02:21:36.693593
| 2019-01-17T00:18:34
| 2019-01-17T00:18:34
| 166,130,283
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
#!/usr/bin/env python
import numpy as np
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
class gameboard(object):
    """3x3 tic-tac-toe board rendered with matplotlib.

    Cell values: 0 = empty, 1 = player 1 ("X", drawn red),
    2 = player 2 ("O", drawn blue).
    """
    def __init__(self):
        #player 1 puts a "X", player 2 puts a "O"
        # Hard-coded sample position; converted to a numpy array for
        # tuple indexing in drawGrid.
        self.g = [[1,0,1],[0,0,2],[0,2,0]]
        self.grid = np.array(self.g)
        print(self.grid)
    def drawGrid(self):
        """Open a matplotlib window showing the current board state."""
        fig = plt.figure()
        ax = fig.add_subplot(111, xlim=(0,3), ylim = (0,3))
        # NOTE(review): each (row, col) tuple is reused directly as the
        # rectangle's (x, y) anchor, so the rendered board appears to be
        # the transpose of self.grid — confirm this is intended.
        self.myCells = [(0,0),(0,1),(0,2),(1,0),(1,1),(1,2),(2,0),(2,1),(2,2)]
        for i in self.myCells:
            if self.grid[i] == 1:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="red")
                ax.add_patch(cell)
            elif self.grid[i] == 2:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="blue")
                ax.add_patch(cell)
            else:
                cell = mpatches.Rectangle((i), 1, 1, alpha=1, facecolor="none")
                ax.add_patch(cell)
        plt.show()  # blocks until the window is closed
# Script entry: build the sample board and display it.
board = gameboard()
board.drawGrid()
|
[
"dbbudd@gmail.com"
] |
dbbudd@gmail.com
|
311ba855cf35a4765fce0410377fb7f5eb4aa8a4
|
c56448aa3553d1a5ab71099e741fa71c15d539cb
|
/stations/urls.py
|
817356c4760a4af8560f60d4abb533fc1d2a9d3e
|
[] |
no_license
|
Jack11709/django-underground
|
8591cba5fbcd9e2202fbaefa1a95057d4258477d
|
60b868ce5dcb5001761c5207cfd764474ec8f19a
|
refs/heads/master
| 2022-06-04T04:11:14.667519
| 2019-10-31T09:50:46
| 2019-10-31T09:50:46
| 218,318,167
| 0
| 0
| null | 2022-05-25T03:24:00
| 2019-10-29T15:19:03
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
from django.urls import path
from .views import StationList, StationDetail, ZoneList, ZoneDetail, LineList, LineDetail # import our DRF views
# URL routes for the stations app; the project root URLconf (in
# /project/urls.py) includes these.  Detail routes capture the object's
# primary key as <int:pk> and hand it to the DRF detail views.
urlpatterns = [
    path('stations', StationList.as_view(), name='stations-list'),
    path('stations/<int:pk>/', StationDetail.as_view(), name='stations-detail'),
    path('zones', ZoneList.as_view()),
    path('zones/<int:pk>/', ZoneDetail.as_view()),
    path('lines', LineList.as_view()),
    path('lines/<int:pk>/', LineDetail.as_view())
]
|
[
"jack.may@generalassemb.ly"
] |
jack.may@generalassemb.ly
|
aafbc6488301d7e48ce363affc42a6a4fdd24a02
|
5fa4b8a36eec770bd740b6016030d2843cac8329
|
/trial_scripts/do_multiprocessing.py
|
e3269fc1eac7ab4e43440377e0b0e23ed103b1c8
|
[] |
no_license
|
sysang/word-prepresentation-training
|
79ffe4355b2f66dfd7c09625cc430dd65815c937
|
79565d8f69c31f4938f079517db7ff7c53ec54aa
|
refs/heads/master
| 2022-12-22T10:22:52.649259
| 2020-10-03T17:04:08
| 2020-10-03T17:04:08
| 293,590,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from multiprocessing import Process
from multiprocessing.sharedctypes import RawValue
import ctypes
def f(n):
    # Runs in the child process: overwrite the shared value.
    n.value = 'hello!!'
if __name__ == '__main__':
    # NOTE(review): RawValue(ctypes.c_wchar_p, ...) shares only a raw
    # pointer, not the string contents; a string assigned in the child
    # is not guaranteed to be visible (or even valid) in the parent.
    # This trial script appears to probe exactly that behaviour — confirm.
    num = RawValue(ctypes.c_wchar_p, 'abc')
    p = Process(target=f, args=(num,))
    p.start()
    p.join()
    print(num.value)
|
[
"daosysang@gmail.com"
] |
daosysang@gmail.com
|
c514c9650b93f135aac41cc8d73c464420d4b318
|
f7e1ada65e270fe2961df46179798ba522949e5c
|
/main1.py
|
37d1622998e259cd937474a1130d59c95377e6c3
|
[] |
no_license
|
fabian6768/WebsiteManager
|
36fad06af38298f25592fd2680837c6a1eb6a9b9
|
d10148e83e5533bbb3ece9018fd75db33a036138
|
refs/heads/master
| 2021-01-12T08:29:55.355610
| 2016-12-15T21:49:31
| 2016-12-15T21:49:31
| 76,597,511
| 0
| 1
| null | 2020-10-01T11:34:14
| 2016-12-15T21:28:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,578
|
py
|
#This Is A Program
from csv import *
from tkinter import *
from tkinter import messagebox
import webbrowser as wb
a=1
class Second(object):
    """Window listing the bookmarks saved in website.csv; clicking a
    bookmark's button opens its URL in the default browser.

    Note: the constructor blocks in Tk's mainloop until closed.
    """
    def __init__(self):
        self.t = Tk()
        self.t.title("Website Library")
        self.t.geometry("500x350")
        self.t.configure(background="#ddaf7e")
        self.book = []    # bookmark display names
        self.urls = []    # URLs, kept parallel to self.book
        self.button = []  # one Button per bookmark
        self.i = 0
        self.j = 0  # NOTE(review): never used after init — dead state?
        # Load all (name, url) rows from the CSV store.
        with open("website.csv", newline="") as csv:
            self.csvf = reader(csv)
            for row in self.csvf:
                self.book.append(row[0])
                self.urls.append(row[1])
        for name in self.book:
            self.button.append(Button(self.t, text=name, font="Verdana 15", width=16))
            self.button[self.i].pack(pady=2)
            self.i += 1
        self.i = 0
        # Bind each button to its URL; `url=url` freezes the value per
        # iteration (avoids the late-binding closure pitfall).
        for url in self.urls:
            self.button[self.i].configure(command=lambda url=url: self.openwww(url))
            self.i += 1
        self.t.mainloop()
    def openwww(self, url):
        # Open the bookmark in the system's default web browser.
        wb.open(url)
class Third(object):
    """Window with a two-field form that appends a new bookmark
    (name, URL) to website.csv.

    Note: the constructor blocks in Tk's mainloop until closed.
    """
    def __init__(self):
        self.t = Tk()
        self.t.title("Website Library")
        self.t.geometry("500x250")
        self.t.configure(background="#ddaf7e")
        self.first = Label(self.t, text="Name Of BookMark and second text box URL Of bookmark", font="Calibri 15", bg="#ddaf7e")
        self.name = Label(self.t, text="Name :", font="Calibri 15", bg="#ddaf7e")
        self.url = Label(self.t, text="URL :", font="Calibri 15", bg="#ddaf7e")
        self.entry1 = Entry(self.t)  # bookmark name input
        self.entry2 = Entry(self.t)  # bookmark URL input
        self.first.grid(row=0, columnspan=2)
        self.name.grid(row=1, column=0, sticky=E)
        self.url.grid(row=2, column=0, sticky=E)
        self.entry1.grid(row=1, column=1, sticky=W)
        self.entry2.grid(row=2, column=1, sticky=W)
        self.getitall = Button(self.t, text="Get It All", font="Calibri 12", command=lambda: self.getit())
        self.getitall.grid(row=3, column=1, sticky=W, padx=20)
        self.t.mainloop()
    def getit(self):
        # Append the entered (name, url) row to the CSV store, then
        # clear both fields so another bookmark can be added.
        with open("website.csv", "a", newline="") as csv:
            w = writer(csv)
            w.writerow([self.entry1.get(), self.entry2.get()])
        self.entry1.delete(0, END)
        self.entry2.delete(0, END)
class WebsiteManager(object):
    """Main menu window offering "view saved websites" (Second) and
    "add new website" (Third); closing it asks for confirmation and,
    if confirmed, sets the module-level flag `a` to stop main()'s loop.
    """
    def __init__(self):
        """Creating The First Window That Holds Buttons"""
        self.r = Tk()
        self.r.title("Website Library 123")
        self.r.geometry("500x250")
        self.r.configure(background="#ddaf7e")
        '''Configuring So that the First Window holds buttons'''
        # NOTE: .pack() returns None, so these attributes all hold None;
        # the widgets themselves stay alive via Tk's widget tree.
        self.title = Label(self.r, text="Website Library", bg="#ddaf7e", font="Calibri 26").pack()
        self.divider = Label(self.r, text=" "*100, bg="#ddaf7e").pack()
        self.saved = Button(self.r, text="View Saved Websites", font="Verdana 15", command=lambda: self.newwind(1)).pack(pady=10)
        self.addnew = Button(self.r, text="Add New Websites", font="Verdana 15", command=lambda: self.newwind(2)).pack(pady=10)
        self.r.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.r.mainloop()
    def on_closing(self):
        # Confirmed quit: destroy the window and flip the global loop flag.
        global a
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.r.destroy()
            a = 0
    def newwind(self, option):
        # Replace this menu with the chosen sub-window (1=view, 2=add).
        if option == 1:
            self.r.destroy()
            Second()
        elif option == 2:
            self.r.destroy()
            Third()
def main():
    # Reopen the main menu after each sub-window closes, until the user
    # confirms quitting (WebsiteManager.on_closing sets `a` to 0).
    while a == 1:
        WebsiteManager()
if __name__ == "__main__":
    main()
|
[
"fabian6768@yahoo.com"
] |
fabian6768@yahoo.com
|
941b70169ea0201bf4913ade211f0567886e5ca5
|
4c85452e12ad3d8ca08f91df21ff4c6812a9e3b7
|
/tests/invalid_boards.py
|
7ca7cb9830cd75f57154384786df9870880d65b6
|
[
"MIT"
] |
permissive
|
lesander/takuzu
|
452ad7b0b8abc76647b8542118c91be6e3cb8ee7
|
d0a913ce57a3234eaf17afd3c858f17c3f1e31e5
|
refs/heads/master
| 2022-07-05T17:01:48.117658
| 2020-05-21T23:00:25
| 2020-05-21T23:00:25
| 265,910,685
| 1
| 0
|
MIT
| 2022-06-22T02:06:48
| 2020-05-21T17:28:17
|
Python
|
UTF-8
|
Python
| false
| false
| 299
|
py
|
from takuzu import Takuzu
# Malformed boards that the Takuzu constructor must reject with an
# AssertionError (empty board, non-list rows, flat cell list, empty
# rows, ragged/undersized grids — confirm the exact rules in takuzu).
boards = [ [], [None], [1, 0, None], [ [], [] ], [ [1,0] ], [ [1,0], [1] ] ]
for b in boards:
    try:
        t = Takuzu(board=b, debug=True)
    except AssertionError as e:
        # Expected: constructor validation rejected the board.
        pass
    else:
        # Constructor accepted an invalid board — fail the test run.
        raise Exception('board={} should throw AssertionError'.format(b))
|
[
"lesander@users.noreply.github.com"
] |
lesander@users.noreply.github.com
|
21a7d146b5d95f1fee3c58b4e611dd502e854c74
|
83fb26fc9fe96c5821c7a13468f205ca6eb4fcda
|
/ICP exercise and assignment/A01/A01_exercise1.py
|
2662b1fc38669910f481aa07bc1481af8bf91817
|
[] |
no_license
|
zc2214/Introduction-to-Computer-Programming
|
e58355fc732a2eacf29aa5141573e64ef1c3f27e
|
95f5e36f102c5ebeeb628b61c3fdad416082ab4f
|
refs/heads/main
| 2023-08-11T23:44:54.766836
| 2021-09-22T14:45:54
| 2021-09-22T14:45:54
| 323,836,431
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 461
|
py
|
# PROGRAMMING ASSIGNMENT 01
# Filename: 'exercise1.py'
#
# Write a program that does the following (in the specified order):
# 1. asks the user to input his family name
# 2. asks the user to input his given name
# 3. then, prints the message Hello <given name> <family name> !!!
#
# WRITE YOUR CODE AFTER THIS LINE
# Prompt for the user's name components, then greet them.
first_name = input("Please enter your firstname")
last_name = input("Please enter your lastname")
print("Hello", first_name, last_name)
|
[
"noreply@github.com"
] |
noreply@github.com
|
45b76c5185d0e6d5434ffd0717722d4e1b9aa0c1
|
c744b20f4d5f4035dd81bf515f6e969a67299309
|
/lists/migrations/0006_auto_20150825_1407.py
|
34243587e4fd8a04e03184790c2e99036ba5781f
|
[] |
no_license
|
jian-en/flyingjay-superlists-project
|
14c94e16658e6aef76019847423b6fd0ac01eebe
|
2c8ad9dfd26d68237b065797f3132872eb0cdaa5
|
refs/heads/master
| 2021-01-02T09:43:37.631559
| 2015-11-03T03:55:03
| 2015-11-03T03:55:03
| 40,744,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lists', '0005_auto_20150823_0227'),
]
operations = [
migrations.AlterField(
model_name='item',
name='text',
field=models.TextField(),
),
]
|
[
"fujian_en@126.com"
] |
fujian_en@126.com
|
d732b74a12857a9cfedd5615c35c20fd705c8355
|
b05e271e498ab231c8e6fd650826cb98a1887c5f
|
/main.py
|
59838bcf3d74bddadd669b317a56301dacea99a9
|
[
"MIT"
] |
permissive
|
tian409/joint-computation-offloading-and-resource-allocation
|
1074e6bee92303757561a0b6a6dfee8663584f3f
|
13e68b71c8e9ae7347a82294a355266c3ce28a81
|
refs/heads/master
| 2023-04-03T15:08:49.180165
| 2021-04-04T05:37:46
| 2021-04-04T05:37:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
# -*- coding: utf-8 -*-
import copy, json, argparse
import torch
from scenario import Scenario
from agent import Agent
from dotdic import DotDic
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def create_agents(opt, sce, scenario, device):
agents = [] # Vector of agents
for i in range(opt.nagents):
agents.append(Agent(opt, sce, scenario, index=i, device=device)) # Initialization, create a CNet for each agent
return agents
def run_episodes(opt, sce, agents, scenario):
global_step = 0
nepisode = 0
action = torch.zeros(opt.nagents,dtype=int)
reward = torch.zeros(opt.nagents)
QoS = torch.zeros(opt.nagents)
state_target = torch.ones(opt.nagents) # The QoS requirement
f= open("DDPG.csv","w+")
f.write("This includes the running steps:\n")
while nepisode < opt.nepisodes:
state = torch.zeros(opt.nagents) # Reset the state
next_state = torch.zeros(opt.nagents) # Reset the next_state
nstep = 0
while nstep < opt.nsteps:
eps_threshold = opt.eps_min + opt.eps_increment * nstep * (nepisode + 1)
if eps_threshold > opt.eps_max:
eps_threshold = opt.eps_max # Linear increasing epsilon
# eps_threshold = opt.eps_min + (opt.eps_max - opt.eps_min) * np.exp(-1. * nstep * (nepisode + 1)/opt.eps_decay)
# Exponential decay epsilon
for i in range(opt.nagents):
action[i] = agents[i].Select_Action(state, scenario, eps_threshold) # Select action
for i in range(opt.nagents):
QoS[i], reward[i] = agents[i].Get_Reward(action, action[i], state, scenario) # Obtain reward and next state
next_state[i] = QoS[i]
for i in range(opt.nagents):
agents[i].Save_Transition(state, action[i], next_state, reward[i], scenario) # Save the state transition
agents[i].Optimize_Model() # Train the model
if nstep % opt.nupdate == 0: # Update the target network for a period
agents[i].Target_Update()
state = copy.deepcopy(next_state) # State transits
if torch.all(state.eq(state_target)): # If QoS is satisified, break
break
nstep += 1
print('Episode Number:', nepisode, 'Training Step:', nstep)
# print('Final State:', state)
f.write("%i \n" % nstep)
nepisode += 1
f.close()
def run_trial(opt, sce):
scenario = Scenario(sce)
agents = create_agents(opt, sce, scenario, device) # Initialization
run_episodes(opt, sce, agents, scenario)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c1', '--config_path1', type=str, help='path to existing scenarios file')
parser.add_argument('-c2', '--config_path2', type=str, help='path to existing options file')
parser.add_argument('-n', '--ntrials', type=int, default=1, help='number of trials to run')
args = parser.parse_args()
sce = DotDic(json.loads(open(args.config_path1, 'r').read()))
opt = DotDic(json.loads(open(args.config_path2, 'r').read())) # Load the configuration file as arguments
for i in range(args.ntrials):
trial_result_path = None
trial_opt = copy.deepcopy(opt)
trial_sce = copy.deepcopy(sce)
run_trial(trial_opt, trial_sce)
|
[
"fenghao2018@bupt.edu.cn"
] |
fenghao2018@bupt.edu.cn
|
f716de44a80a10f01bfaa8b3a8d58b4ec092c945
|
dbe1f4110921a08cb13e22ea325d503bd5627195
|
/chuhuo_2.71/bluedon/monitor/sbin/checkproc.py
|
cd3521785adb14ce48baf65ec961b05655ab0e50
|
[] |
no_license
|
Hehouhua/waf_branches
|
92dc1b1cbecba20f24ef6c7372dde7caa43f9158
|
ca76f3a1ed8150b423474c9e37aee37841a5ee35
|
refs/heads/main
| 2023-01-07T11:33:31.667688
| 2020-11-03T06:58:33
| 2020-11-03T06:58:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
import os, re, sys
rexplogstart = re.compile(r'grep logstart.pl')
rexpwebvisit = re.compile(r'grep webvisit.pl')
def checklogstart():
if not os.path.exists("/usr/local/bdwaf/logs_bridge/data"):
os.popen("mkdir -p /usr/local/bdwaf/logs_bridge/data")
if not os.path.exists("/usr/local/bdwaf/logs_proxy/data"):
os.popen("mkdir -p /usr/local/bdwaf/logs_proxy/data")
flag = 0
pfp = os.popen('ps ax | grep logstart.pl')
lines = pfp.readlines()
for line in lines:
match = rexplogstart.search(line)
if match:
flag += 1
if flag >= len(lines):
os.system('/usr/local/bluedon/monitor/sbin/logstart.pl')
def checkwebvisit():
flag = 0
pfp = os.popen('ps ax | grep webvisit.pl')
lines = pfp.readlines()
for line in lines:
match = rexplogstart.search(line)
if match:
flag += 1
if flag >= len(lines):
os.system('/usr/local/bluedon/monitor/sbin/webvisit.pl')
if __name__ == '__main__':
checklogstart()
checkwebvisit()
|
[
"hanson_wong@qq.com"
] |
hanson_wong@qq.com
|
dc95cfc1d53773ef74245ed5c8a5b6bbbf3ce933
|
65e076e4fcc00a67faa0932b3f3a3d3a3a11e2aa
|
/sdk/python/pulumi_google_native/datastore/v1/_enums.py
|
15df09472641b2ebbeb23bd87aeab08fb357fbf9
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
TheJaySmith-Google/pulumi-google-native
|
816babe5c7316724e02d5b8b9d789df00262bb8e
|
566c295a39fe8c3dd16e4a7894ff6de72423e5da
|
refs/heads/master
| 2023-06-05T06:45:19.979837
| 2021-06-23T11:42:27
| 2021-06-23T11:42:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'GoogleDatastoreAdminV1IndexedPropertyDirection',
'IndexAncestor',
]
class GoogleDatastoreAdminV1IndexedPropertyDirection(str, Enum):
"""
Required. The indexed property's direction. Must not be DIRECTION_UNSPECIFIED.
"""
DIRECTION_UNSPECIFIED = "DIRECTION_UNSPECIFIED"
ASCENDING = "ASCENDING"
DESCENDING = "DESCENDING"
class IndexAncestor(str, Enum):
"""
Required. The index's ancestor mode. Must not be ANCESTOR_MODE_UNSPECIFIED.
"""
ANCESTOR_MODE_UNSPECIFIED = "ANCESTOR_MODE_UNSPECIFIED"
NONE = "NONE"
ALL_ANCESTORS = "ALL_ANCESTORS"
|
[
"noreply@github.com"
] |
noreply@github.com
|
e8f79267ba52969b4af0a0f02f9340977750ba24
|
5002ec313e12d6e5f58d5ef41ea265084ff96373
|
/信息收集工具/modular/Subdomain_name_query.py
|
ff3d469ff26e6418b763ef974be8e1beb300a2bd
|
[] |
no_license
|
IVorder/python
|
9a8dc46d69fb9b5c3d65509348595623b8d47a8a
|
6b60a13dda471ed3f1380b6bf014a33f185e6033
|
refs/heads/master
| 2020-06-21T22:43:41.838924
| 2019-07-18T10:21:28
| 2019-07-18T10:21:28
| 197,569,599
| 10
| 4
| null | 2019-07-18T10:55:49
| 2019-07-18T10:55:47
| null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
# @author:九世
# @time:2019/7/2
# @file:mian.py
from gevent import monkey;monkey.patch_all()
import requests
import config.config
import warnings
import gevent
from multiprocessing import Process
import dns.resolver
from bs4 import BeautifulSoup
from gevent.lock import RLock
warnings.simplefilter("ignore", category=UserWarning)
domains=[]
lock=RLock()
def domain_query():
def wrater(func):
def query(*args,**kwargs):
print('\033[1;32m[+]\033[0m 域名查询:')
headers={'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
url='http://site.ip138.com/{}/domain.htm'.format(*args)
rqt=requests.get(url=url,headers=headers)
rgt=BeautifulSoup(rqt.text,'html.parser').find_all('a',target='_blank')
for c in rgt:
if str(*args) in str(c):
domains.append(c.get_text())
return func(*args,**kwargs)
return query
return wrater
def domain_baopo():
def wrter(func):
def bp(*args,**kwargs):
lock.acquire()
path=r'dict/domain.txt'
dp=[]
dk=open(path,'r',encoding='utf-8')
for d in dk.readlines():
dp.append("{}.{}".format("".join(d.split('\n')),*args))
lock.release()
return func(dp,**kwargs)
return bp
return wrter
@domain_query()
def run(url):
pass
def dns_b(domain):
try:
querys=dns.resolver.query(domain,'A')
for q in querys:
domains.append(domain)
except:
pass
def xc(rg):
rt=[]
try:
for r in rg:
rt.append(gevent.spawn(dns_b,r))
gevent.joinall(rt)
except:
pass
@domain_baopo()
def run2(url):
print('\033[1;32m[+]\033[0m 字典爆破域名开始')
rw=[]
calc=0
for c in url:
if calc==config.config.SUBDOMAIN:
p=Process(target=xc,args=(rw,))
p.start()
calc=0
rw.clear()
rw.append(c)
calc+=1
if len(rw)>0:
p = Process(target=xc, args=(rw,))
p.start()
def cat():
qc=list(set(domains))
for q in qc:
print(q)
|
[
"noreply@github.com"
] |
noreply@github.com
|
d86cb55284f9ec406e508cb0da30cb1564736a7e
|
919fd48a34ca200086f51905d64c21c3b31b6739
|
/CodeMixed-Text-Generator/cm_text_generator/grammar_inference.py
|
0449999d7ffce757d92333a845762acfcc6197a9
|
[
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"BSD-2-Clause",
"MIT",
"Python-2.0",
"PSF-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
mohdsanadzakirizvi/CodeMixed-Text-Generator
|
e89b758ad88a622c058bf1465003ae3c23a55b88
|
47740eeff3ecb46f5294711f4fe5d3a03a6e0b54
|
refs/heads/main
| 2023-06-15T22:43:21.578533
| 2021-04-13T09:16:24
| 2021-04-27T12:46:19
| 384,061,885
| 0
| 0
|
MIT
| 2021-07-08T08:54:14
| 2021-07-08T08:54:14
| null |
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
###GRAMMAR INFERENCE
from .data_structure_definitions import *
def ruleEnlister(root, grammar):
if root.token=="XXXXX":
cond=False
for rule in grammar: ##check false/true
if (rule.lhs.nonTerminal==root.label and len(rule.rhs)==len(root.children)):
#print "Using old rule!"
cond=True
for counter in range(len(rule.rhs)):
if(rule.rhs[counter].nonTerminal!=root.children[counter].label or rule.rhs[counter].index!=root.children[counter].repeatIndex):
cond=False
if cond==True:
root.ruleNum=rule.ruleNum
if(root.ruleNum==-1):
#print "Making new rule!", str(len(grammar))
lhs=grammarPoint(root.label, -1, -1)
rhs=[]
for child in root.children:
rhs.append(grammarPoint(child.label, child.repeatIndex, root.children.index(child)))
grammar.append(grammarRule(len(grammar), lhs, rhs))
root.ruleNum=len(grammar)-1
for child in root.children:
ruleEnlister(child, grammar)
def projectHindiRules(hinRoot, grammar):
if hinRoot.token=="XXXXX":
# print "\nLABEL: ", hinRoot.label, " ", str(hinRoot.ruleNum)
for child in hinRoot.children:
for count in range(len(grammar[hinRoot.ruleNum].rhs)):
#print "(", child.label, grammar[hinRoot.ruleNum].rhs[count].nonTerminal, child.repeatIndex, grammar[hinRoot.ruleNum].rhs[count].index, ")",
if child.label==grammar[hinRoot.ruleNum].rhs[count].nonTerminal and \
child.repeatIndex==grammar[hinRoot.ruleNum].rhs[count].index:
#print "index assigned: ", ind
grammar[hinRoot.ruleNum].rhs[count].hinRank=hinRoot.children.index(child)
#print "incrementing..."
for child in hinRoot.children:
projectHindiRules(child, grammar)
|
[
"mohdsanadzakirizvi@gmail.com"
] |
mohdsanadzakirizvi@gmail.com
|
622914c9a6c8f38dd5339009d187c1a23ea57bf5
|
6bd1aa6b80fd93fd65f3e3f9c6b4cc743fabc076
|
/Laboratorios-Big-Data/MOOC/KMeans/KMeansHackers.py
|
94bd8290cb1a8e974ee767a073e4064bc5d47159
|
[] |
no_license
|
RAricardo/Laboratorios-Big-Data
|
617a7adc5531d29653b65af0a3a3e885a0aa42e8
|
04ebc65ae83007407e9e14f38774ef77a21cbe31
|
refs/heads/master
| 2020-04-29T05:09:51.189057
| 2019-04-08T17:00:46
| 2019-04-08T17:00:46
| 175,872,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,589
|
py
|
# Databricks notebook source
from pyspark.sql import SparkSession
# COMMAND ----------
spark = SparkSession.builder.appName("Kmeans").getOrCreate()
# COMMAND ----------
data = spark.read.csv("/FileStore/tables/hack_data.csv", inferSchema=True, header=True)
# COMMAND ----------
data.printSchema()
# COMMAND ----------
from pyspark.ml.clustering import KMeans
# COMMAND ----------
from pyspark.ml.feature import VectorAssembler
# COMMAND ----------
data.columns
# COMMAND ----------
assembler = VectorAssembler(inputCols=['Session_Connection_Time',
'Bytes Transferred',
'Kali_Trace_Used',
'Servers_Corrupted',
'Pages_Corrupted',
'WPM_Typing_Speed'], outputCol="features")
# COMMAND ----------
final_data = assembler.transform(data)
# COMMAND ----------
final_data.printSchema()
# COMMAND ----------
from pyspark.ml.feature import StandardScaler
# COMMAND ----------
scaler = StandardScaler(inputCol="features", outputCol="Scaled Features")
# COMMAND ----------
scaler_model = scaler.fit(final_data)
# COMMAND ----------
cluster_final_data = scaler_model.transform(final_data)
# COMMAND ----------
kmeans2 = KMeans(featuresCol="Scaled Features", k=2)
# COMMAND ----------
kmeans3 = KMeans(featuresCol="Scaled Features", k=3)
# COMMAND ----------
model_k2 = kmeans2.fit(cluster_final_data)
model_k3 = kmeans3.fit(cluster_final_data)
# COMMAND ----------
model_k3.transform(cluster_final_data).groupBy("prediction").count().show()
# COMMAND ----------
model_k2.transform(cluster_final_data).groupBy("prediction").count().show()
# COMMAND ----------
|
[
"rrazopardc@eafit.edu.co"
] |
rrazopardc@eafit.edu.co
|
185a3393a192094de5e11ae5133799e98d58a651
|
9b04206109e36d5f4f7cc4820546546ac239c5e0
|
/greedy/ATM_problem.py
|
39cc9ea015a03ed7d3442b6e7512c88cda49fc4d
|
[] |
no_license
|
joon3007/Algorithm
|
28417fffde40a79aac54375b57b31071dcf6bc4d
|
e45b6379f67272db0997156deca5713aa2113348
|
refs/heads/master
| 2022-12-14T01:33:25.050675
| 2020-09-09T12:36:02
| 2020-09-09T12:36:02
| 291,960,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,100
|
py
|
'''
description
인하은행에는 ATM이 1대밖에 없다. 지금 이 ATM앞에 N명의 사람들이 줄을 서있다.
사람은 1번부터 N번까지 번호가 매겨져 있으며, i번 사람이 돈을 인출하는데 걸리는 시간은 Pi분이다.
사람들이 줄을 서는 순서에 따라서, 돈을 인출하는데 필요한 시간의 합이 달라지게 된다.
예를 들어, 총 5명이 있고, P1 = 3, P2 = 1, P3 = 4, P4 = 3, P5 = 2 인 경우를 생각해보자.
[1, 2, 3, 4, 5] 순서로 줄을 선다면, 1번 사람은 3분만에 돈을 뽑을 수 있다.
2번 사람은 1번 사람이 돈을 뽑을 때 까지 기다려야 하기 때문에, 3+1 = 4분이 걸리게 된다.
3번 사람은 1번, 2번 사람이 돈을 뽑을 때까지 기다려야 하기 때문에, 총 3+1+4 = 8분이 필요하게 된다.
4번 사람은 3+1+4+3 = 11분, 5번 사람은 3+1+4+3+2 = 13분이 걸리게 된다.
이 경우에 각 사람이 돈을 인출하는데 필요한 시간의 합은 3+4+8+11+13 = 39분이 된다.
줄을 [2, 5, 1, 4, 3] 순서로 줄을 서면, 2번 사람은 1분만에, 5번 사람은 1+2 = 3분,
1번 사람은 1+2+3 = 6분, 4번 사람은 1+2+3+3 = 9분, 3번 사람은 1+2+3+3+4 = 13분이 걸리게 된다.
각 사람이 돈을 인출하는데 필요한 시간의 합은 1+3+6+9+13 = 32분이다.
이 방법보다 더 필요한 시간의 합을 최소로 만들 수는 없다.
줄을 서 있는 사람의 수 N과 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어졌을 때,
각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 구하는 프로그램을 작성하시오.
input
첫째 줄에 사람의 수 N(1 ≤ N ≤ 1,000)이 주어진다. 둘째 줄에는 각 사람이 돈을 인출하는데 걸리는 시간 Pi가 주어진다. (1 ≤ Pi ≤ 1,000)
output
첫째 줄에 각 사람이 돈을 인출하는데 필요한 시간의 합의 최솟값을 출력한다.
'''
num = int(input())
times = list(map(int, input().split()))
times.sort()
result = 0
time = 0
for i in times:
time += i
result += time
print(result)
|
[
"joon4141@gmail.com"
] |
joon4141@gmail.com
|
9d15855256587b846eda68310ac6b8af5d598e25
|
eb91e8711243b70b14c38f17dbc7951dab430d2a
|
/run.py
|
8e560403e372b20fd1127318b95c7d69ec841267
|
[
"MIT"
] |
permissive
|
LittlePanic/Flask-Vue-Singlepage-Project
|
718d96390df99ee0f3654f8578073501ce5e3092
|
3d8ddad9dd2a4a41e76e3e248f31a4505801ea83
|
refs/heads/master
| 2022-12-23T19:36:06.992444
| 2018-07-05T03:38:59
| 2018-07-05T03:38:59
| 139,595,156
| 0
| 1
|
MIT
| 2022-12-16T22:20:15
| 2018-07-03T14:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 80
|
py
|
from backend.app import app
if __name__ == "__main__":
app.run(debug=True)
|
[
"2177890574@qq.com"
] |
2177890574@qq.com
|
672f47dbc06ff7e663a43bfdf34432fe9a92e2f4
|
5875c68d4e34193b9e565a6f34469612cfdc649c
|
/pyMap_0.9.4/pyCursors.py
|
a63f9c2bdf12abc465b5df4d587e61b1599a645e
|
[] |
no_license
|
Naxs-me/Software_development_tycoon
|
59d7059fb21b1655b05ad0057e17033603ec7377
|
b8a6166589a6231e607001ef84f927d2d15792c0
|
refs/heads/master
| 2020-12-15T00:13:25.496993
| 2020-01-19T16:01:03
| 2020-01-19T16:01:03
| 234,924,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,107
|
py
|
#31 lines of code (7/21/2012)
import pygame
import os
#the images size must be a multiple of 8
#the image must contain only 3 colors
#(0,0,0)black, (255,255,255)white, (255,0,255)tranparent(pink)
def set_cursor_from_image(image, hotspot = (0,0)):
#if os.path.isfile((cwd+'/'+image)):
img = pygame.image.load(image).convert()
w,h = img.get_size()
strings = []
size = (w,h)
if w%8 == 0 and h%8 == 0:
black = pygame.Color(0,0,0,255)
white = pygame.Color(255,255,255,255)
trans = pygame.Color(255,0,255,255)
img.lock()
for r in xrange(0, w):
pix_str = ""
for c in xrange(0, h):
color = img.get_at((r,c))
if color == white:
pix_str += 'X'
if color == black:
pix_str += '.'
if color == trans:
pix_str += ' '
strings.append(pix_str)
img.unlock()
new_cursor = pygame.cursors.compile(strings)
pygame.mouse.set_cursor(size, hotspot, *new_cursor)
|
[
"naxs.me@gmail.com"
] |
naxs.me@gmail.com
|
d5e7ae3bd1017599518278f12c78a1b1a2662ff3
|
4138376af721c583944882b68235746cd9637fd6
|
/7/sunjiayin/cpNbnet.py
|
305e2c1c4681006598eb80310af7c334d54f7acb
|
[] |
no_license
|
hulaoan/homework-arch-5
|
9df792281b7ac92abc166ad80e69a5c2a59b2c9e
|
1c1b07f8ebb1b2f9906c0cd29cef8227fed3c7fd
|
refs/heads/master
| 2021-01-14T13:58:05.883628
| 2015-12-25T05:05:16
| 2015-12-25T05:05:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,361
|
py
|
#!/usr/bin/env python
# coding:utf-8
import socket
import select
import time
import pdb
__all__ = ["nbNet"]
from nbNetUtils import *
class STATE:
def __init__(self):
self.state = "accept" #定义状态
self.have_read = 0 #记录读了的字节
self.need_read = 10 #头文件需要读取10个字节
self.have_write = 0 #记录读了的字节
self.need_write= 0 #需要写的字节
self.buff_read = "" #读缓存
self.buff_write = "" #写缓存
self.sock_obj = "" #sock对象
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
tmp_state = STATE() #实例化类
tmp_state.sock_obj = sock #定义类中sock
self.conn_state[sock.fileno()] = tmp_state #把sock加入到字典中
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
sock_state = self.conn_state[fd] #取出fd对应连接
sock = sock_state.sock_obj #取出fd的sock
conn, addr = sock.accept() #取出连接请求
conn.setblocking(0) #设置非阻塞模式
return conn #返回连接
def close(self, fd):
try:
sock = self.conn_state[fd].sock_obj #取出fd的sock
sock.close()#关闭sock
except:
dbgPrint("Close fd: %s" % fd)
finally:
self.epoll_sock.unregister(fd) #将fd重epoll中注销
self.conn_state.pop(fd) #踢出字典
def read(self, fd):
try:
sock_state = self.conn_state[fd] #取出fd对应连接
conn= sock_state.sock_obj #取出fd连接请求
if sock_state.need_read <= 0: #需要读取字节为空报错
raise socket.error
one_read = conn.recv(sock_state.need_read) #读取传输的字符
dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %(fd, one_read, sock_state.need_read))
if len(one_read) == 0: #读取数据为0报错
raise socket.error
sock_state.buff_read += one_read #把读取数据存到读缓存中
sock_state.have_read += len(one_read) #已经读取完的数据量
sock_state.need_read -= len(one_read) #还需要读取数据的量
sock_state.printState()
if sock_state.have_read == 10: #10字节为头文件处理
header_said_need_read = int(sock_state.have_read) #读取数据的量
if header_said_need_read <= 0: #如果还需读0字节报错
raise socket.error
sock_state.need_read += header_said_need_read #还需读取数量变化
sock_state.buff_read = '' #读缓存清空
sock_state.printState()
return "readcontent" #还需读取数据
elif sock_state.need_read == 0:
return "process" #读取数据完成,转换状态
else:
return "readmore" #还需读取数据
except (socket.error, ValueError), msg:
try:
if msg.errno == 11: #errno等于11,尝试进行一次读取
dbgPrint("11" + msg)
return "retry"
except:
pass
return "closing"
def write(self, fd):
sock_state = self.conn_state[fd] #取出fd对应的连接构造体
conn = sock_state.sock_obj #取出fd对于连接
last_have_send = sock_state.have_write #已经写数据的量
try:
have_send = conn.send(sock_state.buff_write[last_have_send:]) #发送剩下的数据
sock_state.have_write += have_send #已经写的数据量
sock_state.need_write -= have_send #还需写的数据量
if sock_state.need_write == 0 and sock_state.have_write !=0: #写数据完成
sock_state.printState()
dbgPrint("\n write date end")
return "writecomplete" #返回写入完成
else:
return "writemore" #返回计算写入
except socket.error, msg:
return "closing"
def run(self):
while True:
epoll_list = self.epoll_sock.poll() #定义poll()事件发生的list
for fd, events in epoll_list:
sock_state = self.conn_state[fd] #取出fd构造体
if select.EPOLLHUP & events: #文件描述符挂断
dbgPrint("EPOLLHUP")
sock_state.state = "closing" #fd状态设置为closing
elif select.EPOLLERR & events:
dbgPrint("EPOLLERR") #文件描述符出错
sock_state.state = "closing" #对应fd状态为closing
self.state_machine(fd) #状态机调用
def state_machine(self, fd):
sock_state = self.conn_state[fd] #fd构造体
self.sm[sock_state.state](fd) #通过sm字典调用对应状态的函数
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
self.conn_state = {} #定义字典保存每个连接状态
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_sock.bind((addr, port))
self.listen_sock.listen(10) # 排队长度
self.setFd(self.listen_sock) #定义listen socket 放入字典conn_state
self.epoll_sock = select.epoll() #初始化fd的epoll
self.epoll_sock.register(self.listen_sock.fileno(), select.EPOLLIN ) #linten可以读的描述符
self.logic = logic #业务处理
self.sm = {
"accept" : self.accept2read,
"read" : self.read2process,
"write" : self.write2read,
"process": self.process,
"closing": self.close,
} #状态调用机的字典
dbgPrint('\n__init__: end, register no: %s' %self.listen_sock.fileno() )
def process(self, fd):
sock_state = self.conn_state[fd]
response = self.logic(sock_state.buff_read) #业务函数处理
sock_state.buff_write = "%010d%s" % (len(response), response) #发送的数据
sock_state.need_write = len(sock_state.buff_write) #需要发送的长度
sock_state.state = "write" #fd对应的状态
self.epoll_sock.modify(fd, select.EPOLLOUT) #fd对应的epoll为改写模式
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(conn.fileno(), select.EPOLLIN) #发送数据后重新将fd的epoll改成读
self.setFd(conn) #fd生成构造体
self.conn_state[conn.fileno()].state = "read" #fd状态为read
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
#状态转换
try:
read_ret = self.read(fd) #read函数返回值
except (Exception), msg:
dbgPrint(msg)
read_ret = "closing"
if read_ret == "process":# 读取完成,转换到process
self.process(fd)
elif read_ret == "readcontent":# readcontent、readmore、retry 继续读取
pass
elif read_ret == "readmore":
pass
elif read_ret == "retry":
pass
elif read_ret == "closing":
self.conn_state[fd].state = 'closing' #状态为closing关闭连接
self.state_machine(fd)
else:
raise Exception("impossible state returned by self.read")
def write2read(self, fd):
try:
write_ret = self.write(fd) #函数write返回值
except socket.error, msg: #出错关闭连接
write_ret = "closing"
if write_ret == "writemore": #继续写
pass
elif write_ret == "writecomplete":#写完成
sock_state = self.conn_state[fd]
conn = sock_state.sock_obj
self.setFd(conn) #重置见连接fd构造体
self.conn_state[fd].state = "read" #将fd状态设置为read
self.epoll_sock.modify(fd, select.EPOLLIN) #epoll状态为可读
elif write_ret == "closing":# 发生错误关闭
dbgPrint(msg)
self.conn_state[fd].state = 'closing'
self.state_machine(fd)
if __name__ == '__main__':
def logic(d_in):
return(d_in[::-1])
reverseD = nbNet('0.0.0.0', 9060, logic)
reverseD.run()
|
[
"sunjiayin@teach.works"
] |
sunjiayin@teach.works
|
ea6bb392af9c9e6b8d6c5ecb56a68b0cb11577a6
|
7040d642877f70360ca88a065ccf92b3c63dfd7b
|
/剑指 Offer 18. 删除链表的节点.py
|
f351503d1cc241f162b76a62e9ddfe892195285b
|
[
"BSD-2-Clause"
] |
permissive
|
YuLili-git/leetcode_offer
|
077fb1864f1c8e3258f5b9f065b7c0e71c8ccf8f
|
268940aa4e57a02fe635b7d6f6038f2b204ca968
|
refs/heads/main
| 2023-08-24T19:07:37.650616
| 2021-10-13T16:07:28
| 2021-10-13T16:07:28
| 370,324,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,088
|
py
|
#给定单向链表的头指针和一个要删除的节点的值,定义一个函数删除该节点。
#返回删除后的链表的头节点。
#注意:此题对比原题有改动
#示例 1:
#输入: head = [4,5,1,9], val = 5
#输出: [4,1,9]
#解释: 给定你链表中值为 5 的第二个节点,那么在调用了你的函数之后,该链表应变为 4 -> 1 -> 9.
#示例 2:
#输入: head = [4,5,1,9], val = 1
#输出: [4,5,9]
#解释: 给定你链表中值为 1 的第三个节点,那么在调用了你的函数之后,该链表应变为 4 -> 5 -> 9.
#说明:
#题目保证链表中节点的值互不相同
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def deleteNode(self, head: ListNode, val: int) -> ListNode:
if head.val == val:
return head.next
cur,pre = head, head.next
while pre and pre.val != val:
cur = pre
pre = pre.next
if pre:
cur.next = pre.next
return head
|
[
"noreply@github.com"
] |
noreply@github.com
|
246ec729ab0710529af7fd9594413b7242ed91fb
|
aba0b5002c040fa1b20bae5d7ac81c601395901f
|
/vistrails/packages/pandas/identifiers.py
|
63685ce6e7907a39552e23085e227ce9fd8bac89
|
[
"BSD-3-Clause"
] |
permissive
|
skylogic004/VisTrails
|
2673ca04160e776db17811d98b070f70e1d2e385
|
bc0d95ceac6e75d6ffb083e8cdab8c62a90d4b00
|
refs/heads/master
| 2021-06-23T01:16:16.697903
| 2017-08-24T21:28:33
| 2017-08-24T21:28:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
from __future__ import division, print_function
identifier = 'org.vistrails.vistrails.pandas'
name = 'pandas'
version = '0.0.1'
|
[
"matt@skylogic.ca"
] |
matt@skylogic.ca
|
39042a14dedf3d1a3d6e06d5f15a0915493b8514
|
66a967fac0bc5dfdfe28ad0fd5464ed9113429bd
|
/HobbyCoding/src/ListPermutation.py
|
6e6d6a9ef2a7a7f0a2211dc22bed93437611220c
|
[
"Apache-2.0"
] |
permissive
|
inspectorG4dget/Hobby-Coding
|
a37430320e7a74805bc7740933e217d004fa9714
|
41e82dbcc73e328b43bebd037b2df414f0837ca6
|
refs/heads/master
| 2020-12-24T17:17:37.589058
| 2012-07-10T05:18:56
| 2012-07-10T05:18:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
'''
Created on Oct 4, 2010
@author: ashwin
Licensed to Ashwin Panchapakesan under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
Ashwin licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def permute(L):
if L == []:
return []
else:
for i in range(len(L)-1):
a = [L[i]]
b = L[:i]
c = L[i+1 :]
print "B:", b
print "C:", c
d = b + c
return a + permute(d)
def includeMembers(L):
if not L:
return L
else:
for i in L[0]:
includeMembers(L[1:])[-1] += i
if __name__ == "__main__":
print includeMembers(['asdf', 'jkl;'])
|
[
"topgunzurhero@gmail.com"
] |
topgunzurhero@gmail.com
|
fb2e193a24ae586d0c3d286e0fec5f4ca52eaf14
|
674f1ecdd8a196b5a271b556ed7e4d274fde63a1
|
/article/migrations/0002_auto_20161129_2304.py
|
65e17784a9ab696ab9749961108d38c587c88ee8
|
[] |
no_license
|
baby5/Django-Blog
|
fc57c06bac110c56662bcea20eb9c18579d20827
|
1e2f1a8b0589d87dea023d7e6d78376d0880ca27
|
refs/heads/master
| 2021-01-13T13:19:13.836449
| 2016-12-20T10:49:23
| 2016-12-20T10:49:23
| 72,647,232
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,315
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 15:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('created_time', models.DateTimeField(auto_now_add=True)),
('last_modified_time', models.DateTimeField(auto_now=True)),
],
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-last_modified_time']},
),
migrations.RenameField(
model_name='article',
old_name='date_time',
new_name='created_time',
),
migrations.AddField(
model_name='article',
name='abstract',
field=models.CharField(blank=True, help_text=b'arbitrary', max_length=54, null=True),
),
migrations.AddField(
model_name='article',
name='last_modified_time',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='article',
name='likes',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='article',
name='status',
field=models.CharField(choices=[(b'd', b'Draft'), (b'p', b'Published')], default=b'd', max_length=1),
),
migrations.AddField(
model_name='article',
name='topped',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='article',
name='views',
field=models.PositiveIntegerField(default=0),
),
migrations.AlterField(
model_name='article',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='article.Category'),
),
]
|
[
"zxnzysj@163.com"
] |
zxnzysj@163.com
|
ef168493665590dfa9c2c362d6e87e14550a7162
|
1e1ab6aba8ab3d05fe61df3b6a5fabbcdd00676a
|
/e_commerce_app/api/migrations/0002_remove_event_redundancy.py
|
86c973a2de49034f1f646a2664d9eaf5bda0ec1e
|
[] |
no_license
|
Batuhanipekci/E-Commerce
|
4f548f3e59cfa68c422f91419a53dadf175dcad3
|
45350d74e344686f619c1f9c50dac08e8c6eebe2
|
refs/heads/master
| 2023-06-02T01:05:44.647508
| 2021-06-22T20:19:13
| 2021-06-22T20:19:13
| 378,535,014
| 1
| 0
| null | 2021-06-22T20:19:14
| 2021-06-20T01:26:24
|
Python
|
UTF-8
|
Python
| false
| false
| 533
|
py
|
# Generated by Django 3.0.7 on 2021-06-20 22:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='krdetailsview',
name='event',
),
migrations.RemoveField(
model_name='krtransaction',
name='event',
),
migrations.RemoveField(
model_name='krcounter',
name='event',
),
]
|
[
"batuhanipekci@hotmail.com"
] |
batuhanipekci@hotmail.com
|
22ffc7c4ae1f6b16b2ece3c70722f0a2d0ec48c5
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2480/59018/262642.py
|
80da0919c460c290863470859367203af1d15933
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 281
|
py
|
def even_odd(N,a):
b=[]
for j in a:
if j%2==0:
b.append(j)
a.pop(j)
c=b+a
return c
T=int(input())
for i in range(T):
N=int(input())
info=input().split(' ')
a=[int(y) for y in info]
print(even_odd(N,a))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
ce978aea403ff050f84bd8c5e869fff0a69f22c8
|
fc22d8e8178aa4a47d360f1c83990ee8be1fc20e
|
/tools/md5_function.py
|
d2ce3e93b1ac9467b50883af0188b3663e7af8bb
|
[] |
no_license
|
moujiangliu/interface
|
a13b5ebe86439f2bae55cbecd02ab5e65a77288b
|
b6e968271cb9bd1287a9b4950a6ccb69a7720036
|
refs/heads/master
| 2023-02-03T08:56:43.205534
| 2020-12-25T17:05:02
| 2020-12-25T17:05:02
| 323,383,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,162
|
py
|
# -*- coding:utf-8 -*-
import base64
import hashlib
class Secret(object):
'''
实现各种加密方式
'''
def __init__(self, string):
self._string = string.encode('utf-8')
def md5(self):
'''
md5加密方法
:return:
'''
try:
sign = hashlib.md5(self._string).hexdigest()
return sign
except:
return False
def sha1(self):
'''
实现sha1的加密方法
:return:
'''
try:
sign = hashlib.sha1(self._string).hexdigest()
return sign
except:
return False
def base64encode(self):
'''
实现一个base64 encode的方法封装
'''
try:
sign = base64.b64encode(self._string).decode('utf-8')
return sign
except:
return False
def base64decode(self):
'''
base64 decode的方法封装 (解码)
:return:
'''
try:
sign = base64.b64decode(self._string).decode('utf-8')
return sign
except:
return False
|
[
"moujiang.liu@aliyun.com"
] |
moujiang.liu@aliyun.com
|
ad6320700a9871fd710ca5dc3b06b8878292f571
|
45a5c06c89d84e689b528ebd05f982914dc9f0f2
|
/rl_bolts/buffers.py
|
a53f82d1a6403bd000f4ecf561fe9bcbc8924a79
|
[
"Apache-2.0"
] |
permissive
|
jfpettit/rl_bolts
|
be0f2e56af3bab2effd5c0a0723b5eb13050fa2a
|
c3c3b3f91ee192048912fd48f2655b46526918a7
|
refs/heads/master
| 2022-11-30T15:53:32.316481
| 2020-08-14T05:45:47
| 2020-08-14T05:45:47
| 285,760,715
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,576
|
py
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_buffers.ipynb (unless otherwise specified).
__all__ = ['PGBuffer', 'ReplayBuffer']
# Cell
import numpy as np
from scipy.signal import lfilter
from typing import Optional, Any, Union
import torch
import gym
# Cell
class PGBuffer:
    """
    A buffer for storing trajectories experienced by an agent interacting
    with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
    for calculating the advantages of state-action pairs.

    This class was written by Joshua Achaim at OpenAI. It was adapted to use PyTorch Tensors instead of NumPy arrays for the
    observations and actions.

    Args:
    - obs_dim (tuple or int): Dimensionality of input feature space.
    - act_dim (tuple or int): Dimensionality of action space.
    - size (int): buffer size.
    - gamma (float): reward discount factor.
    - lam (float): Lambda parameter for GAE-Lambda advantage estimation
    """

    def __init__(
        self,
        obs_dim: Union[tuple, int],
        act_dim: Union[tuple, int],
        size: int,
        gamma: Optional[float] = 0.99,
        lam: Optional[float] = 0.95,
    ):
        # obs/act are stored as torch tensors (model inputs); the scalar
        # per-step statistics stay in NumPy so the GAE math below is cheap.
        self.obs_buf = torch.zeros(self._combined_shape(size, obs_dim), dtype=torch.float32)
        self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        # ptr: next write slot; path_start_idx: first slot of the current trajectory.
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(
        self,
        obs: torch.Tensor,
        act: torch.Tensor,
        rew: Union[int, float, np.array],
        val: Union[int, float, np.array],
        logp: Union[float, np.array],
    ):
        """
        Append one timestep of agent-environment interaction to the buffer.

        Args:
        - obs (torch.Tensor): Current observation to store.
        - act (torch.Tensor): Current action.
        - rew (int or float or np.array): Current reward from environment.
        - val (int or float or np.array): Value estimate for the current state.
        - logp (float or np.array): log probability of chosen action under current policy distribution.
        """
        assert self.ptr < self.max_size  # buffer has to have room so you can store
        self.obs_buf[self.ptr] = obs
        self.act_buf[self.ptr] = act
        self.rew_buf[self.ptr] = rew
        self.val_buf[self.ptr] = val
        self.logp_buf[self.ptr] = logp
        self.ptr += 1

    def finish_path(self, last_val: Optional[Union[int, float, np.array]] = 0):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.

        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).

        Args:
        - last_val (int or float or np.array): Estimate of rewards-to-go. If trajectory ended, is 0.
        """
        path_slice = slice(self.path_start_idx, self.ptr)
        # Append the bootstrap value so deltas/returns can look one step ahead.
        rews = np.append(self.rew_buf[path_slice], last_val)
        vals = np.append(self.val_buf[path_slice], last_val)
        # the next two lines implement GAE-Lambda advantage calculation
        deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
        self.adv_buf[path_slice] = self._discount_cumsum(deltas, self.gamma * self.lam)
        # the next line computes rewards-to-go, to be targets for the value function
        self.ret_buf[path_slice] = self._discount_cumsum(rews, self.gamma)[:-1]
        self.path_start_idx = self.ptr

    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.

        Returns:
        - obs_buf (torch.Tensor): Buffer of observations collected.
        - act_buf (torch.Tensor): Buffer of actions taken.
        - adv_buf (torch.Tensor): Advantage calculations.
        - ret_buf (torch.Tensor): Buffer of earned returns.
        - logp_buf (torch.Tensor): Buffer of log probabilities of selected actions.
        """
        assert self.ptr == self.max_size  # buffer has to be full before you can get
        self.ptr, self.path_start_idx = 0, 0
        # the line implement the advantage normalization trick
        adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
        # 1e-8 guards against division by zero when all advantages are equal.
        self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + 1e-8)
        return [
            self.obs_buf,
            self.act_buf,
            torch.as_tensor(self.adv_buf, dtype=torch.float32),
            torch.as_tensor(self.ret_buf, dtype=torch.float32),
            torch.as_tensor(self.logp_buf, dtype=torch.float32)
        ]

    def _combined_shape(
        self, length: Union[int, np.array], shape: Optional[Union[int, tuple]] = None
    ):
        """
        Return tuple of combined shapes from input length and tuple describing shape.

        Args:
        - length (int or np.array): Length of resultant shape.
        - shape (int or tuple): Other shape dimensions to combine.

        Returns:
        - tuple of shape dimensions
        """
        if shape is None:
            return (length,)
        return (length, shape) if np.isscalar(shape) else (length, *shape)

    def _discount_cumsum(self, x: np.array, discount: float):
        """
        magic from rllab for computing discounted cumulative sums of vectors.

        input:
            vector x,
            [x0,
             x1,
             x2]

        output:
            [x0 + discount * x1 + discount^2 * x2,
             x1 + discount * x2,
             x2]
        """
        # lfilter on the reversed vector computes the suffix-discounted sum
        # in C; reversing again restores the original ordering.
        return lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
# Cell
class ReplayBuffer(PGBuffer):
    """
    A replay buffer for off-policy RL agents.

    This class is borrowed from OpenAI's SpinningUp package: https://spinningup.openai.com/en/latest/

    Args:
    - obs_dim (tuple or int): Dimensionality of input feature space.
    - act_dim (tuple or int): Dimensionality of action space.
    - size (int): buffer size.
    """

    def __init__(
        self, obs_dim: Union[tuple, int], act_dim: Union[tuple, int], size: int
    ):
        # Observations before/after the transition share one shape.
        obs_shape = self._combined_shape(size, obs_dim)
        self.obs1_buf = torch.zeros(obs_shape, dtype=torch.float32)
        self.obs2_buf = torch.zeros(obs_shape, dtype=torch.float32)
        self.act_buf = torch.zeros(self._combined_shape(size, act_dim), dtype=torch.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.done_buf = np.zeros(size, dtype=np.float32)
        # ptr wraps around (ring buffer); size saturates at max_size.
        self.ptr, self.size, self.max_size = 0, 0, size

    def store(
        self,
        obs: torch.Tensor,
        act: Union[float, int, torch.Tensor],
        rew: Union[float, int],
        next_obs: torch.Tensor,
        done: bool,
    ):
        """
        Append one timestep of agent-environment interaction to the buffer.

        Args:
        - obs (torch.Tensor): Current observations.
        - act (float or int or torch.Tensor): Current action.
        - rew (float or int): Current reward
        - next_obs (torch.Tensor): Observations from next environment step.
        - done (bool): Whether the episode has reached a terminal state.
        """
        slot = self.ptr
        self.obs1_buf[slot] = obs
        self.obs2_buf[slot] = next_obs
        self.act_buf[slot] = act
        self.rew_buf[slot] = rew
        self.done_buf[slot] = done
        # Advance the ring pointer; oldest entries get overwritten once full.
        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample_batch(self, batch_size: Optional[int] = 32):
        """
        Sample a batch of agent-environment interaction from the buffer.

        Args:
        - batch_size (int): Number of interactions to sample for the batch.

        Returns:
        - tuple of batch tensors (obs, next_obs, act, rew, done)
        """
        idxs = np.random.randint(0, self.size, size=batch_size)
        sampled = (
            self.obs1_buf[idxs],
            self.obs2_buf[idxs],
            self.act_buf[idxs],
            self.rew_buf[idxs],
            self.done_buf[idxs],
        )
        return tuple(torch.as_tensor(part, dtype=torch.float32) for part in sampled)

    def get(self):
        """
        Get all contents of the batch.

        Returns:
        - list of PyTorch Tensors; full contents of the buffer.
        """
        buffers = (self.obs1_buf, self.obs2_buf, self.act_buf, self.rew_buf, self.done_buf)
        return [torch.as_tensor(buf, dtype=torch.float32) for buf in buffers]
|
[
"jfpettit@gmail.com"
] |
jfpettit@gmail.com
|
20dcb6e05c6420b481455112a093bca40a513956
|
a219c9b0f3ccd1b35c3bb7bb3c7b50e1d9d8ef93
|
/arasınav_tbb_s4.py
|
ce88476ccc8238735b3aadf7d040888c661fa98e
|
[] |
no_license
|
f0xmulder/python_ornekleri
|
3293541b5d4e594dc39e6df623e47ecd4e5e94c2
|
d1ebbcefdd7390a4e20a61864b150097f9919e29
|
refs/heads/master
| 2022-11-04T07:12:20.766931
| 2017-06-22T13:30:45
| 2017-06-22T13:30:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,489
|
py
|
# Soru 4 (Question 4): parse chemical formulas and report the atom count per element.
element = ""  # characters of the element token currently being accumulated
tur = -1  # classification of the last character (2=upper, 1=lower, 0=digit); -1 = none yet
cikti = ""  # NOTE(review): never used below -- appears to be leftover; confirm before removing
def turOgren(deger):
    """Classify a single character.

    Returns:
        2 for an ASCII uppercase letter, 1 for an ASCII lowercase letter,
        0 for a digit, -1 for anything else.

    Fixes the original ASCII-range check (49..57), which wrongly excluded
    the digit '0' (ASCII 48), so counts like "H2O" worked but "C10H8" lost
    the zero.  Character comparisons replace the ord() arithmetic.
    """
    if 'A' <= deger <= 'Z':
        return 2
    elif 'a' <= deger <= 'z':
        return 1
    elif '0' <= deger <= '9':
        return 0
    # Explicit fallback (the original implicitly returned None here).
    return -1
def elementAyristir(element):  # parses one element token (symbol + optional count) split off the compound
    # transElement accumulates the letters of the symbol; adet the digit characters.
    transElement = ""
    adet = ""
    for j in element:
        tur = turOgren(j)
        if tur == 2 or tur == 1:
            transElement = transElement + j
        elif tur == 0:
            adet = adet + j
    if adet == "":  # no explicit count means exactly one atom of this element
        adet = "1"
    # Python 2 print statement; output is Turkish: "<symbol> elementinden <count> tane var"
    # ("there are <count> of element <symbol>")
    print transElement,"elementinden",adet,"tane var"
# Main loop: read a compound formula per line and report each element's count.
while (True):
    giris=raw_input("element giriniz: ")  # Turkish prompt: "enter an element/compound"
    for i in giris:
        tur = turOgren(i)
        if tur == 2:  # uppercase letter starts a new element symbol
            if element == "":
                element = i
            else:
                # Flush the previous element before starting the new one.
                elementAyristir(element)
                element = i
        elif tur == 1 :  # lowercase letter continues the current symbol
            element = element + i
        elif tur == 0:  # digit belongs to the current element's count
            element = element + i
    elementAyristir(element)  # flush the final element of the line
    element = ""
    tur = -1
|
[
"noreply@github.com"
] |
noreply@github.com
|
91984d48b3742244adf93f8e7500b8c3efa80728
|
68bbf3faecfdae707909647dce9a1dcffcb3491a
|
/searchNodeInBST.py
|
af52f8523b1f404f8de89e507130ec104cd462e8
|
[] |
no_license
|
Aniket-1/leetcode
|
d58c4b8e92888d7af000552292477e36c9a503cf
|
3cb3274888c4f182f44d9eba513f92a669f9d11b
|
refs/heads/main
| 2023-03-19T03:34:16.064981
| 2021-03-05T05:49:34
| 2021-03-05T05:49:34
| 334,960,115
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# You are given the root of a binary search tree (BST) and an integer val.
# Find the node whose value equals val and return the subtree rooted at
# that node; return None if no such node exists.
# (The original lines used C-style '//' comments, which are a Python
# syntax error -- converted to '#'.)

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution:
    # String annotations: TreeNode is supplied by the judge, not defined in
    # this file, so a bare `TreeNode` annotation raised NameError at class
    # definition time.
    def searchBST(self, root: "TreeNode", val: int) -> "TreeNode":
        """Iteratively descend the BST; return the node holding ``val``
        or None when absent.  O(h) time, O(1) extra space."""
        while root:
            if root.val > val:
                root = root.left    # target is smaller: go left
            elif root.val < val:
                root = root.right   # target is larger: go right
            else:
                return root         # exact match
        return root                 # None when not found
|
[
"noreply@github.com"
] |
noreply@github.com
|
a6ab4f744773dd3b24e1bb3cec4fe14a538e8c0e
|
5cb6b9b654ced936aa9d7dfc665b83a1fdd19ab6
|
/pyqt/first.py
|
81310a8f90620ce0dc80de2b269edbbed409581a
|
[] |
no_license
|
guoabyss/LearnMore
|
6ed32006719ed0023d32d91af7254d1ed85457e7
|
3cc39fedd5cb5cd915721ee313526213c81ced6d
|
refs/heads/master
| 2022-10-12T15:56:17.240679
| 2020-06-14T14:24:19
| 2020-06-14T14:24:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == "__main__":
    # One QApplication per process; it owns the Qt event loop.
    app = QApplication(sys.argv)
    # Build and position the main window.
    window = QWidget()
    window.resize(400, 150)
    window.move(300, 300)
    # Window title kept verbatim ("My first GUI" in Chinese).
    window.setWindowTitle("第一个GUI")
    window.show()
    # Hand control to Qt; exit with the event loop's return code.
    sys.exit(app.exec_())
|
[
"836463194@qq.com"
] |
836463194@qq.com
|
37307f0abd5565002723b66dd7bdb750cebcbf2a
|
69a4e83cad7b3d5e5f35761e7223002a6940d061
|
/2/2.py
|
98627f4f26b66f99efa3bfbffdaddc29b90b2d8d
|
[] |
no_license
|
c0mr4d3/adventofcode2020
|
408d01863b1b94872c77ab1b75e210c7b975574c
|
6e506d4b170e045643ffdbd095b4a209721670ec
|
refs/heads/main
| 2023-01-21T15:25:22.486170
| 2020-12-04T07:38:13
| 2020-12-04T07:38:13
| 317,858,777
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
# Advent of Code 2020, day 2 part 2: count passwords where EXACTLY ONE of
# the two 1-indexed positions holds the given character.
# Fixed: the input file is now closed via a context manager (the original
# leaked the handle returned by open()).
with open("/home/comrade/Funstuff/adventofcode2020/2/input.txt") as infile:
    # x[:-1] strips the trailing newline from each line.
    arr = [x[:-1] for x in infile.readlines()]
count = 0
for s in arr:
    # Each line looks like "<min>-<max> <char>: <password>".
    maxm = int(s[s.index("-")+1:s.index(" ")])
    minm = int(s[:s.index("-")])
    chrr = s[s.index(" ")+1]
    pas = s[s.index(": ")+2:]
    # `!=` on the two booleans is XOR: exactly one position must match.
    if (pas[minm-1]==chrr) != (pas[maxm-1]==chrr):
        count+=1
print(count)
|
[
"siddharthsingh.17june@gmail.com"
] |
siddharthsingh.17june@gmail.com
|
aa1a467cc3e72429fddfc6663939baa04bc9e374
|
bc073560803464da166d661e916d21ad51b2c80e
|
/files/scripts/contact_detector.py
|
5ac2e00abc742896c576349cf11dd4b994ec5bc7
|
[] |
no_license
|
SDU-Embedded/event_processors
|
680edb4a8107a2661407f43be933795ef0a1e987
|
bdea5bbcab7d39f7b1746d1f391c494ffa0fd39d
|
refs/heads/master
| 2021-07-26T21:41:26.831474
| 2020-05-04T07:03:53
| 2020-05-04T07:03:53
| 165,830,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,084
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from event_processors import EventProcessor
from event_listeners import PerchEventListener
from state_monitors import LinearStateMonitor
from metric_processors import ProbabilityProcessor
from thresholders import Thresholder
from event_builders import EventBuilder
from event_emitters import EventEmitter
if __name__ == "__main__":
    # Pipeline: perch sensor events -> state monitor -> probability metric
    # -> threshold -> contact event -> emitter.
    cage1_event_listener = PerchEventListener('manna,hou,bisnap','ats_perch',bird=1 )
    #cage2_event_listener = PerchEventListener('manna,hou,bisnap','ats_perch',bird=2 )
    # Setup event listeners
    #cage1_event_listener = PerchEventListener( servers='manna,hou,bisnap', topic='perch_sensor', bird=1 )
    #cage2_event_listener = PerchEventListener( servers='manna,hou,bisnap', topic='perch_sendor', bird=2, debug=True )
    # Setup state monitors
    cage1_state_monitor = LinearStateMonitor( period=0.1, upwards_gain=0.1, downwards_gain=0.5 )
    #cage2_state_monitor = LinearStateMonitor( period=0.1, upwards_gain=0.1, downwards_gain=0.5 )
    cage1_event_listener.stateTransitionCallback = cage1_state_monitor.setState
    #cage2_event_listener.stateTransitionCallback = cage2_state_monitor.setState
    # Setup metric processor
    metric_processor = ProbabilityProcessor( period=0.1 )
    metric_processor.getters.append( cage1_state_monitor.getProbability )
    #metric_processor.getters.append( cage2_state_monitor.getProbability )
    # Setup thresholders
    thresholder = Thresholder( upwards_threshold=0.45, downwards_threshold=0.15 )
    metric_processor.setters.append( thresholder.evaluate )
    # Setup event builders
    builder = EventBuilder( bird="1", type="ats_contact" )
    thresholder.emitEvent = builder.evaluate
    # Setup event emitters
    emitter = EventEmitter( 'manna,hou,bisnap','ats_contact')
    builder.send = emitter.send
    # Setup and run event processor
    event_processor = EventProcessor()
    event_processor.tasks.append(cage1_event_listener)
    # BUGFIX: the cage2 objects are commented out above, so appending them
    # raised NameError at runtime.  Re-enable these two lines together with
    # the cage2 definitions when cage 2 comes back online.
    #event_processor.tasks.append(cage2_event_listener)
    event_processor.tasks.append(cage1_state_monitor)
    #event_processor.tasks.append(cage2_state_monitor)
    event_processor.tasks.append(metric_processor)
    event_processor.run()

    #event_processor.tasks.append( TwoLevelStateMonitor(period=0.01, upwards_gain=0.03, downwards_gain=0.005) )
    #event_processor.tasks.append( OnOffEventListener(servers, 'power', event_processor.tasks[-1].setState) )
    #event_processor.tasks.append( TwoLevelStateMonitor(period=0.01, upwards_gain=0.03, downwards_gain=0.005) )
    #event_processor.tasks.append( OnOffEventListener(servers, 'entropy', event_processor.tasks[-1].setState) )
    #event_processor.tasks.append( ProbabilityProcessor( servers=servers, topic='bout', upwards_threshold=0.85, downwards_threshold=0.5, period=0.01, bird="1", type="bout" ) )
    #event_processor.tasks[-1].getters.append( event_processor.tasks[0].getProbability )
    #event_processor.tasks[-1].getters.append( event_processor.tasks[2].getProbability )
    #event_processor.run()
|
[
"lelar09@student.sdu.dk"
] |
lelar09@student.sdu.dk
|
29688ecf8b3300c70dbfd3ba0946cd5fffb4b583
|
843798667698d041a0097cc3d08847a27d9ec08f
|
/transaction/forms.py
|
0c8ea7a6761f65784fa2c37bb85381cd3f50a348
|
[] |
no_license
|
jaredtmartin/jade
|
d1faa6bd657a3c9ee8726e8178ee53a5687c1e7d
|
f627d4a3939c50443e7643909b036a9d9e283b9e
|
refs/heads/master
| 2021-01-18T14:03:08.906498
| 2011-05-06T20:07:07
| 2011-05-06T20:07:07
| 901,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,952
|
py
|
from jade.transaction.models import *
from jade.common.widgets import AutoCompleteField
from django.utils.safestring import mark_safe
from django import forms
from jade.common.widgets import CalanderInput
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.translation import ugettext as _
def modelformset_factory(*args, **kwargs):
    """
    Returns a FormSet class for the given Django model class.
    Change its as_table function to show the forms as rows

    Wraps Django's ``forms.models.modelformset_factory`` and monkey-patches
    the resulting FormSet so that (a) each form renders as a single <tr>,
    (b) a header row of labels is prepended, and (c) every form receives
    ``formset_id`` and ``group`` kwargs (consumed by RowForm below).
    """
    # Popped here so Django's factory never sees it; captured by the closures.
    prefix=kwargs.pop('prefix',None)
    can_delete=kwargs.get('can_delete',False)
    def get_default_prefix(cls): return prefix
    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        form_list = u' '.join([form.as_row() for form in self.forms])
        # A throwaway form instance supplies the header labels.
        header_form=self.form()
        if can_delete: header_form.fields[forms.formsets.DELETION_FIELD_NAME] = forms.fields.BooleanField(label=_(u'Delete'), required=False)
        header=header_form.as_header_row()
        return mark_safe(u'\n'.join([unicode(self.management_form),header, form_list]))
    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.

        Copied from Django's BaseFormSet with two extra default kwargs:
        ``formset_id`` (the row index) and ``group`` (the formset prefix).
        """
        defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i), 'formset_id':i, 'group':self.prefix}
        if self.data or self.files:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                pass
        # Allow extra forms to be empty.
        if i >= self.initial_form_count():
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form
    FormSet = forms.models.modelformset_factory(*args, **kwargs)
    # Patch the generated class in place with the row-oriented behaviour.
    FormSet._construct_form=_construct_form
    FormSet.as_table=as_table
    FormSet.get_default_prefix=get_default_prefix
    return FormSet
class RowForm(forms.ModelForm):
    """ Adds four features to the ModelForms.
        1. Adds .as_row method that renders the form as a table row, appropriate for a formset
        2. Adds .default_prefix method as well as its hook in init so a default prefix can be specified in subclasses
        3. Adds formset_id and group attributes to be set by a formset
        4. Adds arguments to put html at the beginning and end of the html_output... This is important when working
        with formsets
    """
    def get_default_prefix(self): return 'rowform'
    def __init__(self, *args, **kwargs):
        # formset_id/group are injected by the patched modelformset_factory
        # above; pop them before ModelForm sees the kwargs.
        self.formset_id=kwargs.pop('formset_id',None)
        self.group=kwargs.pop('group',None)
        super(RowForm, self).__init__(*args, **kwargs)
        if not self.prefix: self.prefix=self.get_default_prefix()
    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row, start='', end=''):
        # Copy of Django's BaseForm._html_output extended with `start`/`end`
        # so callers can wrap the output in <tr ...> ... </tr>.
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
        output, hidden_fields = [start], []
        for name, field in self.fields.items():
            html_class_attr = ''
            bf = forms.forms.BoundField(self, field, name)
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
            if bf.is_hidden:
                if bf_errors:
                    top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
                hidden_fields.append(unicode(bf))
            else:
                # Create a 'class="..."' atribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes
                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_unicode(bf_errors))
                if bf.label:
                    label = conditional_escape(force_unicode(bf.label))
                    # Only add the suffix if the label does not end in
                    # punctuation.
                    if self.label_suffix:
                        if label[-1] not in ':?.!':
                            label += self.label_suffix
                    label = bf.label_tag(label) or ''
                else:
                    label = ''
                if field.help_text:
                    help_text = help_text_html % force_unicode(field.help_text)
                else:
                    help_text = u''
                output.append(normal_row % {
                    'errors': force_unicode(bf_errors),
                    'label': force_unicode(label),
                    'field': unicode(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr
                })
        if top_errors:
            output.insert(0, error_row % force_unicode(top_errors))
        if hidden_fields: # Insert any hidden fields in the last row.
            str_hidden = u''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {'errors': '', 'label': '',
                                              'field': '', 'help_text':'',
                                              'html_class_attr': html_class_attr})
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        output.append(end)
        return mark_safe(u'\n'.join(output))
    def as_row(self):
        "Returns this form rendered as a row in a table."
        # group/formset_id become attributes on the <tr> so client-side JS
        # can address individual formset rows.
        row_attr=''
        if self.group: row_attr+=' group="%s" ' % self.group
        if not self.formset_id==None: row_attr+=' formset_id="%s" ' % self.formset_id
        return self._html_output(
            normal_row = u'<td%(html_class_attr)s>%(errors)s%(field)s%(help_text)s</td>',
            error_row = u'<td colspan="2">%s</td>',
            row_ender = u'</td>',
            help_text_html = u'<br />%s',
            errors_on_separate_row = False,
            start=u'<tr%s>' % row_attr,
            end=u'</tr>',
        )
    def as_header_row(self):
        "Returns this form rendered as a row in a table."
        # Same rendering machinery, but emitting only the <th> labels.
        return self._html_output(
            normal_row = u'<th>%(label)s</th>',
            error_row = u'<td colspan="2">%s</td>',
            row_ender = u'</td>',
            help_text_html = u'<br />%s',
            errors_on_separate_row = False,
            start=u'<tr>',
            end=u'</tr>',
        )
class GroupForm(RowForm):
    """Row form that tags each cell with the group derived from its prefix."""
    # NOTE(review): super(RowForm, ...) skips RowForm.__init__, so the
    # prefix/formset_id handling never runs here -- confirm this is intended.
    def __init__(self, *args, **kwargs):
        self.group=kwargs.pop('group',None)
        super(RowForm, self).__init__(*args, **kwargs)
    def as_row(self):
        "Returns this form rendered as a row in a table with the specified group."
        group_spec=' group="%s" ' % self.prefix.split('-')[0]
        # NOTE(review): the 'aa'/'bb'/'cc'/'dd' prefixes below are emitted
        # into the HTML and look like debugging leftovers -- verify before
        # relying on this form's output.
        return self._html_output(
            normal_row = u'aa<td'+group_spec+u' %(html_class_attr)s>%(errors)s%(field)s%(help_text)s</td>',
            error_row = u'bb<td'+group_spec+u' colspan="2">%s</td>',
            row_ender = u'cc</td>',
            help_text_html = u'dd<br />%s',
            errors_on_separate_row = False)
class SaleForm(RowForm):
    """Row form for Sale documents with an auto-completing client field."""
    # BUGFIX: these two fields were declared inside `class Meta`, where
    # Django ignores them (Meta only carries options such as `model` and
    # `fields`), so the calendar widget and the client autocomplete never
    # applied.  Declared on the form class instead, matching SaleLineForm.
    date = forms.DateField(widget=CalanderInput())
    account=AutoCompleteField(model=Client, url="/accounting/ajax-client-list/", required=False, label='Client')
    def __init__(self, *args, **kwargs):
        super(SaleForm, self).__init__(*args, **kwargs)
        # When editing an existing sale, seed the autocomplete text with
        # the client's display name.
        if 'instance' in kwargs:
            instance = kwargs['instance']
            self.initial['account'] = instance.account.name
    def get_default_prefix(self): return 'saleform'
    class Meta:
        model = Sale
class TransactionForm(RowForm):
    """Bare row form exposing the editable Transaction columns."""
    class Meta:
        fields=('date', 'value', 'active','inventorytransaction')
        model = Transaction
# One blank extra row so a new transaction can be added inline.
TransactionFormSet = modelformset_factory(Transaction, form=TransactionForm, extra=1)
class SaleLineForm(RowForm):
    """Row form for an individual sale line with item autocomplete."""
    def __init__(self, *args, **kwargs):
        super(SaleLineForm, self).__init__(*args, **kwargs)
        # When editing, seed the autocomplete text with the item's name.
        # has_key is a Python 2 idiom, consistent with the rest of this file.
        if kwargs.has_key('instance'):
            instance = kwargs['instance']
            if instance.item: self.initial['item'] = instance.item.name
    item=AutoCompleteField(model=Item, url='/inventory/ajax-item-list/', required=False)
    date = forms.DateField(widget=CalanderInput())
    # The owning document is fixed by the page context, hence hidden.
    document = forms.ModelChoiceField(Document, widget=forms.HiddenInput())
    def get_default_prefix(self): return 'salelineform'
    class Meta:
        fields=('document','date', 'value', 'quantity', 'item', 'serial', 'active', 'delivered')
        model = SaleLine
# No extra rows: new lines are added via NewSaleLineForm below.
SaleLineFormSet = modelformset_factory(SaleLine, form=SaleLineForm, extra=0, prefix='salelineform', can_order=False, can_delete=True)
class NewSaleLineForm(forms.Form):
    """Minimal form for adding a brand-new sale line."""
    # NOTE(review): formset_id presumably ties the new row back to its
    # position in the client-side formset -- confirm against the JS/views.
    formset_id = forms.IntegerField()
    item=AutoCompleteField(model=Item, url='/inventory/ajax-item-list/', required=False)
|
[
"jaredtmartin@gmail.com"
] |
jaredtmartin@gmail.com
|
0f6f4d5d7aeebbacb367b43963db1842478c9ef1
|
96df532f6bebf067a302ed096ae1d5b47022073a
|
/test/test_parser_helper.py
|
fe6a05305c0c0208a934eec7302909698c339599
|
[] |
no_license
|
datamix-study/notification_bot
|
bb7907feaf4587d25214edfd4a1e4d21dd87f6aa
|
109e38b9fff07dc97ab5ad35275ce1c8ed5264fb
|
refs/heads/master
| 2020-08-11T23:49:58.094679
| 2020-03-07T14:40:51
| 2020-03-07T14:40:51
| 214,650,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145,738
|
py
|
DATAMIX_INFORMATION_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/WebSite"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>ニュース | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="ニュースのページ。未経験から6ヶ月間で データサイエンティストを目指す社会人のためのデータサイエンティスト 育成専門の教育プログラム。IoT・ビッグデータ時代に必須のビジネス知識、統計学、機械学習、人工知能、データベース、プログラミング、SQLとBIツールのスキル獲得は株式会社データミックス。" />
<link rel='next' href='https://datamix.co.jp/news/page/2/' />
<link rel="canonical" href="https://datamix.co.jp/news/" />
<script type="text/javascript" >
// Google Analytics command-queue stub plus tracker setup.
// NOTE(review): a second inline GA snippet near the bottom of this page
// also runs ga('create', 'UA-99319144-1', 'auto') and
// ga('send', 'pageview'); with both present every pageview is reported
// twice — confirm and keep only one of the two snippets.
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
ga('create', 'UA-99319144-1', 'auto');
// Plugins
ga('send', 'pageview');
</script>
<script async src="https://www.google-analytics.com/analytics.js"></script>
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s0.wp.com' />
<link rel='dns-prefetch' href='//secure.gravatar.com' />
<link rel='dns-prefetch' href='//s.w.org' />
<link rel="alternate" type="application/rss+xml" title="データサイエンティストを目指すならデータミックス » ニュース フィード" href="https://datamix.co.jp/news/feed/" />
<script type="text/javascript">
// WordPress core emoji support: _wpemojiSettings points at the Twemoji
// image/SVG assets; the minified loader below canvas-tests whether the
// browser renders emoji and flag sequences natively and injects
// wp-emoji-release.min.js only when it does not. Generated by WordPress
// core — do not edit by hand.
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='jetpack_css-css' href='https://datamix.co.jp/test/wp-content/plugins/jetpack/css/jetpack.css?ver=6.8' type='text/css' media='all' />
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/news/",
"name": "ニュース"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<link rel='dns-prefetch' href='//v0.wordpress.com'/>
<link rel='dns-prefetch' href='//i0.wp.com'/>
<link rel='dns-prefetch' href='//i1.wp.com'/>
<link rel='dns-prefetch' href='//i2.wp.com'/>
<style type='text/css'>img#wpstats{display:none}</style> <style type="text/css">
html:not( .jetpack-lazy-images-js-enabled ) .jetpack-lazy-image {
display: none;
}
</style>
<script>
// Signal to Jetpack's lazy-images CSS (declared just above) that
// JavaScript is available, so lazily loaded images are un-hidden.
var rootElement = document.documentElement;
rootElement.classList.add( 'jetpack-lazy-images-js-enabled' );
</script>
<style type="text/css" id="syntaxhighlighteranchor"></style>
<!-- NOTE(review): this loads jQuery 1.11.1 after WordPress already loaded
     jQuery 1.12.4 above; the second load replaces the first and can drop
     plugins/handlers bound to it — consolidate to a single version. -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
// Syntax-highlight every <pre> block once the DOM is ready.
// (hljs.initHighlightingOnLoad() only targets <pre><code>, so each
// <pre> element is passed to highlightBlock explicitly instead.)
$(function () {
    $('pre').each(function (index, element) {
        hljs.highlightBlock(element);
    });
});
</script>
<script>
// Smooth-scroll to in-page anchors instead of jumping.
jQuery(function () {
    // Bind to every link whose href starts with "#".
    // The attribute value must be quoted: the unquoted form a[href^=#]
    // is an invalid Sizzle selector and throws a syntax error under
    // jQuery 1.11/1.12, so the original handler never bound at all.
    jQuery('a[href^="#"]').click(function () {
        // Scroll duration in milliseconds.
        var speed = 400;
        // Anchor target from the clicked link.
        var href = jQuery(this).attr("href");
        // "#" or an empty fragment scrolls to the top of the page.
        var target = jQuery(href == "#" || href == "" ? 'html' : href);
        // Absolute pixel offset of the destination.
        var position = target.offset().top;
        // Animate both body and html for cross-browser scrolling.
        jQuery('body,html').animate({scrollTop: position}, speed, 'swing');
        // Suppress the default instant jump.
        return false;
    });
});
</script>
<!-- Treasure Data -->
<script type="text/javascript">
// Treasure Data JavaScript SDK loader: defines a queuing "Treasure"
// stub (blockEvents, trackPageview, addRecord, fetchUserSegments, ...)
// that buffers calls until the async td.min.js script loads from
// cdn.treasuredata.com. Minified vendor snippet — do not edit by hand.
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
</head>
<body class="archive post-type-archive post-type-archive-news">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-2345" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2345"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-ancestor current-menu-parent menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1028" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1028"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/news.css">
<div class="top_label">
<div>
<h1>ニュース</h1>
<h3 class="roboto uppercase">NEWS</h3>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/news/">ニュース</a></p>
</div>
<div class="blogs content_section cf">
<div class="news-area">
<h4 class="page_sub_title blue">お知らせ</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Information</b></h3>
<div class="inner">
<ul>
<li><h4><span class="date">2019.10.09</span><a href="https://datamix.co.jp/news/20191009/">2019年10月12日(土)の休講及び無料説明会中止のお知らせ</a></h4></li>
<li><h4><span class="date">2019.10.07</span><a href="https://datamix.co.jp/news/3551/">オデッセイコミュニケーションズ主催「第17回 オデッセイ ユニバーシティ」にて弊社代表が講演いたします</a></h4></li>
<li><h4><span class="date">2019.10.02</span><a href="https://datamix.co.jp/news/20191002/">DX時代の企業の人財戦略セミナー「AIを活用した組織分析、データサイエンティスト育成の実践」を株式会社ネクストエデュケーションシンク様と共催いたします</a></h4></li>
<li><h4><span class="date">2019.09.19</span><a href="https://datamix.co.jp/news/20190919/">「第8回日本HRチャレンジ大賞」においてイノベーション賞を受賞しました</a></h4></li>
<li><h4><span class="date">2019.09.10</span><a href="https://datamix.co.jp/news/20190910/">「ネクスト・ザ・ファースト46 – 次代を担う市場の開拓者-」に掲載されました</a></h4></li>
<li><h4><span class="date">2019.08.30</span><a href="https://datamix.co.jp/news/20190830/">【プレスリリース】 gacco® (ガッコ) セレクト有料講座に「データサイエンス スキル育成プログラム」を開講</a></h4></li>
<li><h4><span class="date">2019.04.05</span><a href="https://datamix.co.jp/news/20190409/">【プレスリリース】ゴールデンウィーク中でデータ分析スキルを身につけるデータサイエンス研修を提供 ― 短期間でデータ分析、機械学習の基礎知識を習得 ―</a></h4></li>
<li><h4><span class="date">2019.04.03</span><a href="https://datamix.co.jp/news/20190403/">【プレスリリース】「データサイエンティスト育成コース パートタイムプログラム」の開講を増設</a></h4></li>
<li><h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190312/">【プレスリリース】データミックスがSpeeeと業務提携を実施 ノウハウを活かした独自のビジネストランスレーター育成研修制度を提供</a></h4></li>
<li><h4><span class="date">2019.02.26</span><a href="https://datamix.co.jp/news/20190226/">【プレスリリース】国内のデータサイエンティスト育成スクールにおいて初の取組みとなる リアルな企業データを活用したデータ分析PoC『OpenPoC』の提供を開始</a></h4></li>
</ul>
</div>
<div class="pagination cf"><div></div></div>
</div>
</div>
<div class="media-area">
<div class="wrap">
<h4 class="page_sub_title blue">メディア掲載</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Media</b></h3>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><img src="" alt=""></div>
<h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190320/">【メディア掲載】フリーランスエンジニアNoteに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?resize=140%2C31&ssl=1" alt=""></div>
<h4><span class="date">2018.02.15</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e6%97%a5%e5%88%8a%e5%b7%a5%e6%a5%ad%e6%96%b0%e8%81%9e%e9%9b%bb%e5%ad%90%e7%89%88%e3%81%ab%e5%bc%8a%e7%a4%be%e5%a0%85%e7%94%b0/">【メディア掲載】日刊工業新聞電子版に弊社堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?resize=140%2C100&ssl=1" alt=""></div>
<h4><span class="date">2018.02.10</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%9e%e3%82%a4%e3%83%8a%e3%83%93%e3%83%8b%e3%83%a5%e3%83%bc%e3%82%b9%e3%81%ab%e5%bc%8a%e7%a4%be%e4%bb%a3%e8%a1%a8-%e5%a0%85/">【メディア掲載】マイナビニュースに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?resize=140%2C70&ssl=1" alt=""></div>
<h4><span class="date">2018.01.25</span><a href="https://datamix.co.jp/news/diamond_online/">【メディア掲載】Diamond onlineに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?resize=140%2C80&ssl=1" alt=""></div>
<h4><span class="date">2017.12.28</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%8f%e3%83%bc%e3%83%90%e3%83%bc%e3%83%bb%e3%83%93%e3%82%b8%e3%83%8d%e3%82%b9%e3%83%bb%e3%82%aa%e3%83%b3%e3%83%a9%e3%82%a4/">【メディア掲載】ハーバー・ビジネス・オンラインに代表堅田のインタビュー記事が掲載されました。</a></h4>
</li>
</ul>
</div>
<div class="content_section page-template-news">
<div class="single button_wrap">
<div class="button_hvr yellow"><a href="/media" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
<!-- end 20170926 kikuzawa -->
<section class="InViewSection home-companies published-area section relative cf" style="padding: 0 0 40px;">
<div class="content_section">
<h2>
<span>掲載メディア</span></h2>
<div class="published-list module-companies relative cf">
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/11/miniicon_ogpnikkei.png?fit=140%2C86&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/logo_NBD.png?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?fit=140%2C60&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/55172d3b380fdad2390fca2e86970c30.jpg?fit=140%2C34&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/9d2a3a1b6d85acbb34fdcaa2e7dfd677.jpg?fit=140%2C53&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/36392e404db2e0e87cf7e0f11adc0bc0.jpg?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?fit=140%2C17&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?fit=140%2C28&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?fit=140%2C48&ssl=1" alt="">
</div>
</div>
</div>
</div>
</section>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-980" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-980"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">個人情報保護方針</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
$(function() {
    // Tabbed content: up to five pane groups share one tab strip
    // (.tab li). Clicking the nth tab shows the nth pane in every group.
    var paneGroups = [
        '.tab_target', '.tab_target2', '.tab_target3',
        '.tab_target4', '.tab_target5'
    ];
    var i;

    // Initially hide every pane, then reveal the first pane of each group.
    for (i = 0; i < paneGroups.length; i++) {
        $(paneGroups[i] + ' .tab_wrap').css('display','none');
        $(paneGroups[i] + ' > .tab_wrap:first').css('display','block');
    }
    // Mark the first tab as selected.
    $('.tab li:first').addClass('select');

    $('.tab li').click(function() {
        // Position of the clicked tab within the tab strip.
        var index = $('.tab li').index(this);
        var j;
        // Hide every pane in every group first...
        for (j = 0; j < paneGroups.length; j++) {
            $(paneGroups[j] + ' .tab_wrap').css('display','none');
        }
        // ...then fade in the pane matching the clicked tab's position.
        for (j = 0; j < paneGroups.length; j++) {
            $(paneGroups[j] + ' .tab_wrap').eq(index).fadeIn("slow");
        }
        // Move the 'select' marker to the clicked tab.
        $('.tab li').removeClass('select');
        $(this).addClass('select');
    });
});
</script>
<script type="text/javascript">
// Rotating hero background: cross-fade through five slide images every
// five seconds (jquery.bgswitcher plugin).
jQuery(function($) {
    $('.slider').bgSwitcher({
        images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
        interval: 5000,
        effect: "fade"
    });
});
$(function() {
    // Instructor thumbnails: hovering a thumbnail highlights the matching
    // card in the main list (data-thumbnail value -> data-card value).
    $('.instructors_sub li').hover(function() {
        var cardId = $(this).data('thumbnail');
        $('.instructors_main li.on').removeClass('on');
        $('.instructors_main li[data-card="' + cardId + '"]').addClass('on');
    });
    // Second, independent thumbnail/card pair.
    $('.instructors_sub2 li').hover(function() {
        var cardId = $(this).data('thumbnail');
        $('.instructors_main2 li.on').removeClass('on');
        $('.instructors_main2 li[data-card="' + cardId + '"]').addClass('on');
    });

    // Keep .round-box elements square: set height equal to rendered width.
    $('.round-box').css('height', $('.round-box').outerWidth());
    $('.roundsec.round3 .round-box').css('height', $('.roundsec.round3 .round-box').outerWidth());
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
$(window).on('load resize', function(){
    // Equalize heights of tiled list items (jquery.tile.js).
    var windowWidth = $(window).width();
    $(".blog-area ul li").tile();
    // Desktop-only tiling (breakpoint 750px).
    if (windowWidth > 750) {
        $(".home-button ul li").tile();
        $(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
    }
    // Re-bind under a namespace so repeated load/resize events replace —
    // rather than stack — the scroll handler (the original leaked one
    // handler per resize). Locals are declared with var; the original
    // wrote to implicit globals.
    $(window).off("scroll.spFixBtn").on("scroll.spFixBtn", function() {
        var scrollHeight = $(document).height();
        var scrollPosition = $(window).height() + $(window).scrollTop();
        var footHeight = $("footer").innerHeight();
        // On mobile, hide the fixed CTA button while the footer is in
        // view so the two do not overlap.
        if (windowWidth < 750) {
            if ( scrollHeight - scrollPosition <= footHeight ) {
                $('.sp_fix_btn').slideUp();
            } else {
                $('.sp_fix_btn').slideDown();
            }
        }
    });
});
</script>
<script>
$(window).on('load resize', function(){
    // Desktop only: position the header below the hero banner / page
    // label, then fix it to the viewport top once scrolled past it.
    var windowWidth = $(window).width();
    if (windowWidth > 750) {
        var mainheight = $('.page_main_banner').innerHeight()-70;
        var lavelheight = $('.top_label').height();
        // Fall back to 0 when neither element exists; the original left
        // hdheight undefined in that case, making the comparison below
        // always false and the css('top', ...) call a silent no-op.
        var hdheight = 0;
        if(mainheight){
            hdheight = mainheight;
        }
        // .top_label wins when both are present (matches original order).
        if(lavelheight){
            hdheight = lavelheight;
        }
        $('header').css('top', hdheight);
        // Namespaced so repeated resize events replace, not stack, the
        // scroll handler (the original added a new handler per resize).
        $(window).off('scroll.fixHeader').on('scroll.fixHeader', function () {
            var value = $(this).scrollTop();
            // Past the threshold: pin the header to the viewport top;
            // otherwise restore its absolute position below the banner.
            if (value > hdheight) {
                $('header').css({"top": 0,"position":"fixed"});
            }else{
                $('header').css({'top': hdheight, "position":"absolute"});
            }
        });
    }
});
</script>
<script>
// Google Analytics loader ("isogram" snippet) plus tracker setup.
// NOTE(review): this duplicates the inline GA snippet in <head> — both
// create tracker UA-99319144-1 and send a pageview, so every pageview
// is counted twice; confirm and remove one of the two snippets.
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');ga('send','pageview');
</script>
<div style="display:none">
</div>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/photon/photon.min.js?ver=20130122'></script>
<script type='text/javascript' src='https://s0.wp.com/wp-content/js/devicepx-jetpack.js?ver=201941'></script>
<script type='text/javascript' src='https://secure.gravatar.com/js/gprofiles.js?ver=2019Octaa'></script>
<script type='text/javascript'>
/* <![CDATA[ */
// Configuration read by Jetpack's wpgroho.js (Gravatar Hovercards),
// loaded just below. my_hash is presumably the viewer's Gravatar email
// hash — empty here for anonymous visitors; TODO confirm.
var WPGroHo = {"my_hash":""};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/modules/wpgroho.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/lazy-images/js/lazy-images.min.js?ver=6.8'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://stats.wp.com/e-201941.js' async='async' defer='defer'></script>
<script type='text/javascript'>
_stq = window._stq || [];
_stq.push([ 'view', {v:'ext',j:'1:6.8',blog:'155035170',post:'0',tz:'9',srv:'datamix.co.jp'} ]);
_stq.push([ 'clickTrackerInit', '155035170', '0' ]);
</script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
DATAMIX_MEDIA_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/WebSite"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>ニュース | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="ニュースのページ。未経験から6ヶ月間で データサイエンティストを目指す社会人のためのデータサイエンティスト 育成専門の教育プログラム。IoT・ビッグデータ時代に必須のビジネス知識、統計学、機械学習、人工知能、データベース、プログラミング、SQLとBIツールのスキル獲得は株式会社データミックス。" />
<link rel='next' href='https://datamix.co.jp/news/page/2/' />
<link rel="canonical" href="https://datamix.co.jp/news/" />
<script type="text/javascript" >
window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)};ga.l=+new Date;
ga('create', 'UA-99319144-1', 'auto');
// Plugins
ga('send', 'pageview');
</script>
<script async src="https://www.google-analytics.com/analytics.js"></script>
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s0.wp.com' />
<link rel='dns-prefetch' href='//secure.gravatar.com' />
<link rel='dns-prefetch' href='//s.w.org' />
<link rel="alternate" type="application/rss+xml" title="データサイエンティストを目指すならデータミックス » ニュース フィード" href="https://datamix.co.jp/news/feed/" />
<script type="text/javascript">
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='jetpack_css-css' href='https://datamix.co.jp/test/wp-content/plugins/jetpack/css/jetpack.css?ver=6.8' type='text/css' media='all' />
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/news/",
"name": "ニュース"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<link rel='dns-prefetch' href='//v0.wordpress.com'/>
<link rel='dns-prefetch' href='//i0.wp.com'/>
<link rel='dns-prefetch' href='//i1.wp.com'/>
<link rel='dns-prefetch' href='//i2.wp.com'/>
<style type='text/css'>img#wpstats{display:none}</style> <style type="text/css">
html:not( .jetpack-lazy-images-js-enabled ) .jetpack-lazy-image {
display: none;
}
</style>
<script>
document.documentElement.classList.add(
'jetpack-lazy-images-js-enabled'
);
</script>
<style type="text/css" id="syntaxhighlighteranchor"></style>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
//hljs.initHighlightingOnLoad();
$(function()
{
$( 'pre' ).each(function( i , block )
{
hljs.highlightBlock( block ) ;
} ) ;
} ) ;
</script>
<script>
//スムーズスクロール
jQuery(function(){
// #で始まるアンカーをクリックした場合に処理
jQuery('a[href^=#]').click(function() {
// スクロールの速度
var speed = 400; // ミリ秒
// アンカーの値取得
var href= jQuery(this).attr("href");
// 移動先を取得
var target = jQuery(href == "#" || href == "" ? 'html' : href);
// 移動先を数値で取得
var position = target.offset().top;
// スムーススクロール
jQuery('body,html').animate({scrollTop:position}, speed, 'swing');
return false;
});
});
</script>
<!-- Treasure Data -->
<script type="text/javascript">
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
</head>
<body class="archive post-type-archive post-type-archive-news">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-2345" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2345"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-ancestor current-menu-parent menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1028" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1028"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/news.css">
<div class="top_label">
<div>
<h1>ニュース</h1>
<h3 class="roboto uppercase">NEWS</h3>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/news/">ニュース</a></p>
</div>
<div class="blogs content_section cf">
<div class="news-area">
<h4 class="page_sub_title blue">お知らせ</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Information</b></h3>
<div class="inner">
<ul>
<li><h4><span class="date">2019.10.09</span><a href="https://datamix.co.jp/news/20191009/">2019年10月12日(土)の休講及び無料説明会中止のお知らせ</a></h4></li>
<li><h4><span class="date">2019.10.07</span><a href="https://datamix.co.jp/news/3551/">オデッセイコミュニケーションズ主催「第17回 オデッセイ ユニバーシティ」にて弊社代表が講演いたします</a></h4></li>
<li><h4><span class="date">2019.10.02</span><a href="https://datamix.co.jp/news/20191002/">DX時代の企業の人財戦略セミナー「AIを活用した組織分析、データサイエンティスト育成の実践」を株式会社ネクストエデュケーションシンク様と共催いたします</a></h4></li>
<li><h4><span class="date">2019.09.19</span><a href="https://datamix.co.jp/news/20190919/">「第8回日本HRチャレンジ大賞」においてイノベーション賞を受賞しました</a></h4></li>
<li><h4><span class="date">2019.09.10</span><a href="https://datamix.co.jp/news/20190910/">「ネクスト・ザ・ファースト46 – 次代を担う市場の開拓者-」に掲載されました</a></h4></li>
<li><h4><span class="date">2019.08.30</span><a href="https://datamix.co.jp/news/20190830/">【プレスリリース】 gacco® (ガッコ) セレクト有料講座に「データサイエンス スキル育成プログラム」を開講</a></h4></li>
<li><h4><span class="date">2019.04.05</span><a href="https://datamix.co.jp/news/20190409/">【プレスリリース】ゴールデンウィーク中でデータ分析スキルを身につけるデータサイエンス研修を提供 ― 短期間でデータ分析、機械学習の基礎知識を習得 ―</a></h4></li>
<li><h4><span class="date">2019.04.03</span><a href="https://datamix.co.jp/news/20190403/">【プレスリリース】「データサイエンティスト育成コース パートタイムプログラム」の開講を増設</a></h4></li>
<li><h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190312/">【プレスリリース】データミックスがSpeeeと業務提携を実施 ノウハウを活かした独自のビジネストランスレーター育成研修制度を提供</a></h4></li>
<li><h4><span class="date">2019.02.26</span><a href="https://datamix.co.jp/news/20190226/">【プレスリリース】国内のデータサイエンティスト育成スクールにおいて初の取組みとなる リアルな企業データを活用したデータ分析PoC『OpenPoC』の提供を開始</a></h4></li>
</ul>
</div>
<div class="pagination cf"><div></div></div>
</div>
</div>
<div class="media-area">
<div class="wrap">
<h4 class="page_sub_title blue">メディア掲載</h4>
<h3 class="page_main_title blue roboto uppercase"><b>Media</b></h3>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><img src="" alt=""></div>
<h4><span class="date">2019.03.12</span><a href="https://datamix.co.jp/news/20190320/">【メディア掲載】フリーランスエンジニアNoteに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?resize=140%2C31&ssl=1" alt=""></div>
<h4><span class="date">2018.02.15</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e6%97%a5%e5%88%8a%e5%b7%a5%e6%a5%ad%e6%96%b0%e8%81%9e%e9%9b%bb%e5%ad%90%e7%89%88%e3%81%ab%e5%bc%8a%e7%a4%be%e5%a0%85%e7%94%b0/">【メディア掲載】日刊工業新聞電子版に弊社堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?resize=140%2C100&ssl=1" alt=""></div>
<h4><span class="date">2018.02.10</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%9e%e3%82%a4%e3%83%8a%e3%83%93%e3%83%8b%e3%83%a5%e3%83%bc%e3%82%b9%e3%81%ab%e5%bc%8a%e7%a4%be%e4%bb%a3%e8%a1%a8-%e5%a0%85/">【メディア掲載】マイナビニュースに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?resize=140%2C70&ssl=1" alt=""></div>
<h4><span class="date">2018.01.25</span><a href="https://datamix.co.jp/news/diamond_online/">【メディア掲載】Diamond onlineに弊社代表 堅田のインタビューが掲載されました</a></h4>
</li>
<li class="clearfix">
<div class="image"><img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?resize=140%2C80&ssl=1" alt=""></div>
<h4><span class="date">2017.12.28</span><a href="https://datamix.co.jp/news/%e3%80%90%e3%83%a1%e3%83%87%e3%82%a3%e3%82%a2%e6%8e%b2%e8%bc%89%e3%80%91%e3%83%8f%e3%83%bc%e3%83%90%e3%83%bc%e3%83%bb%e3%83%93%e3%82%b8%e3%83%8d%e3%82%b9%e3%83%bb%e3%82%aa%e3%83%b3%e3%83%a9%e3%82%a4/">【メディア掲載】ハーバー・ビジネス・オンラインに代表堅田のインタビュー記事が掲載されました。</a></h4>
</li>
</ul>
</div>
<div class="content_section page-template-news">
<div class="single button_wrap">
<div class="button_hvr yellow"><a href="/media" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
<!-- end 20170926 kikuzawa -->
<section class="InViewSection home-companies published-area section relative cf" style="padding: 0 0 40px;">
<div class="content_section">
<h2>
<span>掲載メディア</span></h2>
<div class="published-list module-companies relative cf">
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/11/miniicon_ogpnikkei.png?fit=140%2C86&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/logo_NBD.png?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2018/01/diamondlogo.png?fit=140%2C60&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/55172d3b380fdad2390fca2e86970c30.jpg?fit=140%2C34&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/9d2a3a1b6d85acbb34fdcaa2e7dfd677.jpg?fit=140%2C53&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i0.wp.com/datamix.co.jp/test/wp-content/uploads/2019/01/36392e404db2e0e87cf7e0f11adc0bc0.jpg?fit=140%2C36&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i1.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/1fd90f4f32d790f77dfd67c38d07918d.png?fit=140%2C17&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2018/02/49eb521bb9c5b8089267706a57a64b7d.png?fit=140%2C28&ssl=1" alt="">
</div>
</div>
<div>
<div>
<img src="https://i2.wp.com/datamix.co.jp/test/wp-content/uploads/2017/12/hbol-logo.png?fit=140%2C48&ssl=1" alt="">
</div>
</div>
</div>
</div>
</section>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom current-menu-item menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-980" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-980"><a href="https://datamix.co.jp/blog/">ブログ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">個人情報保護方針</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
$(function() {
//クリックしたときのファンクションをまとめて指定
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target > .tab_wrap:first').css('display','block');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target2 > .tab_wrap:first').css('display','block');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target3 > .tab_wrap:first').css('display','block');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target4 > .tab_wrap:first').css('display','block');
$('.tab_target5 .tab_wrap').css('display','none');
$('.tab_target5 > .tab_wrap:first').css('display','block');
$('.tab li:first').addClass('select');
$('.tab li').click(function() {
//.index()を使いクリックされたタブが何番目かを調べ、
//indexという変数に代入します。
var index = $('.tab li').index(this);
//コンテンツを一度すべて非表示にし、
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target5 .tab_wrap').css('display','none');
//クリックされたタブと同じ順番のコンテンツを表示します。
$('.tab_target .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target2 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target3 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target4 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target5 .tab_wrap').eq(index).fadeIn("slow");
//一度タブについているクラスselectを消し、
$('.tab li').removeClass('select');
//クリックされたタブのみにクラスselectをつけます。
$(this).addClass('select');
});
});
</script>
<script type="text/javascript">
jQuery(function($) {
$('.slider').bgSwitcher({
images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
interval: 5000,
effect: "fade"
});
});
$(function() {
$ ('.instructors_sub li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main li.on').removeClass('on');
$('.instructors_main li[data-card="' + id + '"]').addClass('on');
});
$ ('.instructors_sub2 li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main2 li.on').removeClass('on');
$('.instructors_main2 li[data-card="' + id + '"]').addClass('on');
});
var rWidth = $('.round-box').outerWidth();
$('.round-box').css('height', rWidth);
var rWidth2 = $('.roundsec.round3 .round-box').outerWidth();
$('.roundsec.round3 .round-box').css('height', rWidth2);
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
$(".blog-area ul li").tile();
var windowSm = 750;
if (windowWidth > 750) {
$(".home-button ul li").tile();
$(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
}
$(window).on("scroll", function() {
scrollHeight = $(document).height();
scrollPosition = $(window).height() + $(window).scrollTop();
footHeight = $("footer").innerHeight();
if (windowWidth < 750) {
if ( scrollHeight - scrollPosition <= footHeight ) {
$('.sp_fix_btn').slideUp();
} else {
$('.sp_fix_btn').slideDown();
}
}
});
});
</script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
if (windowWidth > 750) {
var mainheight = $('.page_main_banner').innerHeight()-70;
// var mainheight = $('.page_main_banner').height()+130;
var lavelheight = $('.top_label').height();
if(mainheight){
var hdheight = mainheight;
}
if(lavelheight){
var hdheight = lavelheight;
}
$('header').css('top', hdheight);
var triggerNode = $("header");
$(window).scroll(function () {
var value = $(this).scrollTop();
var triggerNodePosition = $(triggerNode).offset().top;
// 現在のスクロール位置が引き金要素の位置より下にあれば‥
if (value > hdheight) {
// なんらかの命令を実行
$('header').css({"top": 0,"position":"fixed"});
}else{
$('header').css({'top': hdheight, "position":"absolute"});
}
});
}
});
</script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');ga('send','pageview');
</script>
<div style="display:none">
</div>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/photon/photon.min.js?ver=20130122'></script>
<script type='text/javascript' src='https://s0.wp.com/wp-content/js/devicepx-jetpack.js?ver=201941'></script>
<script type='text/javascript' src='https://secure.gravatar.com/js/gprofiles.js?ver=2019Octaa'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var WPGroHo = {"my_hash":""};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/modules/wpgroho.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/jetpack/_inc/build/lazy-images/js/lazy-images.min.js?ver=6.8'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<script type='text/javascript' src='https://stats.wp.com/e-201941.js' async='async' defer='defer'></script>
<script type='text/javascript'>
_stq = window._stq || [];
_stq.push([ 'view', {v:'ext',j:'1:6.8',blog:'155035170',post:'0',tz:'9',srv:'datamix.co.jp'} ]);
_stq.push([ 'clickTrackerInit', '155035170', '0' ]);
</script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
DATAMIX_BLOG_SOURCE = """
<!DOCTYPE html>
<html lang="ja"
itemscope
itemtype="http://schema.org/Article"
prefix="og: http://ogp.me/ns#" class="no-js">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google-site-verification" content="R1OoJj7cg0JS9YC_7mCZQ3mzzA2Qe1gIn4_IJurT1X4" />
<link rel="shortcut icon" href="/favicon.ico">
<link rel="icon" type="image/png" sizes="32x32" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="96x96" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-96x96.png">
<link rel="icon" type="image/png" sizes="16x16" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/favicon-16x16.png">
<link rel="apple-touch-icon" sizes="57x57" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-57x57.png">
<link rel="apple-touch-icon" sizes="60x60" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-60x60.png">
<link rel="apple-touch-icon" sizes="72x72" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-72x72.png">
<link rel="apple-touch-icon" sizes="76x76" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-76x76.png">
<link rel="apple-touch-icon" sizes="114x114" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-114x114.png">
<link rel="apple-touch-icon" sizes="120x120" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-120x120.png">
<link rel="apple-touch-icon" sizes="144x144" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-144x144.png">
<link rel="apple-touch-icon" sizes="152x152" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-152x152.png">
<link rel="apple-touch-icon" sizes="180x180" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/apple-icon-180x180.png">
<link rel="icon" type="image/png" sizes="192x192" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/android-icon-192x192.png">
<meta name="msapplication-TileColor" content="#ffffff">
<meta name="msapplication-TileImage" content="https://datamix.co.jp/test/wp-content/themes/datamix-new/icons/ms-icon-144x144.png">
<link href="https://fonts.googleapis.com/css?family=Roboto:400,900" rel="stylesheet">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/style.css">
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/modernizr-custom.js"></script>
<title>採用情報 | データサイエンティストを目指すならデータミックス</title>
<!-- Facebook Pixel Code -->
<script>
!function(f,b,e,v,n,t,s){if(f.fbq)return;n=f.fbq=function(){n.callMethod?
n.callMethod.apply(n,arguments):n.queue.push(arguments)};if(!f._fbq)f._fbq=n;
n.push=n;n.loaded=!0;n.version='2.0';n.queue=[];t=b.createElement(e);t.async=!0;
t.src=v;s=b.getElementsByTagName(e)[0];s.parentNode.insertBefore(t,s)}(window,
document,'script','https://connect.facebook.net/en_US/fbevents.js');
fbq('init', '760992884080078'); // Insert your pixel ID here.
fbq('track', 'PageView');
</script>
<noscript><img height="1" width="1" style="display:none"
src="https://www.facebook.com/tr?id=760992884080078&ev=PageView&noscript=1"
/></noscript>
<!-- DO NOT MODIFY -->
<!-- End Facebook Pixel Code -->
<!-- All in One SEO Pack 2.4.3 by Michael Torbert of Semper Fi Web Design[2736,2767] -->
<meta name="description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<link rel="canonical" href="https://datamix.co.jp/recruit/" />
<meta property="og:title" content="採用情報 " />
<meta property="og:type" content="article" />
<meta property="og:url" content="https://datamix.co.jp/recruit/" />
<meta property="og:image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<meta property="og:site_name" content="データサイエンティストを目指すならデータミックス" />
<meta property="fb:app_id" content="128471241040393" />
<meta property="og:description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<meta property="article:published_time" content="2017-10-18T10:59:14Z" />
<meta property="article:modified_time" content="2019-12-25T17:10:07Z" />
<meta name="twitter:card" content="summary" />
<meta name="twitter:title" content="採用情報 " />
<meta name="twitter:description" content="データサイエンティストや、スクール運営、バックオフィス、マーケティング担当など株式会社データミックスの採用情報。" />
<meta name="twitter:image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<meta itemprop="image" content="https://datamix.xsrv.jp/wp-content/uploads/2017/05/datamix_ogp.png" />
<!-- /all in one seo pack -->
<link rel='dns-prefetch' href='//s.w.org' />
<script type="text/javascript">
window._wpemojiSettings = {"baseUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/72x72\/","ext":".png","svgUrl":"https:\/\/s.w.org\/images\/core\/emoji\/2.4\/svg\/","svgExt":".svg","source":{"concatemoji":"https:\/\/datamix.co.jp\/test\/wp-includes\/js\/wp-emoji-release.min.js?ver=4.9.3"}};
!function(a,b,c){function d(a,b){var c=String.fromCharCode;l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,a),0,0);var d=k.toDataURL();l.clearRect(0,0,k.width,k.height),l.fillText(c.apply(this,b),0,0);var e=k.toDataURL();return d===e}function e(a){var b;if(!l||!l.fillText)return!1;switch(l.textBaseline="top",l.font="600 32px Arial",a){case"flag":return!(b=d([55356,56826,55356,56819],[55356,56826,8203,55356,56819]))&&(b=d([55356,57332,56128,56423,56128,56418,56128,56421,56128,56430,56128,56423,56128,56447],[55356,57332,8203,56128,56423,8203,56128,56418,8203,56128,56421,8203,56128,56430,8203,56128,56423,8203,56128,56447]),!b);case"emoji":return b=d([55357,56692,8205,9792,65039],[55357,56692,8203,9792,65039]),!b}return!1}function f(a){var c=b.createElement("script");c.src=a,c.defer=c.type="text/javascript",b.getElementsByTagName("head")[0].appendChild(c)}var g,h,i,j,k=b.createElement("canvas"),l=k.getContext&&k.getContext("2d");for(j=Array("flag","emoji"),c.supports={everything:!0,everythingExceptFlag:!0},i=0;i<j.length;i++)c.supports[j[i]]=e(j[i]),c.supports.everything=c.supports.everything&&c.supports[j[i]],"flag"!==j[i]&&(c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&c.supports[j[i]]);c.supports.everythingExceptFlag=c.supports.everythingExceptFlag&&!c.supports.flag,c.DOMReady=!1,c.readyCallback=function(){c.DOMReady=!0},c.supports.everything||(h=function(){c.readyCallback()},b.addEventListener?(b.addEventListener("DOMContentLoaded",h,!1),a.addEventListener("load",h,!1)):(a.attachEvent("onload",h),b.attachEvent("onreadystatechange",function(){"complete"===b.readyState&&c.readyCallback()})),g=c.source||{},g.concatemoji?f(g.concatemoji):g.wpemoji&&g.twemoji&&(f(g.twemoji),f(g.wpemoji)))}(window,document,window._wpemojiSettings);
</script>
<style type="text/css">
img.wp-smiley,
img.emoji {
display: inline !important;
border: none !important;
box-shadow: none !important;
height: 1em !important;
width: 1em !important;
margin: 0 .07em !important;
vertical-align: -0.1em !important;
background: none !important;
padding: 0 !important;
}
</style>
<link rel='stylesheet' id='contact-form-7-css' href='https://datamix.co.jp/test/wp-content/plugins/contact-form-7/includes/css/styles.css?ver=5.1.5' type='text/css' media='all' />
<link rel='stylesheet' id='fvch-styles-css' href='https://datamix.co.jp/test/wp-content/plugins/fv-code-highlighter/public/css/fvch-styles-dark.min.css?ver=1.2' type='text/css' media='all' />
<link rel='stylesheet' id='__EPYT__style-css' href='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/styles/ytprefs.min.css?ver=13.1.2.5' type='text/css' media='all' />
<style id='__EPYT__style-inline-css' type='text/css'>
.epyt-gallery-thumb {
width: 33.333%;
}
</style>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery.js?ver=1.12.4'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/jquery/jquery-migrate.min.js?ver=1.4.1'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var _EPYT_ = {"ajaxurl":"https:\/\/datamix.co.jp\/test\/wp-admin\/admin-ajax.php","security":"e521b7db4c","gallery_scrolloffset":"20","eppathtoscripts":"https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/youtube-embed-plus\/scripts\/","eppath":"https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/youtube-embed-plus\/","epresponsiveselector":"[\"iframe[src*='youtube.com']\",\"iframe[src*='youtube-nocookie.com']\",\"iframe[data-ep-src*='youtube.com']\",\"iframe[data-ep-src*='youtube-nocookie.com']\",\"iframe[data-ep-gallerysrc*='youtube.com']\"]","epdovol":"1","version":"13.1.2.5","evselector":"iframe.__youtube_prefs__[src], iframe[src*=\"youtube.com\/embed\/\"], iframe[src*=\"youtube-nocookie.com\/embed\/\"]","ajax_compat":"","ytapi_load":"light","stopMobileBuffer":"1","vi_active":"","vi_js_posttypes":[]};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/scripts/ytprefs.min.js?ver=13.1.2.5'></script>
<link rel='https://api.w.org/' href='https://datamix.co.jp/wp-json/' />
<link rel="EditURI" type="application/rsd+xml" title="RSD" href="https://datamix.co.jp/test/xmlrpc.php?rsd" />
<link rel="wlwmanifest" type="application/wlwmanifest+xml" href="https://datamix.co.jp/test/wp-includes/wlwmanifest.xml" />
<meta name="generator" content="WordPress 4.9.3" />
<link rel='shortlink' href='https://datamix.co.jp/?p=999' />
<link rel="alternate" type="application/json+oembed" href="https://datamix.co.jp/wp-json/oembed/1.0/embed?url=https%3A%2F%2Fdatamix.co.jp%2Frecruit%2F" />
<link rel="alternate" type="text/xml+oembed" href="https://datamix.co.jp/wp-json/oembed/1.0/embed?url=https%3A%2F%2Fdatamix.co.jp%2Frecruit%2F&format=xml" />
<style type="text/css">
.fvch-codeblock {
background: #2e2e2d !important;
background-position-y: 4px !important;
}
.fvch-codeblock pre, .fvch-line-number {
line-height: 1.5em !important;
font-family: 'Monaco', 'Courier New', Courier, monospace !important;
font-size: 0.8em !important;
}
</style>
<meta name="generator" content="FV Code Highlighter - https://frankverhoeven.me/"><!-- Markup (JSON-LD) structured in schema.org ver.4.1.8 START -->
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": "BreadcrumbList",
"itemListElement": [
{
"@type": "ListItem",
"position": 1,
"item": {
"@id": "https://datamix.co.jp",
"name": "データサイエンティストを目指すならデータミックス"
}
},
{
"@type": "ListItem",
"position": 2,
"item": {
"@id": "https://datamix.co.jp/recruit/",
"name": "採用情報"
}
}
]
}
</script>
<!-- Markup (JSON-LD) structured in schema.org END -->
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/styles/default.min.css">
<script src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/9.12.0/highlight.min.js"></script>
<script>
//hljs.initHighlightingOnLoad();
$(function()
{
$( 'pre' ).each(function( i , block )
{
hljs.highlightBlock( block ) ;
} ) ;
} ) ;
</script>
<script>
//スムーズスクロール
jQuery(function(){
// #で始まるアンカーをクリックした場合に処理
jQuery('a[href^=#]').click(function() {
// スクロールの速度
var speed = 400; // ミリ秒
// アンカーの値取得
var href= jQuery(this).attr("href");
// 移動先を取得
var target = jQuery(href == "#" || href == "" ? 'html' : href);
// 移動先を数値で取得
var position = target.offset().top;
// スムーススクロール
jQuery('body,html').animate({scrollTop:position}, speed, 'swing');
return false;
});
});
</script>
<!-- Google Tag Manager -->
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-ND9M7C5');</script>
<!-- End Google Tag Manager -->
<!-- Treasure Data -->
<script type="text/javascript">
!function(t,e){if(void 0===e[t]){e[t]=function(){e[t].clients.push(this),this._init=[Array.prototype.slice.call(arguments)]},e[t].clients=[];for(var r=function(t){return function(){return this["_"+t]=this["_"+t]||[],this["_"+t].push(Array.prototype.slice.call(arguments)),this}},s=["blockEvents","unblockEvents","setSignedMode","setAnonymousMode","resetUUID","addRecord","fetchGlobalID","set","trackEvent","trackPageview","trackClicks","ready","fetchUserSegments"],n=0;n<s.length;n++){var c=s[n];e[t].prototype[c]=r(c)}var o=document.createElement("script");o.type="text/javascript",o.async=!0,o.src=("https:"===document.location.protocol?"https:":"http:")+"//cdn.treasuredata.com/sdk/2.1/td.min.js";var a=document.getElementsByTagName("script")[0];a.parentNode.insertBefore(o,a)}}("Treasure",this);
</script>
<!-- Global site tag (gtag.js) - Google Ads: 685073848 -->
<script async src="https://www.googletagmanager.com/gtag/js?id=AW-685073848"></script>
<script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'AW-685073848');</script>
</head>
<body class="page-template page-template-templates page-template-recruit page-template-templatesrecruit-php page page-id-999 elementor-default">
<header>
<div class="cf">
<div class="logo __desktop __other">
<a href="https://datamix.co.jp/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
</div>
<nav id="Navigation" class="a-topnav transition_quick">
<a class="home_menu hidden_dt" href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a>
<div class="menu-main-container"><ul id="menu-main" class="cf"><li id="menu-item-1024" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1024"><a href="#">スクール<span>SCHOOL</span></a>
<ul class="sub-menu">
<li id="menu-item-4332" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4332"><a href="https://datamix.co.jp/data-scientist/">データサイエンティスト育成コース</a></li>
<li id="menu-item-2282" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-2282"><a href="https://datamix.co.jp/introductory-data-scientist-course/">データサイエンティスト準備ステップ</a></li>
<li id="menu-item-3932" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-3932"><a href="https://datamix.co.jp/tdi-course/">英語プログラム- Essential Tools</a></li>
</ul>
</li>
<li id="menu-item-1368" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1368"><a href="https://datamix.co.jp/for-employer/">人材紹介<span>For Employer</span></a></li>
<li id="menu-item-1005" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1005"><a href="https://datamix.co.jp/for-company/">法人向けサービス<span>For Company</span></a></li>
<li id="menu-item-1026" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-has-children menu-item-1026"><a href="#">ニュース<span>NEWS</span></a>
<ul class="sub-menu">
<li id="menu-item-1027" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1027"><a href="/news">ニュース</a></li>
<li id="menu-item-1317" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1317"><a href="/event">イベント</a></li>
</ul>
</li>
<li id="menu-item-4015" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4015"><a href="https://datamix.co.jp/faq/">FAQ<span>FAQ</span></a></li>
<li id="menu-item-1029" class="menu-item menu-item-type-post_type menu-item-object-page current-menu-item page_item page-item-999 current_page_item menu-item-1029"><a href="https://datamix.co.jp/recruit/">採用情報<span>recruit</span></a></li>
<li id="menu-item-1030" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-1030"><a href="https://datamix.co.jp/company/">会社概要<span>about us</span></a></li>
</ul></div>
</nav>
<!-- <div class="btn __other __desktop">
<div class="button_hvr yellow"><a href="https://datamix.co.jp/form-seminor/" class="hvr-shutter-out-horizontal"><span class="icon-plane"><b>説明会に申し込む</b></span></a></div> </div>
-->
<div id="MenuIcon" class="menu_button __mobile">
<div>
<div>
<span></span>
<span></span>
<span></span>
<span></span>
</div>
</div>
<span class="roboto uppercase">menu</span>
</div><!--2-->
<!--<div class="information transition_quick __mobile">
<div class="cta twin">
</div>
</div>-->
</div>
</header>
<div id="wrapper">
<link rel="stylesheet" href="https://datamix.co.jp/test/wp-content/themes/datamix-new/css/recruit.css">
<div class="top_label">
<div>
<h1>採用情報</h1>
<p class="roboto uppercase">RECRUIT</p>
</div>
</div>
<div class="content_section">
<p class="breadcrumb"><a href="https://datamix.co.jp/">HOME</a> > <a href="https://datamix.co.jp/recruit/">採用情報</a></p>
</div>
<div class="page-template-blog" style="border-bottom: 1px solid #d4eaf6;">
<div class="blogs content_section cf">
<div class="upper">
<p>私たちデータミックスは、「データサイエンスを当たり前に」することで、データの活用を促し、より良い社会を実現したいと本気で考えています。データミックスはそんな未来を一緒に創っていける仲間に出会えるのを楽しみにしています。</p>
</div>
<div class="message-area">
<div class="content_section_instructor default_page_fontsize">
<h2 class="page_sub_title blue">メンバーからのメッセージ</h2>
<p class="page_main_title blue roboto uppercase"><b>Message</b></p>
<div class="instrucors_box cf">
<ul class="instructors_main">
<li data-card="1" class="on">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/04/d_instructor_katada02-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">堅田 洋資<br>
<span>Yosuke Katada</span></h3>
<p class="inst_role">ブートキャンプステップ、ベーシックステップ、アドバンスステップ「レコメンデーション」クラス</p>
<p>代表取締役社長<br />
データサイエンティスト<br />
<br />
データミックスではクライアントの課題を自分事だと捉え、技術的に難易度の高い課題にチャレンジすることを奨励しています。受講生・卒業生の皆さんやクライアント、そしてメンバーと一緒に成長していきたい方の応募をお待ちしています。</p>
</div>
</div>
</div>
</li>
<li data-card="2">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/10/20180415-2435-e1540443636597-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">石井ゆり香<br>
<span>Yurika Ishii</span></h3>
<p class="inst_role">マネージャー</p>
<p>大学卒業後、メーカー、広告制作会社、システムコンサルティング会社で経理を担当。2018年よりデータミックスに参画。<br />
<br />
いろいろな経験を積みたい方、スピード感を持って前向きに仕事に取り組みたい方のご応募をお待ちしています。一緒に会社を盛り上げていきましょう!<br />
</p>
</div>
</div>
</div>
</li>
<li data-card="3">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/10/recruit_miyoshi01-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">三好大悟<br>
<span>Daigo Miyoshi</span></h3>
<p class="inst_role">データサイエンティスト</p>
<p>慶応義塾大学理工学部管理工学科 卒業<br />
<br />
データサイエンティストは、やる気と熱意が一番大事だと感じています。私自身まだまだ未熟ですが、良きライバルとなって切磋琢磨できる方・仕事に全力投球できる方と仕事ができたらと思います!一緒にデータミックスを日本一のデータサイエンティスト集団にしましょう!</p>
</div>
</div>
</div>
</li>
<li data-card="4">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4728-e1545289354704-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">渡部孝一<br>
<span>Koichi Watabe</span></h3>
<p class="inst_role">人事マネージャー</p>
<p>エンジニアからキャリアをスタートし、コンサルティングファーム 、IT企業にて、採用、育成、制度、組織開発、労務と幅広く人材マネジメントに従事。2018年よりデータミックスに参画。<br />
当社のミッションビジョンに共感し、「データサイエンス」という新しい市場を一緒に創り上げるパイオニア精神をお持ちの方、ご応募お待ちしております!<br />
</p>
</div>
</div>
</div>
</li>
<li data-card="5">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4856-e1545289569276-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">清水嵩文<br>
<span>Takafumi Shimizu</span></h3>
<p class="inst_role">データ分析コンサルタント</p>
<p>前職ではベンチャー企業でインターネット広告の営業をし、2018年からデータミックスに参画。<br />
データミックスにはスタートアップならではの自由があるので、自分らしく働きたい人にあってると思います!<br />
一緒にデータサイエンスを楽しんで、学んで、活用していきましょう!</p>
</div>
</div>
</div>
</li>
<li data-card="6">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4741-e1545289322125-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">亀岡瑶<br>
<span>Yo Kameoka</span></h3>
<p class="inst_role">データ分析コンサルタント</p>
<p>食品CROにて統計解析職に従事、2018年データミックスに参画。<br />
データミックスでは、教育事業とコンサルティング事業を通じて、確かな知識とスキルを身につけることができます!<br />
スピード感がある環境で、共に刺激し成長していける方をお待ちしています!</p>
</div>
</div>
</div>
</li>
<li data-card="7">
<div class="gradient"></div>
<div class="instructors_detail">
<div class="img_wrapper"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4904-e1545292200368-390x491.jpg" alt=""></div>
<div class="text_wrapper">
<div class="position">
<h3 class="name-inst">高橋由佳<br>
<span>Yuka Takahashi</span></h3>
<p class="inst_role">スクール運営 サービス&アクイジション担当</p>
<p>ブライダル業界を経て、IT企業・広報担当責任者などのマネジメント業務に従事。2018年からデータミックスに参画。<br />
<br />
仕事を全力で楽しみながら、切磋琢磨しあう私たちです。<br />
受講生・卒業生の皆さんとのコミュニケーションからも、たくさんのことを学べます。<br />
データミックスを一緒に盛り上げてくれる方、ご応募お待ちしております。<br />
<br />
</p>
</div>
</div>
</div>
</li>
</ul>
<ul class="instructors_sub cf">
<li data-thumbnail="1"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/04/d_instructor_katada02-125x168.jpg" alt=""></li>
<li data-thumbnail="2"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/10/20180415-2435-e1540443636597-125x168.jpg" alt=""></li>
<li data-thumbnail="3"><img src="https://datamix.co.jp/test/wp-content/uploads/2017/10/recruit_miyoshi01-125x168.jpg" alt=""></li>
<li data-thumbnail="4"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4728-e1545289354704-125x168.jpg" alt=""></li>
<li data-thumbnail="5"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4856-e1545289569276-125x168.jpg" alt=""></li>
<li data-thumbnail="6"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4741-e1545289322125-125x168.jpg" alt=""></li>
<li data-thumbnail="7"><img src="https://datamix.co.jp/test/wp-content/uploads/2018/12/20181021_4904-e1545292200368-125x168.jpg" alt=""></li>
</ul>
</div>
</div>
</div>
</div>
</div>
<div class="content_section page-template-company">
<div class="company-recruit">
<h2 class="page_sub_title blue">採用情報</h2>
<p class="page_main_title blue roboto uppercase"><b>Recruit</b></p>
<div class="wantedly content_section cf">
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/91659/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/110548/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/260266/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/217897/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/311975/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/409755/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/409750/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/399566/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/415750/widget"></iframe>
</div>
<div>
<iframe frameborder="0" scrolling="no" src="https://www.wantedly.com/projects/415757"></iframe>
</div>
</div>
</div>
</div>
<div class="blog-area">
<div class="wrap">
<h2 class="page_sub_title blue">ブログ</h2>
<p class="page_main_title blue roboto uppercase"><b>Blog</b></p>
<div class="inner">
<ul class="clearfix">
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/DSC04901-e1581986270347-140x140.jpg" alt=""></div>
<h3 class="h4"><span class="date">2020.02.17</span><a href="https://datamix.co.jp/interview-fujita-coo/">「データサイエンスはMBA以上の武器になる」- データミックスCOO藤田</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/4432c5645dbce8524c751750ac89e05b-140x140.jpg" alt=""></div>
<h3 class="h4"><span class="date">2020.02.14</span><a href="https://datamix.co.jp/dtst_shimizu/">清水 嵩文_データサイエンティスト育成のフロンティア_インストラクター紹介</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/Screen-Shot-2020-02-07-at-19.00.18-140x140.png" alt=""></div>
<h3 class="h4"><span class="date">2020.02.06</span><a href="https://datamix.co.jp/blog-what-is-data-science/">データサイエンス(Data Science)とは?</a></h3>
</li>
<li class="clearfix">
<div class="image"><img src="https://datamix.co.jp/test/wp-content/uploads/2020/02/n_23ku_2019-140x140.png" alt=""></div>
<h3 class="h4"><span class="date">2020.02.04</span><a href="https://datamix.co.jp/blog-taiki-jidou/">保育園に入りやすい区はどこ? 〜23区別「待機児童の状況」の変化〜</a></h3>
</li>
</ul>
</div>
<div class="home-news">
<div class="content_section">
<div class="single">
<div class="button_hvr yellow"><a href="/blog" class="hvr-shutter-out-horizontal"><span class="icon-box3"><b>もっと見る</b></span></a></div>
</div>
</div>
</div>
</div>
</div>
<footer>
<div>
<div class="cf">
<div class="logo">
<a href="/">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 1000 199.875">
<g>
<path d="M391.914,81.907h-76.178l7.106,24.162h54.641
c15.624,0,20.335,7.936,20.335,20.336v28.976c0,12.396-4.711,20.331-20.335,20.331h-32.119v-58.27h-29.628v82.433h76.178
c24.409,0,35.533-10.302,35.533-35.53v-46.902C427.447,92.214,416.323,81.907,391.914,81.907"/>
<polygon points="808.343,81.907 781.711,157.484 755.078,81.907 723.765,81.907
723.765,199.875 751.528,199.875 751.528,136.515 774.006,199.875 789.42,199.875 811.893,136.515 811.893,199.875
839.656,199.875 839.656,81.907 "/>
<path d="M669.96,107.37h-51.917l7.109,19.312h33.271
c11.476,0,15.542,5.396,16.134,13.945h-42.08c-19.51,0-28.398,8.231-28.398,28.397v2.451c0,20.162,8.889,28.399,28.398,28.399
h37.483c19.509,0,28.397-8.237,28.397-28.399v-35.708C698.357,115.606,689.469,107.37,669.96,107.37 M658.423,180.562h-14.415
c-12.489,0-15.663-4.399-15.663-10.312c0-5.906,3.174-10.315,15.663-10.315h30.671v4.376
C674.679,174.224,670.913,180.562,658.423,180.562"/>
<path d="M502.035,107.37h-51.917l7.107,19.312h33.278
c11.468,0,15.54,5.396,16.131,13.945h-42.083c-19.51,0-28.401,8.231-28.401,28.397v2.451c0,20.162,8.891,28.399,28.401,28.399
h37.483c19.508,0,28.396-8.237,28.396-28.399v-35.708C530.431,115.606,521.543,107.37,502.035,107.37 M490.503,180.562h-14.419
c-12.488,0-15.664-4.399-15.664-10.312c0-5.906,3.176-10.315,15.664-10.315h30.672v4.376
C506.757,174.224,502.988,180.562,490.503,180.562"/>
<path d="M594.838,180.562h-8.442c-12.489,0-16.256-6.339-16.256-16.251v-37.629
h26.83l-7.108-19.312H570.14V81.908h-23.674v25.462h-12.392l7.104,19.312h5.287v44.794c0,20.162,8.889,28.399,28.397,28.399h27.084
L594.838,180.562z"/>
<rect x="855.827" y="107.37" width="23.68" height="92.505"/>
<path d="M879.512,87.95c0,6.536-5.307,11.842-11.843,11.842
c-6.54,0-11.846-5.306-11.846-11.842c0-6.545,5.306-11.851,11.846-11.851C874.205,76.099,879.512,81.405,879.512,87.95"/>
<path d="M978.274,152.559c3.682-4.46,6.687-10.602,9.884-18.744L1000,105.333
h-23.679l-9.71,21.316c-5.142,9.415-6.334,16.256-18.819,16.256h-0.53c-12.485,0-13.683-6.841-18.824-16.256l-9.705-21.316h-23.68
l11.838,28.481c3.197,8.143,6.207,14.284,9.888,18.744c-3.681,4.451-6.69,10.598-9.888,18.74l-11.838,28.486h23.68l9.705-21.321
c5.142-9.415,6.339-16.251,18.824-16.251h0.53c12.485,0,13.678,6.836,18.819,16.251l9.71,21.321H1000l-11.842-28.486
C984.961,163.156,981.956,157.01,978.274,152.559"/>
<path d="M243.551,107.248c-5.841-4.239-13.999-2.943-18.244,2.893
c-14.025,19.303-35.756,30.378-59.62,30.378h-0.005v26.117h0.005c32.323,0,61.757-14.997,80.754-41.146
C250.679,119.658,249.385,111.488,243.551,107.248"/>
<path d="M276.071,79.24c0,8.931-7.238,16.167-16.168,16.167
c-8.931,0-16.167-7.236-16.167-16.167s7.236-16.171,16.167-16.171C268.833,63.069,276.071,70.309,276.071,79.24"/>
<path d="M87.816,107.248c5.836-4.235,14.002-2.943,18.239,2.893
c14.032,19.308,35.763,30.379,59.627,30.379V63.07L102.616,0H0v199.875h165.682v-33.239c-32.323,0-61.754-14.997-80.754-41.146
C80.689,119.657,81.982,111.488,87.816,107.248 M71.462,95.407c-8.931,0-16.167-7.236-16.167-16.167
c0-8.93,7.236-16.17,16.167-16.17s16.166,7.24,16.166,16.17C87.628,88.171,80.393,95.407,71.462,95.407"/>
</g>
</svg> </a><br>
<small class="copyright">©2017 DataMix</small>
</div>
<div class="ft_r">
<nav class="a-footer">
<div class="menu-ftm_new-container"><ul id="menu-ftm_new" class="cf"><li id="menu-item-1039" class="menu-item menu-item-type-custom menu-item-object-custom menu-item-1039"><a href="/news">ニュース</a></li>
<li id="menu-item-4016" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-4016"><a href="https://datamix.co.jp/faq/">スクールに関するFAQ</a></li>
</ul></div> </nav>
<nav class="a-footer">
<div class="menu-footer-container"><ul id="menu-footer" class="cf"><li id="menu-item-54" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-54"><a href="https://datamix.co.jp/company/">会社概要</a></li>
<li id="menu-item-53" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-53"><a href="https://datamix.co.jp/terms-of-service/">利用規約</a></li>
<li id="menu-item-52" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-52"><a href="https://datamix.co.jp/privacy-policy/">プライバシーポリシー</a></li>
<li id="menu-item-51" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-51"><a href="https://datamix.co.jp/act-on-specified-commercial-transaction/">特定商取引法に基づく表記</a></li>
<li id="menu-item-146" class="menu-item menu-item-type-post_type menu-item-object-page menu-item-146"><a href="https://datamix.co.jp/form/">お問い合わせ</a></li>
</ul></div> </nav>
</div>
<p class="ft_robo"><img src="https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/lobo_01.png" alt=""></p>
</div>
</div>
</footer>
</div>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/main.js"></script>
<!--script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script-->
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.bgswitcher.js"></script>
<!--<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/tab.js"></script>-->
<script>
$(function() {
//クリックしたときのファンクションをまとめて指定
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target > .tab_wrap:first').css('display','block');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target2 > .tab_wrap:first').css('display','block');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target3 > .tab_wrap:first').css('display','block');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target4 > .tab_wrap:first').css('display','block');
$('.tab_target5 .tab_wrap').css('display','none');
$('.tab_target5 > .tab_wrap:first').css('display','block');
$('.tab li:first').addClass('select');
$('.tab li').click(function() {
//.index()を使いクリックされたタブが何番目かを調べ、
//indexという変数に代入します。
var index = $('.tab li').index(this);
//コンテンツを一度すべて非表示にし、
$('.tab_target .tab_wrap').css('display','none');
$('.tab_target2 .tab_wrap').css('display','none');
$('.tab_target3 .tab_wrap').css('display','none');
$('.tab_target4 .tab_wrap').css('display','none');
$('.tab_target5 .tab_wrap').css('display','none');
//クリックされたタブと同じ順番のコンテンツを表示します。
$('.tab_target .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target2 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target3 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target4 .tab_wrap').eq(index).fadeIn("slow");
$('.tab_target5 .tab_wrap').eq(index).fadeIn("slow");
//一度タブについているクラスselectを消し、
$('.tab li').removeClass('select');
//クリックされたタブのみにクラスselectをつけます。
$(this).addClass('select');
});
});
</script>
<script type="text/javascript">
jQuery(function($) {
$('.slider').bgSwitcher({
images: ['https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_01.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_02.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_03.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_04.jpg', 'https://datamix.co.jp/test/wp-content/themes/datamix-new/assets/slide_05.jpg'],
interval: 5000,
effect: "fade"
});
});
$(function() {
$ ('.instructors_sub li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main li.on').removeClass('on');
$('.instructors_main li[data-card="' + id + '"]').addClass('on');
});
$ ('.instructors_sub2 li').hover(function(){
var _self = $(this);
var id = _self.data('thumbnail');
$('.instructors_main2 li.on').removeClass('on');
$('.instructors_main2 li[data-card="' + id + '"]').addClass('on');
});
var rWidth = $('.round-box').outerWidth();
$('.round-box').css('height', rWidth);
var rWidth2 = $('.roundsec.round3 .round-box').outerWidth();
$('.roundsec.round3 .round-box').css('height', rWidth2);
});
</script>
<script src="https://datamix.co.jp/test/wp-content/themes/datamix-new/js/jquery.tile.js"></script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
$(".blog-area ul li").tile();
var windowSm = 750;
if (windowWidth > 750) {
$(".home-button ul li").tile();
$(".module-stepbox .module-stepbox-total_5 .module-stepbox-step").tile(5);
}
$(window).on("scroll", function() {
scrollHeight = $(document).height();
scrollPosition = $(window).height() + $(window).scrollTop();
footHeight = $("footer").innerHeight();
if (windowWidth < 750) {
if ( scrollHeight - scrollPosition <= footHeight ) {
$('.sp_fix_btn').slideUp();
} else {
$('.sp_fix_btn').slideDown();
}
}
});
});
</script>
<script>
$(window).on('load resize', function(){
var windowWidth = $(window).width();
if (windowWidth > 750) {
var mainheight = $('.page_main_banner').innerHeight()-70;
// var mainheight = $('.page_main_banner').height()+130;
var lavelheight = $('.top_label').height();
if(mainheight){
var hdheight = mainheight;
}
if(lavelheight){
var hdheight = lavelheight;
}
$('header').css('top', hdheight);
var triggerNode = $("header");
$(window).scroll(function () {
var value = $(this).scrollTop();
var triggerNodePosition = $(triggerNode).offset().top;
// 現在のスクロール位置が引き金要素の位置より下にあれば‥
if (value > hdheight) {
// なんらかの命令を実行
$('header').css({"top": 0,"position":"fixed"});
}else{
$('header').css({'top': hdheight, "position":"absolute"});
}
});
}
});
</script>
<script>
/*
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create','UA-99319144-1','auto');
ga('linker:autoLink', ['datamix.co.jp','datamix-info.resv.jp']);
ga('send','pageview');
*/
</script>
<script type='text/javascript'>
/* <![CDATA[ */
var codePrettifyLoaderBaseUrl = "https:\/\/datamix.co.jp\/test\/wp-content\/plugins\/code-prettify\/prettify";
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/code-prettify/prettify/run_prettify.js?ver=1.4.0'></script>
<script type='text/javascript'>
/* <![CDATA[ */
var wpcf7 = {"apiSettings":{"root":"https:\/\/datamix.co.jp\/wp-json\/contact-form-7\/v1","namespace":"contact-form-7\/v1"}};
/* ]]> */
</script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/contact-form-7/includes/js/scripts.js?ver=5.1.5'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-content/plugins/youtube-embed-plus/scripts/fitvids.min.js?ver=13.1.2.5'></script>
<script type='text/javascript' src='https://datamix.co.jp/test/wp-includes/js/wp-embed.min.js?ver=4.9.3'></script>
<!-- リマーケティング タグの Google コード -->
<!--------------------------------------------------
リマーケティング タグは、個人を特定できる情報と関連付けることも、デリケートなカテゴリに属するページに設置することも許可されません。タグの設定方法については、こちらのページをご覧ください。
http://google.com/ads/remarketingsetup
--------------------------------------------------->
<script type="text/javascript">
/* <![CDATA[ */
var google_conversion_id = 852033649;
var google_custom_params = window.google_tag_params;
var google_remarketing_only = true;
/* ]]> */
</script>
<script type="text/javascript" src="//www.googleadservices.com/pagead/conversion.js">
</script>
<noscript>
<div style="display:inline;">
<img height="1" width="1" style="border-style:none;" alt="" src="//googleads.g.doubleclick.net/pagead/viewthroughconversion/852033649/?guid=ON&script=0"/>
</div>
</noscript>
<!-- Yahoo Code for your Target List -->
<script type="text/javascript" language="javascript">
/* <![CDATA[ */
var yahoo_retargeting_id = '4F2M18WOUC';
var yahoo_retargeting_label = '';
var yahoo_retargeting_page_type = '';
var yahoo_retargeting_items = [{item_id: '', category_id: '', price: '', quantity: ''}];
/* ]]> */
</script>
<script type="text/javascript" language="javascript" src="//b92.yahoo.co.jp/js/s_retargeting.js"></script>
<script type="text/javascript">
//Configure an instance for your database
var td = new Treasure({
host: 'in.treasuredata.com',
writeKey: '9610/410a6a4e59ee7703f203ba2c070721601c08a013',
database: 'datamix_marketing',
startInSignedMode: true
});
// Enable cross-domain tracking
td.set('$global', 'td_global_id', 'td_global_id');
// Track pageview information into table
td.trackPageview('pageviews');
</script>
</body>
</html>
"""
MEETUP_API_SOURCE = """
[{"created":1569657937000,"duration":7200000,"fee":{"accepts":"cash","amount":1000.0,"currency":"JPY","description":"","label":"Price","required":false},"id":"265234301","name":"レコメンデーション論文を読む!データミックスゼミ第3回","rsvp_limit":25,"date_in_series_pattern":false,"status":"upcoming","time":1572066000000,"local_date":"2019-10-26","local_time":"14:00","updated":1569657937000,"utc_offset":32400000,"waitlist_count":0,"yes_rsvp_count":4,"venue":{"id":26481303,"name":"データミックス","lat":35.69807815551758,"lon":139.756103515625,"repinned":true,"address_1":"Chiyoda City, Kanda Jinbōchō, 2-chōme−2−44","city":"Tōkyō-to","country":"jp","localized_country_name":"Japan"},"group":{"created":1539055790000,"name":"DataMix.Connect","id":30152644,"join_mode":"approval","lat":35.66999816894531,"lon":139.77000427246094,"urlname":"datamix","who":"メンバー","localized_location":"Tokyo, Japan","state":"","country":"jp","region":"en_US","timezone":"Asia/Tokyo"},"link":"https://www.meetup.com/datamix/events/265234301/","description":"<p>PythonやRのライブラリを使ってデータ分析をしたり、統計学や機械学習の基礎は理解できた。そして、さらなる飛躍に向けて、最先端の論文を読んでみたい・・・</p> <p>そんな皆様を対象に、データミックスゼミ(通称ゼミ)を実験的に始めます。</p> <p>【ゼミの内容】<br/>データミックスゼミでは、3ヶ月間にわたってテーマに沿った論文を複数読み、毎月末に集まって、論文の内容や今後読みたい論文などを話し合っていきます。また、集まらない期間は、わからないことを質問をし合ったり、「面白い!」と思ったことを投稿できるオンライン上のディスカッションボードを用意します。ゼミスタイルですので、ファシリテーターが中心となって、参加者どうしがディスカッションするスタイルです。</p> <p>詳細は、こちらをご覧ください。<br/><a href=\\"https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf\\" class=\\"linkified\\">https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf</a></p> <p>【7月から開始するゼミのテーマ】<br/>7月から開始するテーマはレコメンデーションと自然言語処理です。</p> <p>\u203bこのMeetupはレコメンデーションのキックオフです</p> <p>【スケジュール】<br/>レコメンデーション<br/>7/27(土) 14:00~16:00(キックオフ)(済み)<br/>8/31(土) 14:00~16:00(第1回目対面ゼミ)(済み)<br/>9/28(土) 14:00~16:00(第2回目対面ゼミ)(済み)<br/>10/26(土) 14:00~16:00(第3回目対面ゼミ)</p> <p>【対象者】<br/>データミックスのアドバンスステップを修了されている方で以下の1~3のいずれかに該当する方<br/>1. 機械学習や統計学に関する学術論文を読んでみたいと思っているが、読み方がわからない。<br/>2. 
一人で読んでも挫折しそう・・・<br/>3. 読んでて「面白い!」と思った部分を人と共有したい</p> <p>【次回読む論文】<br/>TEM: Tree-enhanced Embedding Model for Explainable Recommendation<br/><a href=\\"https://dl.acm.org/citation.cfm?id=3186066\\" class=\\"linkified\\">https://dl.acm.org/citation.cfm?id=3186066</a></p> <p>【ゲスト参加】<br/>もしご友人で興味がある方がいらっしゃいましたらぜひお誘いください<br/>\u203b在校生・卒業生1名につき2名まで</p> <p>【費用】<br/>各回1,000円(会場払)</p> ","visibility":"public","member_pay_fee":false},{"created":1569663801000,"duration":7200000,"fee":{"accepts":"cash","amount":1000.0,"currency":"JPY","description":"","label":"Price","required":false},"id":"265235011","name":"自然言語処理の論文を読む!データミックスゼミ第3回","rsvp_limit":30,"date_in_series_pattern":false,"status":"upcoming","time":1572076800000,"local_date":"2019-10-26","local_time":"17:00","updated":1569663801000,"utc_offset":32400000,"waitlist_count":0,"yes_rsvp_count":5,"venue":{"id":25967181,"name":"株式会社データミックス ","lat":35.69807052612305,"lon":139.7562713623047,"repinned":true,"address_1":"千代田区神田神保町2-44","address_2":"第2石坂ビル2階","city":"東京都","country":"jp","localized_country_name":"Japan"},"group":{"created":1539055790000,"name":"DataMix.Connect","id":30152644,"join_mode":"approval","lat":35.66999816894531,"lon":139.77000427246094,"urlname":"datamix","who":"メンバー","localized_location":"Tokyo, Japan","state":"","country":"jp","region":"en_US","timezone":"Asia/Tokyo"},"link":"https://www.meetup.com/datamix/events/265235011/","description":"<p>PythonやRのライブラリを使ってデータ分析をしたり、統計学や機械学習の基礎は理解できた。そして、さらなる飛躍に向けて、最先端の論文を読んでみたい・・・</p> <p>そんな皆様を対象に、データミックスゼミ(通称ゼミ)を実験的に始めます。</p> <p>【ゼミの内容】<br/>データミックスゼミでは、3ヶ月間にわたってテーマに沿った論文を複数読み、毎月末に集まって、論文の内容や今後読みたい論文などを話し合っていきます。また、集まらない期間は、わからないことを質問をし合ったり、「面白い!」と思ったことを投稿できるオンライン上のディスカッションボードを用意します。</p> <p>詳細は、こちらをご覧ください。<br/><a href=\\"https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf\\" class=\\"linkified\\">https://datamix-seminar.s3-ap-northeast-1.amazonaws.com/datamix-seminar-announce-20190627.pdf</a></p> 
<p>【7月から開始するゼミのテーマ】<br/>このミートアップは、【自然言語処理】をテーマにしたゼミです。<br/>\u203bレコメンデーションのゼミ参加希望の方はご注意ください</p> <p>【スケジュール】<br/>7/27(土) 17:00~19:00(キックオフ)済み<br/>8/31(土) 17:00~19:00(第1回目対面ゼミ)済み<br/>9/28(土) 17:00~19:00(第2回目対面ゼミ)済み<br/>10/26(土) 17:00~19:00(第3回目対面ゼミ)</p> <p>【対象者】<br/>データミックスのアドバンスステップを修了されている方で以下の1~3のいずれかに該当する方<br/>1. 機械学習や統計学に関する学術論文を読んでみたいと思っているが、読み方がわからない。<br/>2. 一人で読んでも挫折しそう・・・<br/>3. 読んでて「面白い!」と思った部分を人と共有したい</p> <p>【次回読む論文】<br/><a href=\\"https://arxiv.org/abs/1810.04805\\" class=\\"linkified\\">https://arxiv.org/abs/1810.04805</a></p> <p>【ゲスト参加】<br/>もしご友人で興味がある方がいらっしゃいましたらぜひお誘いください<br/>\u203b在校生・卒業生1名につき2名まで</p> <p>【費用】<br/>各回1,000円(会場払)</p> ","visibility":"public","member_pay_fee":false}]
"""
|
[
"ij4nu8d4fw@gmail.com"
] |
ij4nu8d4fw@gmail.com
|
90f284e04501a00ff62afab5f4d11a2ad546a865
|
54dbbf0b3dd9ace6e3b51cb2632ae1d9302ea529
|
/编程小白的第一本 Python 入门书/类.py
|
d34cba3071a14e5a5166c402a9777084329ebe7a
|
[] |
no_license
|
zzxmona/pythontrain
|
c42f0bb89f31fea3149b21db38f74f03f3872946
|
afcfa9ba533b52adef86d51e98cc96abb3a627d5
|
refs/heads/master
| 2023-04-30T20:28:44.239500
| 2021-05-31T01:27:49
| 2021-05-31T01:27:49
| 364,789,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,927
|
py
|
# 类的实例化最好加()以免报错
class z:
con = [1, 2, 3, 4]
name = 'zzx'
__name = 'zzx'
abc = z()
print(abc.con)
print(z.name)
class two:
def __init__(self, final):
self.x = final
def name2(self):
print('zzx', '22')
def name3(self):
return 'zzx'
def name4(self, name5):
print(name5)
x = two('xxx')
print(x.x)
x.name2()
x.name4('zzx name5')
class three():
def __init__(self):
self.name = 'zzx'
def age(self):
return '22'
test3 = three()
print(test3.name)
print(test3.age())
class CocaCola:
formula = ['caffeine', 'sugar', 'water', 'soda']
def __init__(self):
for element in self.formula:
print('Coke has {}!'.format(element))
def drink(self):
print('Energy!')
coke = CocaCola()
class CocaCola2():
formula = ['caffeine', 'sugar', 'water', 'soda']
def __init__(self, logo_name):
self.local_logo = logo_name
def drink(self):
print('Energy!')
coke2 = CocaCola2('可口可乐')
print(coke2.local_logo)
print(coke2.formula)
class five():
name = 'zzx'
age = '22'
sex = '男'
def __init__(self, id):
self.id = id
def lie(self):
print('{} {} {} {}'.format(self.id, self.name, self.age, self.sex))
f = five(201732110226)
f.lie()
class jcfive(five):
test = 'test'
def five2(self):
print(self.test)
jcfive1 = jcfive('zjnu')
jcfive1.lie()
jcfive1.five2()
class te1():
def tes1(self):
return 'tes1'
class te2(te1):
def tes2(self):
print('tes2')
t2 = te2()
print(t2.tes1())
class TestA:
attr = 1
def __init__(self):
self.name = 'zzx'
self.attr = 33
def rename(self):
name2 = 'zzx'
return name2
obj_a = TestA()
print(obj_a.attr)
obj_a.attr = 42
obj_a.name = 'zx'
print(obj_a.attr, obj_a.name)
print(obj_a.rename())
|
[
"2577625924@qq.com"
] |
2577625924@qq.com
|
ed551b6f6b71ee37ff9df69bd2107696845fb278
|
d7e68dadcab9933d1ceb89c4ac4d96993721ce07
|
/PCA/pca.py
|
cce7c2cc767b931bd8143c336a9cd22b97f0c4d1
|
[] |
no_license
|
syedroshanzameer/Data-Mining
|
ff83faaffd07cf8b61783f7e160af06b65be31ae
|
d5dcdf9b04e76ec6b3d22e3349da933b6bfa8632
|
refs/heads/master
| 2021-09-10T16:34:37.349758
| 2018-03-29T11:24:00
| 2018-03-29T11:24:00
| 105,457,638
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,169
|
py
|
# author : Roshan Zameer Syed
# id:99999-2920
# description: Principal Component Analysis of the data set "arrhythmia.data"
import pandas as pd
import numpy as np
from sklearn.preprocessing import Imputer
from sklearn.decomposition import PCA
data = pd.read_csv('arrhythmia.data', header=None) # Read data from the file
data.isnull().sum().sum()
data = data.replace('?', np.NaN) # Replace missing data with NaN
imp = Imputer(missing_values='NaN', strategy='mean', axis=0) # Fill missing values with "Mean"
imp.fit(data)
data_clean = imp.transform(data) # Transform the data
#print(data_clean)
pca = PCA(n_components=80)
pca.fit(data_clean)
data_red = pca.transform(data_clean)
print("Eigen Values: ", pca.explained_variance_) # Printing Eigen Values
print("Eigen Vectors: ", pca.components_) # Printing Eigen Vectors
# print(data_red)
# print (data.shape)
# print(data_clean.shape)
# print(data_red.shape)
print("Variance Ratio: ", pca.explained_variance_ratio_) # Printing Variance Ratio
print("Sum of the ratio's: ", pca.explained_variance_ratio_.sum()) # Sum of ratio's : 0.996325978866 = 99.6%
|
[
"RSyed9564@muleriders.saumag.edu"
] |
RSyed9564@muleriders.saumag.edu
|
eba5e24cb7ae539f05831d88b27d99b2346a8f0a
|
ec9129d3eb1880df9f0b54c76510352a7e004b0c
|
/tools/make_vps_tarball.py
|
b03537feaa59ec1a6a93c522cfd621963bf12eba
|
[] |
no_license
|
eugen-don/vps
|
4057e6ddb1db274dbd8d78fa926376cfc3a40aa7
|
6a16569868241b35d8137b7f2b2f8db0cf67ff55
|
refs/heads/master
| 2021-01-11T16:29:53.109075
| 2014-05-14T09:20:33
| 2014-05-14T09:20:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 771
|
py
|
#!/usr/bin/env python
import sys
import os
import _env
import ops.os_init as os_init
import conf
assert conf.OS_IMAGE_DIR and os.path.isdir(conf.OS_IMAGE_DIR)
def usage():
print """usage: \n%s [image_path/partion_path] [tarball_dir]
""" % (sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
os._exit(0)
img_path = sys.argv[1]
tarball_dir = sys.argv[2]
if not os.path.exists(img_path):
print "%s not exists" % (img_path)
os._exit(1)
if not os.path.isdir(tarball_dir):
print '%s is not a directory' % (tarball_dir)
os._exit(1)
tarball_path = os_init.pack_vps_fs_tarball(img_path, tarball_dir)
print "%s packed in %s" % (img_path, tarball_path)
if "__main__" == __name__:
main()
|
[
"frostyplanet@gmail.com"
] |
frostyplanet@gmail.com
|
7bf8347897e39eb95aac73a02b6b6f56d93586c6
|
d2fb817130e9d8f40dc25fec5e8e5e7d42f91ec7
|
/scons_gbd_docs/Gbd/Docs/Mkdocs/MkdocsBuild.py
|
a54edcf9ea65abd0a9e048337b5f47f23b444f26
|
[
"MIT"
] |
permissive
|
ASoftTech/Scons.Gbd.Docs
|
1d8a32aed7a4b43186ea661baee6fef1832eb266
|
4d9fb7585d9565f57306774efb4342fe9b8822f2
|
refs/heads/master
| 2020-03-08T12:58:35.290077
| 2018-05-28T20:48:23
| 2018-05-28T20:48:23
| 128,145,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,089
|
py
|
"""
This tool will generate the documentation output as html
using markdown files as an input via mkdocs to an output directory
"""
from __future__ import (division, print_function,
absolute_import, unicode_literals)
import SCons.Script
from SCons.Environment import Environment
import os
import sys
import os.path as path
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common import MkdocsCommon
from scons_gbd_docs.Gbd.Docs.Mkdocs.Common.MkdocsConfig import MkdocsConfig
from SCons.Script import Builder
def exists(env):
"""Check if we're okay to load this builder"""
return MkdocsCommon.detect(env)
def generate(env):
"""Called when the tool is loaded into the environment at startup of script"""
assert(exists(env))
if 'Mkdocs_Config' not in env:
env['Mkdocs_Config'] = MkdocsConfig(env)
env['Mkdocs_Config'].set_defaults()
scanner = env.Scanner(
MkdocsCommon.scanner,
name='MkdocsScanner'
)
bld = Builder(
action=__Build_func,
emitter=MkdocsCommon.emitter,
source_scanner=scanner,
)
env.Append(BUILDERS={'MkdocsBuild': bld})
def __Build_func(target, source, env):
"""Actual builder that does the work after the SConstruct file is parsed"""
cfg = env['Mkdocs_Config']
assert isinstance(cfg, MkdocsConfig)
cmdopts = [cfg.Exe, 'build']
cmdopts.append('--config-file=' + str(source[0]))
if cfg.CleanBuild:
cmdopts.append('--clean')
elif not cfg.CleanBuild:
cmdopts.append('--dirty')
if cfg.Strict:
cmdopts.append('--strict')
if cfg.Theme:
cmdopts.append('--theme=$Mkdocs_Theme')
if cfg.CustomDir:
cmdopts.append('--theme-dir=$Mkdocs_CustomDir')
if env['Mkdocs_SiteDir'] is not None:
cmdopts.append('--site-dir=$Mkdocs_SiteDir')
if cfg.Quiet:
cmdopts.append('--quiet')
if cfg.Verbose:
cmdopts.append('--verbose')
cmdopts = cmdopts + cfg.ExtraArgs
print('Building MkDocs Documentation:')
env.Execute(env.Action([cmdopts], chdir=cfg.WorkingDir))
|
[
"garlicbready@googlemail.com"
] |
garlicbready@googlemail.com
|
04ae589706bee6d73d70525a05dd97e1c16387fc
|
bf45d6fe3d0c6ee6e74c0c63c4206eee72361383
|
/sketchit/draw.py
|
58ebbb85db158fb5ff66bb82afb2a06c4ddb2b3d
|
[
"MIT"
] |
permissive
|
tambibhavika2000/sketchme
|
00d6273b5b4523dc8a1e5f3d22fd58790af80896
|
00c7ccff4531d48fb5ef2c403c4bb0e0b1c749bd
|
refs/heads/main
| 2023-07-13T06:32:13.071137
| 2021-09-01T12:58:01
| 2021-09-01T12:58:01
| 402,060,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import cv2
def sketchit(path):
image=cv2.imread(path)
grey_img=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
invert=cv2.bitwise_not(grey_img)
blur=cv2.GaussianBlur(invert,(21,21),0)
invertedblur=cv2.bitwise_not(blur)
sketch=cv2.divide(grey_img , invertedblur,scale=256.0)
cv2.imwrite('sketch.png',sketch)
path=input("Enter Path of Image: ")
sketchit(path)
|
[
"noreply@github.com"
] |
noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.