blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3585f52a7877b9c940a3270f4b4e14c82136bb34 | f5d0e85b40c5f25a5ac05458915121fe572d0253 | /final_project/db/orm_builder.py | ff7a82ae36e1ead64ea7eab005a54eec016a7c0f | [] | no_license | Salex406/2020-1-Atom-QA-Python-A-Zharikov | f06756c5dd5c2d2676989923c4861b43178859de | ff018f33cc38d5b0c15e37701a9c5fd8e6b35c9d | refs/heads/master | 2022-12-12T01:09:33.958453 | 2020-05-29T11:56:28 | 2020-05-29T11:56:28 | 243,089,696 | 0 | 0 | null | 2022-12-08T04:08:06 | 2020-02-25T19:56:01 | Python | UTF-8 | Python | false | false | 1,970 | py | from db.orm_model import Base, User
from db.mysql_orm_client import MysqlOrmConnection
class MysqlOrmBuilder:
    """Thin data-access layer over a SQLAlchemy session for the User table.

    All methods delegate to ``self.connection.session``; the caller owns the
    connection lifecycle.  NOTE(review): the read methods call
    ``session.commit()`` before querying — presumably to expire cached state
    so queries see fresh rows; confirm this is intentional.
    """

    def __init__(self, connection: MysqlOrmConnection):
        self.connection = connection

    def add_user(self, user):
        """Insert a User row and commit."""
        self.connection.session.add(user)
        self.connection.session.commit()

    def del_user(self, user):
        """Delete a User instance (ORM-level delete) and commit."""
        self.connection.session.delete(user)
        self.connection.session.commit()

    def upd_access(self, user, value):
        """Set ``user.access`` to ``value`` and commit the change."""
        user.access = value
        self.connection.session.commit()

    def get_users(self):
        """Print username/password/access for every user, ordered by id."""
        for instance in self.connection.session.query(User).order_by(User.id):
            print(instance.username, instance.password, instance.access)

    def check_user(self, user):
        """Return True if a user with the same username exists."""
        # DRY fix: this previously duplicated check_user_by_name verbatim.
        return self.check_user_by_name(user.username)

    def check_user_by_name(self, username):
        """Return True if a user with ``username`` exists."""
        self.connection.session.commit()
        exists = self.connection.session.query(User.id).filter_by(username=username).scalar() is not None
        return exists

    def delete_user(self, user):
        """Bulk-delete rows matching ``user.username`` and commit.

        Unlike ``del_user`` this issues a query-level DELETE, so no ORM
        cascade/session bookkeeping happens for the removed rows.
        """
        self.connection.session.query(User.id).filter(User.username == user.username).delete()
        self.connection.session.commit()

    def get_datetime(self, user):
        """Return ``start_active_time`` of the first row matching the username, or None."""
        self.connection.session.commit()
        for obj in self.connection.session.query(User).filter(User.username == user.username):
            return obj.start_active_time

    def get_active(self, user):
        """Return ``active`` of the first row matching the username, or None."""
        self.connection.session.commit()
        for obj in self.connection.session.query(User).filter(User.username == user.username):
            return obj.active

    def get_access(self, user):
        """Return ``access`` of the first row matching the username.

        Raises AttributeError if no row matches (``first()`` returns None).
        """
        self.connection.session.commit()
        obj = self.connection.session.query(User).filter(User.username == user.username).first()
        return obj.access
| [
"noreply@github.com"
] | noreply@github.com |
b985a285791f135dc77e03e0e840812e7dbfbb74 | 5107c33d9466f67266cd5bb936461beab60a5a4f | /was-config-scripts/start-stop-server-sample.py | 34dda5d2b22c1b14c99fb8871ef4dc529ccad8be | [] | no_license | huy/was_scripts | 1158dcf7fc24190efa3322bb750289e31bd78cd1 | 4eb293489d570109a3a094238d7bec33ce81b88e | refs/heads/master | 2021-01-10T22:06:33.857375 | 2011-10-03T04:22:06 | 2011-10-03T04:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | nodeName='FCASAWNODE01'
serverName='Sample'
AdminControl.startServer(serverName,nodeName)
AdminControl.stopServer(serverName,nodeName)
| [
"lehuy20@gmail.com"
] | lehuy20@gmail.com |
97191e8af03f621938f6eedc22b25d168d508737 | e0bb6e1e7d4091283db64efb879f3d6d665c6bdd | /backend/branding_env/bin/f2py | 0b20e25eb202fb4c377a52c7af8696227a021c9c | [] | no_license | akshitaggar123/PhishingCheckTool | 3c2adebf3ea15ca84b0fc5ddfb7abef81a565de7 | 5577b42723cf34d4328c49e16fc9d5aa63f1e576 | refs/heads/master | 2023-06-15T16:37:51.224792 | 2021-07-15T07:10:13 | 2021-07-15T07:10:13 | 386,191,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | #!/home/atharva/Desktop/DP/branding/backend/branding_env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim for NumPy's f2py entry point.
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
    # Normalize argv[0] (strip setuptools' "-script.pyw"/".exe" suffix)
    # before delegating to f2py's real main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nick.talman@capexmove.io"
] | nick.talman@capexmove.io | |
4c8dfd726327d3951104e170186c23e037872432 | 0b83d3dfaaf0ead56567a1b95bfc51ade0113ada | /knowledge_graph/sentiment_analysis/stock_sentiment.py | 42929c3d1c51844a74ece101a060b740fc786f30 | [] | no_license | wagaman/deep_learning | 8ad76fb153ae9e87b351a2afef79da9985e66ea0 | 815a5706183063522d5a26c321b047ee1ab812cf | refs/heads/master | 2020-06-16T19:13:19.918883 | 2018-09-17T08:40:56 | 2018-09-17T08:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,887 | py | # -*- coding: utf-8 -*-
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy import Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
import numpy as np
import pandas as pd
from ylib import ylog
from requests.exceptions import ConnectionError, ChunkedEncodingError
from snownlp import SnowNLP
import logging
from pyltp import SentenceSplitter
from ylib.preprocessing import preprocess_string
from ylib.preprocessing import strip_numeric
from ylib.preprocessing import remove_stopwords
from ylib.preprocessing import strip_punctuation
from ylib.preprocessing import tokenize
from collections import defaultdict
from timeit import default_timer
import os
from pyltp import Postagger
from pyltp import NamedEntityRecognizer
import itertools
import matplotlib.pyplot as plt
from tqdm import tqdm
import datetime
filter_setting = [tokenize, strip_punctuation]
user_path = os.path.expanduser("~")
LTP_DATA_DIR = user_path + '/share/software/ltp_data_v3.4.0'  # path to the LTP model directory
# Part-of-speech tagging
pos_model_path = os.path.join(LTP_DATA_DIR,
                              'pos.model')  # POS model path; model file name is `pos.model`
postagger = Postagger()  # initialize the tagger instance
postagger.load(pos_model_path)  # load the model
# Named entity recognition
ner_model_path = os.path.join(LTP_DATA_DIR,
                              'ner.model')  # NER model path; model file name is `ner.model`
recognizer = NamedEntityRecognizer()  # initialize the recognizer instance
recognizer.load(ner_model_path)  # load the model
# MySQL connection to the JYDB database.
# NOTE(review): credentials are hard-coded here — consider moving to config.
engine = create_engine(
    'mysql+mysqlconnector://datatec:0.618@[172.16.103.103]:3306/JYDB',
    echo=False)
logging.basicConfig(
    format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
# Pre-pickled analyst-report DataFrame; the code below reads at least the
# 'Conclusion' and 'WritingDate' columns from it.
df_analyst_report = pd.read_pickle(
    user_path + '/share/deep_learning/data/sentiment/df_analyst_report.pkl')
def analyze_sentiment(df_text):
    """Run NLP over each analyst report row and collect per-row results.

    For every row of ``df_text`` (expects 'Conclusion' and 'WritingDate'
    columns):
      1. preprocess/tokenize the text,
      2. POS-tag and NER-tag it with the module-level pyltp models to
         extract people (Nh), geography (Ns) and organization (Ni) entities,
      3. score sentiment and extract keywords/summary with SnowNLP,
      4. append one result row.

    Keyword Arguments:
    df_text -- DataFrame of analyst reports.

    Returns a DataFrame with columns: datetime, people, geography,
    organization, keyword, summary, score (SnowNLP sentiment scaled to [0, 2]).
    """
    df_result = pd.DataFrame(columns=[
        'datetime', 'people', 'geography', 'organization', 'keyword', 'summary',
        'score'
    ])
    for item in df_text.iterrows():
        logging.info(item[0])
        text = item[1]['Conclusion']
        datetime = item[1]['WritingDate']
        if pd.isnull(text):
            continue  # skip reports with no conclusion text
        text_split = preprocess_string(text)
        words = text_split.split()  # tokenized words
        postags = postagger.postag(words)  # POS tagging
        netags = recognizer.recognize(words, postags)  # named entity recognition
        # Bucket words by their NE tag (B/I/E/S position prefix + Nh/Ns/Ni type)
        dict_netags = defaultdict(list)
        for tag, word in zip(netags, words):
            dict_netags[tag].append(word)
        s = SnowNLP(text)
        score = s.sentiments * 2  # rescale SnowNLP's [0, 1] sentiment to [0, 2]
        # person names (Nh), place names (Ns), organization names (Ni)
        ls_organization = [
            dict_netags[x] for x in ['S-Ni', 'B-Ni', 'E-Ni', 'I-Ni']
        ]
        ls_people = [
            dict_netags[x] for x in ['S-Nh', 'B-Nh', 'E-Nh', 'I-Nh']
        ]
        ls_geography = [
            dict_netags[x] for x in ['S-Ns', 'B-Ns', 'E-Ns', 'I-Ns']
        ]
        try:
            df_result = df_result.append(
                {
                    'datetime':
                    datetime,
                    'keyword':
                    ','.join(s.keywords()),
                    'organization':
                    list(itertools.chain.from_iterable(ls_organization)),
                    'people':
                    list(itertools.chain.from_iterable(ls_people)),
                    'geography':
                    list(itertools.chain.from_iterable(ls_geography)),
                    'summary':
                    ';'.join(s.summary()),
                    'score':
                    score
                },
                ignore_index=True)
        except Exception:
            # Was a bare `except:` (swallowed even KeyboardInterrupt).
            # Keep the best-effort skip, but narrow it and log the failure.
            logging.exception('failed to build sentiment row %s', item[0])
            continue
    return df_result
# # 人名(Nh)、地名(Ns)、机构名(Ni。)
# # B、I、E、S
# ls_organization = [dict_netags[x] for x in ['S-Ni', 'B-Ni', 'E-Ni', 'I-Ni']]
# ls_people = [dict_netags[x] for x in ['S-Nh', 'B-Nh', 'E-Nh', 'I-Nh']]
# ls_geography = [dict_netags[x] for x in ['S-Ns', 'B-Ns', 'E-Ns', 'I-Ns']]
# Analyze the first 100 reports and persist the results.
df_result = analyze_sentiment(df_analyst_report[:100])
df_result.to_pickle('df_sentiment.pkl')
# Filter criteria (set to None to disable each one).
organization = None
# organization = '中通'
people = '亿元'
# people = '刘士余'
geography = None
# geography = '中国'
# date = pd.to_datetime('20170308', format='%Y%m%d', errors='ignore')
date = pd.date_range(start='2017-10-25', end='20171026')
# Keep rows that mention any of the requested entities.
# NOTE(review): if all three filters are None, df_filter is never assigned
# and the groupby below raises NameError.
if people or organization or geography:
    df_filter = df_result[df_result.apply(
        lambda row: people in row.people or organization in row.organization or geography in row.geography,
        axis=1)]
# elif
# df_filter = df_result[df_result.apply(
#     lambda row: organization in row.organization, axis=1)]
# Mean sentiment and mention count per day.
df_sentiment = df_filter[['datetime', 'score']].groupby('datetime').mean()
df_sentiment['count'] = df_filter[['datetime',
                                   'score']].groupby('datetime').count()
# NOTE(review): DataFrame.ix is deprecated in modern pandas (use .loc).
print(df_sentiment.ix[date])
| [
"victor.wuv@gmail.com"
] | victor.wuv@gmail.com |
fff111530074c14c0b4cbac42013951e18ba2e39 | 31b52f5d90f650d79b0b047b63c735abf2b66eed | /deploying_simple_api/wsgi.py | 48ef4462a781585fa99e8f24d65b6ae7f295c458 | [] | no_license | kawiraanitah/Working-with-images.md | 80016cf80e98527e05b0e3686f2a413dc334458f | bdcf08d5a9c945d59c98d0a7ba8efee39abc539a | refs/heads/master | 2021-07-23T01:47:29.037097 | 2017-10-31T10:16:23 | 2017-10-31T10:16:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for deploying_simple_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "deploying_simple_api.settings")
application = get_wsgi_application()
| [
"annitahkawira@gmail.com"
] | annitahkawira@gmail.com |
f7847724a46f8376bfbee1a9af9c8aa2252c04fc | 33942b1ad1f8468dd5c981c66eaacd858b76a6c5 | /spider.py | 045b552e352e8f39cce693d6b553dee34c37ccc6 | [] | no_license | ldcx1/gad-python-gr-2 | 7eb10d95c10c32a0deb507c153a5636c8d9d4560 | ccb28e04b652460e2946d587b647be89367cfa06 | refs/heads/main | 2023-06-12T23:12:23.635823 | 2021-07-01T18:20:37 | 2021-07-01T18:20:37 | 364,982,913 | 0 | 0 | null | 2021-07-01T18:20:38 | 2021-05-06T17:09:22 | Python | UTF-8 | Python | false | false | 671 | py | import requests
from bs4 import BeautifulSoup
# Scrape the Romanian Liga 1 page and pair team names from the two
# "echipa-etapa" columns of the standings/fixtures table.
URL = 'https://lpf.ro/liga-1'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
stage_table = soup.find(class_='clasament_general white-shadow etape_meciuri')
#print(stage_table.prettify())
team_rows1 = stage_table.find_all(class_='echipa-etapa-1')
team_rows2 = stage_table.find_all(class_='echipa-etapa-2')
teams = []
# Walk both columns in lockstep — presumably each pair is one match pairing;
# confirm against the live page markup.
for team1, team2 in zip(team_rows1, team_rows2):
    team_name1 = team1.find(class_='hiddenMobile').text.strip()
    team_name2 = team2.find(class_='hiddenMobile').text.strip()
    print(team_name1, " -- ", team_name2)
    teams.append([team_name1, team_name2])
print(teams)
| [
"lese.d@yahoo.com"
] | lese.d@yahoo.com |
6a1e582aac50b2b48c3932c39d9ce9c576a88e4f | b9cb8cccc9d2fb0f647ca76010347687d61e5cc9 | /quotes/migrations/0006_ticker.py | d347ade992ccd09930722496f5ac2319bb3201a5 | [] | no_license | jyothis-thomas/stocker-application-final | 5ef5e24200af0f37e8749830725cdc8f3f3c48e0 | 2ee2f7d2e8dfcc740ca0b97f9dce62c2a041f32c | refs/heads/master | 2021-06-24T00:56:44.343861 | 2019-11-15T07:31:04 | 2019-11-15T07:31:04 | 218,010,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # Generated by Django 2.2.5 on 2019-10-15 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ``ticker`` model: company_name text plus the ticker symbol
    used as primary key."""

    dependencies = [
        ('quotes', '0005_auto_20191009_0839'),
    ]

    operations = [
        migrations.CreateModel(
            name='ticker',
            fields=[
                ('company_name', models.TextField()),
                # The ticker symbol itself is the primary key (max 10 chars).
                ('ticker_symbols', models.CharField(max_length=10, primary_key=True, serialize=False)),
            ],
        ),
    ]
| [
"jyothisth@gmail.com"
] | jyothisth@gmail.com |
3ff0de7975c7afb0e89a0caa130b293089928fee | 6fcf9267cbb05bc25623e72193bbb6278c4206c9 | /notifications/lambdas/cloudwatch-alarm-notification-handler/lambda_function.py | fb2dcc3241b566bee3dd007f55e2f1894595c1c6 | [
"MIT"
] | permissive | kagodarog/Infrastructure | 4ee0ba79e297196ad5f8f8fac18128a959951c4d | d309a7dcfd36c2f94ce383fe5d7ed6c76f59db5f | refs/heads/master | 2023-03-11T02:24:54.953960 | 2021-02-23T19:39:57 | 2021-02-23T19:39:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,027 | py | # Invoked by: SNS Subscription
# Returns: Error or status message
#
# Triggered by messages sent to various SNS topics resulting from CloudWatch
# Alarms being triggered or changing states.
import boto3
import os
import json
import urllib.parse
from dateutil.parser import parse
import datetime
import re
sns = boto3.client("sns")
sts = boto3.client("sts")
cloudwatch = boto3.client("cloudwatch")
SLACK_ICON = ":ops-cloudwatch-alarm:"
SLACK_USERNAME = "Amazon CloudWatch Alarms"
# Return a boto3 CloudWatch client with credentials for the account where the
# alarm originated
def cloudwatch_client(alarm):
    """Build a CloudWatch client scoped to the account that raised the alarm.

    Assumes the cross-account IAM role named by
    CROSS_ACCOUNT_CLOUDWATCH_ALARM_IAM_ROLE_NAME in the alarm's source
    account, then returns a boto3 CloudWatch client using those temporary
    credentials.
    """
    role_name = os.environ["CROSS_ACCOUNT_CLOUDWATCH_ALARM_IAM_ROLE_NAME"]
    account_id = alarm["AWSAccountId"]

    assumed = sts.assume_role(
        RoleArn=f"arn:aws:iam::{account_id}:role/{role_name}",
        RoleSessionName="notifications_lambda_reader",
    )
    credentials = assumed["Credentials"]

    return boto3.client(
        "cloudwatch",
        aws_access_key_id=credentials["AccessKeyId"],
        aws_secret_access_key=credentials["SecretAccessKey"],
        aws_session_token=credentials["SessionToken"],
    )
def channel_for_topic(sns_topic_arn):
    """Map an SNS topic ARN to the Slack channel for its severity level.

    Severity is inferred from a marker substring in the ARN; anything
    unrecognized falls through to #ops-debug.
    """
    severity_channels = (
        ("OpsFatal", "#ops-fatal"),
        ("OpsError", "#ops-error"),
        ("OpsWarn", "#ops-warn"),
        ("OpsInfo", "#ops-info"),
    )
    for marker, channel in severity_channels:
        if marker in sns_topic_arn:
            return channel
    return "#ops-debug"
def color_for_alarm(alarm):
    """Pick the Slack attachment color for an alarm's new state.

    Red for ALARM, green for OK, orange for anything else
    (e.g. INSUFFICIENT_DATA).
    """
    state = alarm["NewStateValue"]
    if state == "ALARM":
        return "#cc0000"
    if state == "OK":
        return "#019933"
    return "#e07701"
def alarm_slack_attachment(alarm):
    """Build the rich Slack attachment for a non-OK alarm state.

    ``alarm`` is the decoded SNS CloudWatch Alarm message (dict). Queries the
    source account for full alarm details and 24h of history; falls back to a
    simplified attachment when the alarm cannot be described.
    """
    # Extract datapoint values from the SNS alarm data
    # eg, "Threshold Crossed: 1 datapoint [10.0 (05/09/18 12:15:00)] was
    # greater than or equal to the threshold (10.0)."
    datapoints = re.findall(r"([0-9]+\.[0-9]+) ", alarm["NewStateReason"])
    datapoints_list = "`\n`".join(datapoints)

    trigger = alarm["Trigger"]

    cw = cloudwatch_client(alarm)

    # Get the complete alarm info for this alarm (Only partial data may have
    # been included in the SNS message)
    alarm_infos = cw.describe_alarms(AlarmNames=[alarm["AlarmName"]])

    # TODO There shouldn't be any cases where this is happening anymore, since
    # alarm data is being queried using a privileged role in all cases now
    if not alarm_infos["MetricAlarms"]:
        # Usually this list will be empty because the alarm is in a different
        # account. Use a simplified message in those cases.
        return {
            "color": color_for_alarm(alarm),
            "fallback": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
            "title": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
            "text": f"{alarm['AlarmDescription']}",
            "ts": round(parse(alarm["StateChangeTime"]).timestamp()),
            "fields": [
                {
                    "title": "Datapoints",
                    "value": f"`{datapoints_list}`",
                    "short": False,
                }
            ],
        }

    alarm_info = alarm_infos["MetricAlarms"][0]
    # ARN format: arn:aws:cloudwatch:<region>:<account>:alarm:<name>
    alarm_region = alarm_info["AlarmArn"].split(":", 4)[3]

    # If the alarm doesn't have DatapointsToAlarm defined, force it equal to
    # the EvaluationPeriods
    if "DatapointsToAlarm" not in alarm_info:
        alarm_info["DatapointsToAlarm"] = alarm_info["EvaluationPeriods"]

    # Get a count of how many times this alarm has alarmed in the last 24 hours
    now = datetime.datetime.now(datetime.timezone.utc)
    alarm_history = cw.describe_alarm_history(
        AlarmName=alarm["AlarmName"],
        HistoryItemType="StateUpdate",
        StartDate=now - datetime.timedelta(hours=24),
        EndDate=now,
        MaxRecords=100,
    )

    if "AlarmHistoryItems" in alarm_history:
        items = alarm_history["AlarmHistoryItems"]
        alarms = filter(lambda x: ("to ALARM" in x["HistorySummary"]), items)
        alarms_count = len(list(alarms))
    else:
        alarms_count = 0

    # Construct a URL for this alarm in the Console
    cw_console_url = "https://console.aws.amazon.com/cloudwatch/home"
    alarm_name_escaped = urllib.parse.quote(alarm["AlarmName"])
    alarm_console_url = (
        f"{cw_console_url}?region={alarm_region}"
        f"#alarm:alarmFilter=ANY;name={alarm_name_escaped}"
    )

    # Default when no log link can be built (only Lambda alarms get one below)
    cw_logs = "n/a"

    # All periods are 10, 30, or a multiple of 60
    # Each datapoint is the aggregate (SUM, AVERAGE, etc) of one period
    if trigger["Period"] >= 60:
        each_datapoint = f"{round(trigger['Period'] / 60)} minute"
    else:
        each_datapoint = f"{trigger['Period']} second"

    # ExtendedStatistic is used for percentile statistics.
    # ExtendedStatistic and Statistic are mutually exclusive.
    if "ExtendedStatistic" in trigger:
        stat = trigger["ExtendedStatistic"]
    elif "Statistic" in trigger:
        stat = trigger["Statistic"]
    elif "Metrics" in trigger:
        stat = "metric math"
    else:
        stat = "unknown"

    if "MetricName" in trigger:
        metric_name = trigger["MetricName"]
    else:
        metric_name = "expression"

    # eg "5 minute TargetResponseTime average"
    threshold_left = f"{each_datapoint} `{metric_name}` {stat.lower()}"

    # Total evaluation window, expressed in seconds or minutes
    trigger_period = trigger["Period"] * trigger["EvaluationPeriods"]
    trigger_period_label = "seconds"
    if trigger_period >= 60:
        trigger_period = round(trigger_period / 60)
        trigger_period_label = "minutes"

    if trigger["EvaluationPeriods"] == 1:
        # Entire threshold breach was a single period/datapoint
        threshold_right = ""
    elif alarm_info["DatapointsToAlarm"] == alarm_info["EvaluationPeriods"]:
        # Threshold breach was multiple, consecutive periods
        threshold_right = f"for {trigger_period} consecutive {trigger_period_label}"
    else:
        # Threshold breach was "M of N" periods
        threshold_right = (
            f"at least {alarm_info['DatapointsToAlarm']} "
            f"times in {trigger_period} {trigger_period_label}"
        )

    # NOTE(review): if an unexpected ComparisonOperator appears, `comparison`
    # is unbound and the f-string below raises NameError.
    if trigger["ComparisonOperator"] == "GreaterThanOrEqualToThreshold":
        comparison = "≥"
    elif trigger["ComparisonOperator"] == "GreaterThanThreshold":
        comparison = ">"
    elif trigger["ComparisonOperator"] == "LessThanThreshold":
        comparison = "<"
    elif trigger["ComparisonOperator"] == "LessThanOrEqualToThreshold":
        comparison = "≤"

    threshold = (
        f"{threshold_left} *{comparison}* `{trigger['Threshold']}` {threshold_right}"
    )

    # Log URL handling
    # eg, alarm['StateChangeTime'] = 2019-08-03T01:46:44.418+0000
    state_change_time = parse(alarm["StateChangeTime"])
    log_end = state_change_time.strftime("%Y-%m-%dT%H:%M:%S")
    log_period = trigger["Period"] * trigger["EvaluationPeriods"]
    log_start_time = state_change_time - datetime.timedelta(seconds=log_period)
    log_start = log_start_time.strftime("%Y-%m-%dT%H:%M:%S")

    namespace = "Math/Multiple"
    if "Namespace" in trigger:
        namespace = trigger["Namespace"]

        if trigger["Namespace"] == "AWS/Lambda":
            # Find the function name
            # NOTE(review): SNS trigger dimensions appear to use lowercase
            # "name"/"value" keys — confirm against a live payload; also
            # `function_name` stays unbound if no FunctionName dimension exists.
            for dimension in trigger["Dimensions"]:
                if dimension["name"] == "FunctionName":
                    function_name = dimension["value"]

            cw_logs = (
                f"<https://console.aws.amazon.com/cloudwatch/home?region={alarm_region}"
                f"#logEventViewer:group=/aws/lambda/{function_name};"
                f"start={log_start}Z;end={log_end}Z|CloudWatch Logs>"
            )

    return {
        "color": color_for_alarm(alarm),
        "fallback": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
        "title_link": alarm_console_url,
        "title": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
        "text": f"{alarm['AlarmDescription']}",
        "footer": f"{namespace} – {alarm['Region']}",
        "ts": round(parse(alarm["StateChangeTime"]).timestamp()),
        "fields": [
            {
                "title": "Last 24 Hours",
                "value": f"{alarms_count} alarms",
                "short": True,
            },
            {
                "title": "Logs",
                "value": cw_logs,
                "short": True,
            },
            {
                "title": "Threshold breach",
                "value": threshold,
                "short": False,
            },
            {
                "title": "Datapoints",
                "value": f"`{datapoints_list}`",
                "short": False,
            },
        ],
    }
def ok_slack_attachment(alarm):
    """Build the compact Slack attachment for an alarm returning to OK.

    Includes how long the preceding ALARM state lasted when that can be
    derived from the alarm history; otherwise reports "Unavailable".
    """
    cw = cloudwatch_client(alarm)

    # Get the complete alarm info for this alarm (Only partial data may have
    # been included in the SNS message)
    alarm_infos = cw.describe_alarms(AlarmNames=[alarm["AlarmName"]])

    if not alarm_infos["MetricAlarms"]:
        # Alarm details unavailable — return a minimal attachment.
        return {
            "color": color_for_alarm(alarm),
            "fallback": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
            "title": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
            "ts": round(parse(alarm["StateChangeTime"]).timestamp()),
        }

    alarm_info = alarm_infos["MetricAlarms"][0]
    # ARN format: arn:aws:cloudwatch:<region>:<account>:alarm:<name>
    alarm_region = alarm_info["AlarmArn"].split(":", 4)[3]

    # Calculate the duration of the previous alarm state. The previous
    # state may not exist or may not be an alarm, so this needs to fail
    # gracefully
    duration = "Unavailable"

    try:
        # Retrieve the alarm history (only state updates)
        now = datetime.datetime.now(datetime.timezone.utc)
        alarm_history = cw.describe_alarm_history(
            AlarmName=alarm["AlarmName"],
            HistoryItemType="StateUpdate",
            StartDate=now - datetime.timedelta(hours=24),
            EndDate=now,
        )
        items = alarm_history["AlarmHistoryItems"]

        # Since a state change triggered this, this should always be > 0
        if len(items) > 0:
            # The NewStateValue of the alarm was OK, so the most recent history
            # item should be an OK state (the inciting OK state, in fact)
            ok_item = items[0]
            # See history_data.json
            history_data = json.loads(ok_item["HistoryData"])

            if history_data["oldState"]["stateValue"] == "ALARM":
                alarm_time = history_data["oldState"]["stateReasonData"]["startDate"]
                alarm_time = parse(alarm_time)

                ok_time = ok_item["Timestamp"]

                # Alarm duration = time of recovery minus time the alarm began
                dif = ok_time - alarm_time
                duration = f"{round(dif.total_seconds() / 60)} min."
    except Exception:
        # Deliberate best-effort: any failure leaves duration = "Unavailable"
        pass

    cw_console_url = "https://console.aws.amazon.com/cloudwatch/home"
    alarm_name_escaped = urllib.parse.quote(alarm["AlarmName"])
    alarm_console_url = (
        f"{cw_console_url}?region={alarm_region}"
        f"#alarm:alarmFilter=ANY;name={alarm_name_escaped}"
    )

    namespace = "Math/Multiple"
    if "Namespace" in alarm["Trigger"]:
        namespace = alarm["Trigger"]["Namespace"]

    return {
        "color": color_for_alarm(alarm),
        "fallback": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
        "title": f"{alarm['NewStateValue']} – {alarm['AlarmName']}",
        "title_link": alarm_console_url,
        "text": f"Alarm duration: {duration}",
        "footer": f"{namespace} – {alarm['Region']}",
        "ts": round(parse(alarm["StateChangeTime"]).timestamp()),
    }
def slack_message(sns_payload):
    """Translate an SNS CloudWatch Alarm payload into a Slack message dict."""
    topic_arn = sns_payload["TopicArn"]
    alarm = json.loads(sns_payload["Message"])
    state = alarm["NewStateValue"]

    # OK transitions get the compact recovery card; everything else gets
    # the full alarm card.
    build_attachment = ok_slack_attachment if state == "OK" else alarm_slack_attachment
    attachment = build_attachment(alarm)

    # Structured breadcrumb for this invocation's CloudWatch Logs.
    print(json.dumps({"NewStateValue": state, "TopicArn": topic_arn}))

    return {
        "channel": channel_for_topic(topic_arn),
        "username": SLACK_USERNAME,
        "icon_emoji": SLACK_ICON,
        "attachments": [attachment],
    }
def lambda_handler(event, context):
    """SNS-triggered entry point: relay a CloudWatch Alarm to Slack.

    Any failure is reported to #ops-warn (with the raw event attached)
    rather than raised, so a bad payload never silently drops a
    notification.
    """
    try:
        sns_payload = event["Records"][0]["Sns"]
        slack_payload = slack_message(sns_payload)
        sns.publish(
            TopicArn=os.environ["SLACK_MESSAGE_RELAY_TOPIC_ARN"],
            Message=json.dumps(slack_payload),
        )
    except Exception as e:
        print(e)
        fallback_text = (
            "The following CloudWatch Alarm notification was not handled "
            f"successfully:\n\n {json.dumps(event)}\n\n{e}"
        )
        sns.publish(
            TopicArn=os.environ["SLACK_MESSAGE_RELAY_TOPIC_ARN"],
            Message=json.dumps(
                {
                    "channel": "#ops-warn",
                    "username": SLACK_USERNAME,
                    "icon_emoji": SLACK_ICON,
                    "text": fallback_text,
                }
            ),
        )
| [
"chris@farski.com"
] | chris@farski.com |
46411143ff9e2fc8dddc42ad13fee63873328438 | 26290167d60075c12e04a95cd0ce87c70ec5a06c | /Aula 9/Exc5.py | 0201a7dc940b7d4a2d8c9fc36ee30ee0802105e3 | [] | no_license | jmveronez/Exercicios | d4186538e5236a947ad9bf3eee45236d20303855 | dffae0f9621b57c71dd9403ceefec49385213947 | refs/heads/main | 2023-04-21T08:27:23.129966 | 2021-06-01T01:45:36 | 2021-06-01T01:45:36 | 366,562,078 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | #5. O Sr. Manoel Joaquim expandiu seus negócios para além dos negócios de 1,99
# e agora possui uma loja de conveniências. Faça um programa que implemente uma
# caixa registradora rudimentar. O programa deverá receber um número desconhecido
# de valores referentes aos preços das mercadorias. Um valor zero deve ser informado pelo operador
# para indicar o final da compra. O programa deve então mostrar o total da compra e perguntar o valor
# em dinheiro que o cliente forneceu, para então calcular e mostrar o valor do troco.
# Após esta operação, o programa deverá voltar ao ponto inicial, para registrar a próxima compra.
# A saída deve ser conforme o exemplo abaixo
produtos = []
# Read item prices until the operator enters 0 (or any non-positive value).
# Fix: the original primed the loop with `produto = int` — binding the *type*
# int as a sentinel — which only worked by accident; use an explicit loop.
while True:
    produto = float(input("Digite o valor dos produtos comprados, digite 0 para sair: "))
    if produto > 0:
        produtos.append(produto)
    else:
        break

def caixa_registradora(produtos):
    """Print the purchase total, ask for the payment and print the change.

    NOTE: a negative `troco` (customer paid less than the total) also falls
    into the "no change" branch — kept as-is to preserve behavior.
    """
    valor_total = sum(produtos)
    print("\nO valor total da compra é R$", valor_total, "\n")
    dinheiro = float(input("Quanto o cliente está pagando? "))
    troco = dinheiro - valor_total
    if troco > 0:
        print("O troco será de R$", troco)
    else:
        print("Não precisa de troco.")

caixa_registradora(produtos)
"jmveronez234@gmail.com"
] | jmveronez234@gmail.com |
dd422b4ebe4b9e6aeb1fc219d30133cd31641577 | 296287f05a1efed570b8da9ce56d3f6492126d73 | /snippets/draw_text_in_image.py | fcbb7d055099100cada3113b7ce8812f110ddacb | [] | no_license | formazione/python_book | 145f8a2598b6b75736a7c33a796b9fdd8cff668e | d7822b312c1db028bb70e25385a74b227a9a2609 | refs/heads/main | 2023-07-05T20:15:18.166771 | 2021-08-12T14:14:25 | 2021-08-12T14:14:25 | 320,499,187 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | from PIL import Image,ImageDraw,ImageFont
import tkinter as tk
def create_img_with_text(text=""):
    """Render `text` onto an orange PNG (mytext.png) and open it in a viewer.

    Bug fix: the canvas was always sized from the fixed sample string, so
    any other `text` was clipped or over-padded; it is now sized from the
    string actually drawn.
    """
    if text == "":
        text = "Pythonprogramming.altervista.org"
    # Hard-coded Windows font path — breaks elsewhere; TODO make configurable.
    font = ImageFont.truetype(
        "C:\\Program Files\\Android\\Android Studio\\jre\\jre\\lib\\fonts\\DroidSans.ttf",
        24,
        encoding="unic")
    # Measure the string being drawn (previously measured the fixed sample).
    text_width, text_height = font.getsize(text)
    # Canvas leaves a 5px margin on every side of the text.
    canvas2 = Image.new('RGB', (text_width + 10, text_height + 10), "orange")
    # draw the text onto the canvas, in blue
    draw = ImageDraw.Draw(canvas2)
    draw.text((5, 5), text, 'blue', font)
    canvas2.save("mytext.png", "PNG")
    canvas2.show()
def win_with_image():
    """Open a maximized Tk window showing mytext.png on a canvas.

    Assumes create_img_with_text() has already written mytext.png to the
    working directory.
    """
    root = tk.Tk()
    root.title("Animation")
    root.state("zoomed")  # maximize — "zoomed" is a Windows-specific state
    canvas = tk.Canvas(root, width=400, height=500)
    print(canvas['width'])
    canvas.pack()
    img = tk.PhotoImage(file="mytext.png")
    # Anchor the image's west (left) edge at the canvas centre point.
    canvas.create_image(int(canvas['width']) // 2,int(canvas['height']) // 2, image=img, anchor=tk.W)
    root.mainloop()

create_img_with_text("This is cool")
# win_with_image()
| [
"gatto.gio@gmail.com"
] | gatto.gio@gmail.com |
f8daba901324d5ea0615121beabf40a701c230a2 | c9ee7bd79a504597a32eac0d78e54d29ee139572 | /00_algorithm/Stage1/06_Recursion/problem1.py | 2800efe2ce84e24a74f138372160bd8dfa3988a9 | [] | no_license | owenyi/encore | eb3424594da3f67c304a8a9453fc2813d4bbba0d | 878ca04f87050a27aa68b98cba0c997e9f740d5d | refs/heads/main | 2023-07-13T10:08:24.626014 | 2021-08-23T13:40:54 | 2021-08-23T13:40:54 | 349,922,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # 10829 이진수변환-재귀...좀 더러움
def toBin(dec, bin=''):
    """Convert a non-negative integer to its binary representation, returned
    as an int made of binary digits (e.g. 10 -> 1010).

    `bin` is the accumulator used internally by the recursion.
    Fix: guard dec == 0, which previously recursed forever.
    """
    if dec == 0:
        return 0
    if dec == 1:
        return int('1' + bin)
    return toBin(dec // 2, str(dec % 2) + bin)
# Read N from stdin and print its binary representation.
N = int(input())
print(toBin(N))
"67588446+owenyi@users.noreply.github.com"
] | 67588446+owenyi@users.noreply.github.com |
078c40258e6bf4fcda2fc2317f847dddfb2bce21 | 83292e8ee5b14a30f61dcaf3067129e161832366 | /douban_film.py | a798c36cfe2919cb8fa74d911c62c1883780d1e7 | [] | no_license | A620-Work-Exchange/Application-Integration | 19197513f1aef67f27b4b984a736cd28ff9c8ac1 | baada55dd1b988112afd6bd4dc781670983337b8 | refs/heads/master | 2020-05-20T20:03:38.842375 | 2019-05-18T09:11:20 | 2019-05-18T09:11:20 | 185,736,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from urllib.request import urlopen
from time import sleep
import requests
from json_util import JsonUtil
import json
def get_top_film():
url = 'https://api.douban.com/v2/movie/top250'
for start in range(0, 250, 50):
req = requests.get(url, params={'start': start, 'count': 50})
data = req.json()
for movie in data['subjects']:
print(movie)
movie_str = json.dumps(movie)
JsonUtil.write_file('1.json', movie_str)
sleep(0.3)
get_top_film() | [
"2529716798@qq.com"
] | 2529716798@qq.com |
8037be52473b23f014eb6bf1e401f0f64dc04411 | 0103ea7172455056b578ef507ff3ef4553c06072 | /PerfMeasure.py | 7cd8813d07928c46d8b2824eb8d320d790ddc921 | [
"MIT"
] | permissive | JasonLC506/LabelRanking | 25c9735f3023fc478140536bd56b52de98272d08 | d93b1d42b0301dc8aa45f0fda8ec1e8e27b1be0f | refs/heads/master | 2021-01-25T07:50:09.852546 | 2017-06-08T21:42:00 | 2017-06-08T21:42:00 | 93,675,200 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 10,851 | py | """
calculate performance measures
Three perf measures used correspond to those in perf_list in perfMeaure
acc@3: perf_list["acc3"]
tau: perf_list["kendalltau"]
GMR: perf_list["g_mean_pair"]
"""
import numpy as np
import itertools
from scipy.stats.mstats import gmean
import math
from ReadData import rankOrder
# Noise level — presumably the smoothing prior used by addNoise() on
# predicted distributions (addNoise is defined outside this view; confirm).
NOISE = 0.001
# Presumably a floor/placeholder recall for empty classes or pairs — not
# referenced in this view; confirm against the rest of the file.
NONERECALL = 1e-3
def perfMeasure(y_pred, y_test, rankopt = False):
    """
    calculate performance measure for label ranking
    with normal ranking input y_pred and y_test, rankopt = True
    with votes for each label input y_pred and y_test (for LogR and NAIVE specifically), rankopt = False

    Returns a flat list `perf` indexed by the offsets in `perf_list` below
    (acc@3, dcg, llh, per-class/per-pair recalls, G-means, Kendall's tau...).
    NOTE(review): this is Python 2 code (print statements, itertools.izip,
    `except ValueError, e`); it will not run under Python 3 as-is.
    """
    # performance measurement #
    # with background noise probability distribution
    if type(y_test)!= np.ndarray:
        y_test = np.array(y_test)
    Nsamp = y_test.shape[0]
    Nclass = y_test.shape[1]
    # Offsets of each measure inside the flat `perf` list; block sizes depend
    # on Nclass (per-class measures) and Nclass**2 (per-pair measures).
    perf_list={"acc3":0, "dcg":1, "llh":2, "recall":3, "recallsub": 3+Nclass, "Nsc": 3+2*Nclass, "kld": 3 + 3*Nclass,
               "g_mean":4+3*Nclass, "kendalltau": 5+3*Nclass,
               "recallpair": 6+3*Nclass, "Nsp": 6+3*Nclass+Nclass*Nclass, "g_mean_pair": 6+3*Nclass+2*Nclass*Nclass}
    Nperf = max(perf_list.values())+1
    perf=[0 for i in range(Nperf)]
    # When inputs are vote counts, convert them to rank orderings first
    # (rankOrder imported from ReadData); otherwise they are already ranks.
    if not rankopt:
        rank_test = map(rankOrder,y_test)
        rank_pred = map(rankOrder,y_pred)
    else:
        if type(y_test) == np.ndarray:
            rank_test = y_test.tolist()
        else:
            rank_test = y_test
        if type(y_pred) == np.ndarray:
            rank_pred = y_pred.tolist()
        else:
            rank_pred = y_pred
    # acc3: fraction of samples whose top-3 predicted labels all match #
    if Nclass>=3:
        for i in range(Nsamp):
            acc3 = 1
            for j in range(3):
                if rank_test[i][j]>=0 and rank_pred[i][j]>=0 and rank_test[i][j]!=rank_pred[i][j]:
                    acc3 = 0
            perf[perf_list["acc3"]] += acc3
        perf[perf_list["acc3"]]=perf[perf_list["acc3"]]/(1.0*Nsamp)
    else:
        print "cannot calculate acc@3 for less than 3 classes"
    # recall: per-class mean of recallAll over all samples #
    recall = map(recallAll, itertools.izip(rank_pred, rank_test))
    recall = np.array(recall)
    recall = np.mean(recall, axis=0)
    for i in range(Nclass):
        perf[perf_list["recall"] + i] = recall[i]
    # recallsub: per-class recall over posts the class appears in #
    recall_sub, Nsamp_class = recallSub(rank_pred, rank_test)
    for i in range(Nclass):
        perf[perf_list["recallsub"] + i] = recall_sub[i]
        perf[perf_list["Nsc"] + i] = Nsamp_class[i]
    # recallpair: pairwise recall matrix, flattened row-major #
    # (recallPair is defined outside this view)
    recall_pair, Nsamp_pair = recallPair(rank_pred, rank_test)
    cnt = 0
    for i in range(Nclass):
        for j in range(Nclass):
            perf[perf_list["recallpair"] + cnt] =recall_pair[i][j]
            perf[perf_list["Nsp"] + cnt] = Nsamp_pair[i][j]
            cnt += 1
    # G-Mean-pair: geometric mean over valid (finite) pairwise recalls #
    recall_pair = np.array(recall_pair)
    recall_pair_masked = np.ma.masked_invalid(recall_pair)
    g_mean_pair = gmean(recall_pair_masked, axis=None)
    perf[perf_list["g_mean_pair"]] = g_mean_pair
    # G-Mean: geometric mean over valid per-class sub-recalls #
    recall_sub = np.array(recall_sub)
    recall_sub_masked = np.ma.masked_invalid(recall_sub)
    g_mean = gmean(recall_sub_masked)
    # g_mean = gmean(recall)
    perf[perf_list["g_mean"]] = g_mean
    # Kendall's Tau traditional one#
    perf[perf_list["kendalltau"]] = KendallTau(rank_pred, rank_test)
    # #
    if rankopt:
        # Rank-only inputs: probability-based measures below are undefined.
        return perf
    # --------------- for probability output ------------------------ #
    # llh (log-likelihood)#
    # addNoise is defined outside this view; presumably mixes in the NOISE
    # prior so log() never sees a zero probability — confirm.
    y_pred_noise=addNoise(y_pred)# add noise prior
    # y_pred_noise = y_pred
    for i in range(Nsamp):
        llh = 0
        for j in range(Nclass):
            # maxllh is the entropy term of the empirical distribution, so
            # llh accumulates a (negative) KL-style divergence per label
            if y_test[i][j]==0:
                maxllh=0
            else:
                maxllh=y_test[i][j]*math.log(y_test[i][j])
            llh += (y_test[i][j]*math.log(y_pred_noise[i][j])-maxllh)
        try:
            llh = llh + sum(y_test[i])*math.log(sum(y_test[i]))
        except ValueError, e:
            print y_test[i]
            raise e
        perf[perf_list["llh"]] += llh
    perf[perf_list["kld"]] = perf[perf_list["llh"]]/(1.0*np.sum(y_test)) ## normalized by total emoticons
    perf[perf_list["llh"]]=perf[perf_list["llh"]]/(1.0*Nsamp) ## normalized by # samples
    # dcg: normalized discounted cumulative gain against the ideal ordering #
    for i in range(Nsamp):
        dcg = 0
        maxdcg = 0
        for j in range(Nclass):
            if rank_pred[i][j]>=0:
                dcg += y_test[i][rank_pred[i][j]]/math.log(j+2)
            if rank_test[i][j]>=0:
                maxdcg += y_test[i][rank_test[i][j]]/math.log(j+2)
        perf[perf_list["dcg"]] += dcg*1.0/maxdcg
    perf[perf_list["dcg"]]= perf[perf_list["dcg"]] /(1.0*Nsamp)
    return perf
def KendallTau(rank_pred, rank_test):
    """Mean per-sample Kendall's tau over concordant/discordant pairs.

    Each ranking is a best-first list of labels; a negative entry marks
    the end of a (possibly partial) ranking.  As in the traditional
    definition, pairs the prediction cannot order are excluded.  Returns
    the NaN-aware mean of the per-sample tau values.
    """
    if type(rank_pred) != list:
        rank_pred = rank_pred.tolist()
    if type(rank_test) != list:
        rank_test = rank_test.tolist()
    n_samp = len(rank_test)
    n_rank = len(rank_test[0])  # length of a complete ranking
    taus = np.full(n_samp, np.nan, dtype="float")
    for s in range(n_samp):
        truth = rank_test[s]
        guess = rank_pred[s]
        concordant = 0
        discordant = 0
        for hi in range(n_rank):
            label_hi = truth[hi]
            if label_hi < 0:  # ranking exhausted; lower slots are empty too
                break
            for lo in range(hi + 1, n_rank):
                label_lo = truth[lo]
                # Stop this inner scan at the first empty slot or at the
                # first label missing from the prediction (same early-exit
                # behaviour as the original nested breaks).
                if label_lo < 0 or label_hi not in guess or label_lo not in guess:
                    break
                if guess.index(label_hi) < guess.index(label_lo):
                    concordant += 1
                else:
                    discordant += 1
        counted = concordant + discordant
        if counted >= 1:
            taus[s] = (concordant - discordant) * 1.0 / counted
    return np.nanmean(taus)
def recallAll(two_rank):
    """Per-class recall for a single (predicted, true) ranking pair.

    `two_rank` is a (pred, test) tuple of best-first label lists; negative
    entries mean "no label at this slot".  recall[label] stays 1.0 unless
    the label is absent from the prediction or predicted at a strictly
    worse position than its true one.  Assumes labels index into
    range(len(test)).
    """
    pred, test = two_rank
    if type(pred) != list:
        pred = pred.tolist()
    if type(test) != list:
        test = test.tolist()
    recall = [1.0] * len(test)
    for true_pos, label in enumerate(test):
        if label < 0:  # empty slot in the ground-truth ranking
            continue
        if label not in pred or pred.index(label) > true_pos:
            recall[label] = 0.0
    return recall
def recallSub(rank_pred, rank_test):
    """Per-class recall computed only over samples the class appears in.

    Returns (recall, Nsamp_class): recall[c] is the fraction of samples
    containing class c whose prediction ranks c at least as high as the
    truth does (np.nan when the class never appears); Nsamp_class[c] is
    the number of samples c appears in.  Values below the module-level
    NONERECALL floor are clamped up to it.  Python 2 only (print
    statement below).
    """
    # consider recall of each emoticon in those posts it appears in
    if type(rank_pred)!=list:
        rank_pred = rank_pred.tolist()
    if type(rank_test)!=list:
        rank_test = rank_test.tolist()
    Nclass = len(rank_pred[0])
    recall = [[] for i in range(Nclass)]
    Nsamp_class = [0.0 for i in range(Nclass)] # #samples each class appears in
    Nsamp = len(rank_pred)
    for i in range(Nsamp):
        for emoti in range(Nclass):
            if emoti not in rank_test[i]:
                continue # no such emoticon appears
            Nsamp_class[emoti] += 1
            rt = rank_test[i].index(emoti)
            if emoti not in rank_pred[i]:
                # missing from the prediction counts as a miss
                recall[emoti].append(0.0)
                continue
            rp = rank_pred[i].index(emoti)
            # a hit requires the predicted rank to be at least as good
            if rp <= rt:
                recall[emoti].append(1.0)
            else:
                recall[emoti].append(0.0)
    for i in range(Nclass):
        if Nsamp_class[i] < 1.0:
            recall[i] = np.nan
        else:
            recall[i] = sum(recall[i])/Nsamp_class[i]
            # clamp to the NONERECALL floor (NaN never enters this branch
            # because the comparison above is only reached when samples exist)
            if recall[i] < NONERECALL:
                print "NONERECALL:", recall[i], "for emoticon:", i
                recall[i] = NONERECALL
    return recall, Nsamp_class
def recallPair(rank_pred, rank_test):
    """Pairwise ordering recall with add-one (Laplace) smoothing.

    For every ordered class pair (first, second) the truth ranks
    first-before-second (a label absent from the truth ranking is treated
    as ranked below any present label), count a hit when the prediction
    preserves that order (a label absent from the prediction ranks below
    every present one).  Entries with no observations and the diagonal
    are np.nan.  Returns (recall_matrix, pair_counts) as nested lists.
    """
    if type(rank_pred) != list:
        rank_pred = rank_pred.tolist()
    if type(rank_test) != list:
        rank_test = rank_test.tolist()
    n_class = len(rank_pred[0])
    hits = [[0] * n_class for _ in range(n_class)]
    pair_counts = [[0] * n_class for _ in range(n_class)]
    for s in range(len(rank_pred)):
        rt = rank_test[s]
        rp = rank_pred[s]
        for a in range(n_class):
            for b in range(a + 1, n_class):
                a_present = a in rt
                b_present = b in rt
                if not a_present and not b_present:
                    continue  # pair never observed in this sample
                if a_present and b_present:
                    # true order decided by position in the truth ranking
                    if rt.index(a) < rt.index(b):
                        first, second = a, b
                    else:
                        first, second = b, a
                elif a_present:
                    first, second = a, b  # absent label ranks last
                else:
                    first, second = b, a
                pair_counts[first][second] += 1
                if first not in rp:
                    continue  # higher-ranked label missing: discordant
                if second not in rp or rp.index(first) < rp.index(second):
                    hits[first][second] += 1
    # Laplace smoothing: (hits + 1) / (count + 2); unobserved pairs -> NaN.
    for i in range(n_class):
        for j in range(i + 1, n_class):
            for x, y in ((i, j), (j, i)):
                if pair_counts[x][y] < 1:
                    hits[x][y] = np.NaN
                else:
                    hits[x][y] = float(hits[x][y] + 1) / (pair_counts[x][y] + 2)
    for i in range(n_class):
        hits[i][i] = np.nan  # self-pairs are undefined
    return hits, pair_counts
def addNoise(dist_list):
    """Blend each row of `dist_list` with a small uniform prior.

    Adds the module-level NOISE constant to every class probability and
    renormalizes, so downstream log() never sees an exact zero.  Returns
    a list of arrays (same list semantics as the original Py2 map()).
    """
    prior = np.array([NOISE for _ in range(dist_list.shape[1])])
    return [(row + prior) / sum(row + prior) for row in dist_list]
| [
"jpz5181@ist.psu.edu"
] | jpz5181@ist.psu.edu |
0b256fb682b243b39e161f749081d6496bf0d471 | 4e763658dac306478683838040bd7c22ce54ab3a | /data_processing.py | df9556a3bf51919a5109da6c65f22fb5d603dd32 | [] | no_license | umd-fire-coml/FFNet | 6ab1e07e625213ee12c5c537f08791fdbdb4f7c4 | 978b7750573fec436dff29d1e0bd97ee246b25cd | refs/heads/master | 2022-03-10T21:27:35.346350 | 2019-11-17T14:53:05 | 2019-11-17T14:53:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,279 | py | import scipy.misc
import random
import numpy as np
import copy
import sys
# Dataset locations (KITTI training split on the deployment machine).
img_path = '/home/speit_ie02/Data/KITTI/kitti_data/training/image_2/'
label_path = '/home/speit_ie02/Data/KITTI/kitti_data/training/label_2/'
# FFNet needs a 2D object detection model or 2D object detection result for KITTI evaluation
# define your 2D object detection result here
result_2d_path = ''
# 0: test / 1: train / 2: write test results
toTrain = int(sys.argv[1])
# num_train_img: total number of images for train and val (not only for training)
num_train_img = 7481
num_val_img = 1481
# Filled in later from the organized datasets (module-level state).
num_train_data = 0
num_val_data = 0
# Maps KITTI class names to model class ids.  NOTE(review): only
# 'Pedestrian' rows survive get_txt_data below — confirm the other
# entries are intentionally retained for future use.
class_dict = {
    'Car': 0,
    'Van': 0,
    'Truck': 3,
    'Pedestrian': 1,
    'Person_sitting': 3,
    'Cyclist': 2,
    'Tram': 3,
    'Misc': 3,
    'DontCare': 3
}
# Indices of the label columns kept per object:
# 0 class, 3 alpha, 4-7 2D bbox, 8-10 3D dims (KITTI label format).
target_element = [0, 3, 4, 5, 6, 7, 8, 9, 10]
train_batch_pointer = 0
PI = 3.141592654
# Max tolerated angular spread (degrees) used by determine_average_degree.
angle_diff_max = 30
def generate_one_hot_list(index, depth):
    """Return a length-`depth` list of zeros with a 1 at `index`."""
    encoding = [0] * depth
    encoding[index] = 1
    return encoding
def determine_angle(sin_value, cos_value):
    """Recover an angle in radians from its sine and cosine.

    np.arcsin alone only covers quadrants I/IV; the sign of the cosine
    selects the reflection for quadrants II/III, yielding a value in
    (-PI, PI].
    """
    principal = np.arcsin(sin_value)
    if cos_value >= 0:
        return principal
    # cos < 0: reflect into quadrant II (sin >= 0) or III (sin < 0).
    if sin_value >= 0:
        return PI - principal
    return -PI - principal
# no classification in the multi-bin mechanism
def determine_average_degree(angle_array, to_display):
    """Fuse the multi-bin (sin, cos) network outputs into one angle.

    `angle_array[bin][0]` holds a (sin, cos) pair per bin; each pair is
    L2-normalized back onto the unit circle, decoded, then shifted by
    its bin offset (-PI/4 + bin * PI/2).  One outlier bin may be dropped
    before the circular average.  Returns (mean angle in degrees,
    variance of the per-bin degree values).

    NOTE(review): the outlier test indexes a_rest[0..2], so it assumes
    exactly 4 bins — confirm callers always pass 4.
    """
    a_value = []
    a_diff_value = []
    a_sin = []
    a_cos = []
    for angle_index in range(len(angle_array)):
        sin_value = angle_array[angle_index][0][0]
        cos_value = angle_array[angle_index][0][1]
        # Project the raw (sin, cos) pair back onto the unit circle.
        sc_set = np.array([sin_value, cos_value])
        l2_normed_sc_set = sc_set / np.sqrt(np.sum(sc_set ** 2))
        sin_value = l2_normed_sc_set[0]
        cos_value = l2_normed_sc_set[1]
        angle_value = determine_angle(sin_value, cos_value)
        a_diff_value.append(angle_value*180/PI)
        # Undo the per-bin rotation applied during training.
        angle_value = angle_value - PI/4 + PI*angle_index/2
        sin_value = np.sin(angle_value); a_sin.append(sin_value)
        cos_value = np.cos(angle_value); a_cos.append(cos_value)
        angle_value = angle_value*180/PI
        angle_value = angle_value % 360
        a_value.append(angle_value)
    # error excluding algorithm: drop at most one bin whose angle sits
    # far from the other three.  The del-by-index is safe only because
    # we break immediately after the first removal.
    for i in range(len(a_value)):
        a_rest = []
        for j in range(len(a_value)):
            if j != i:
                a_rest.append(a_value[j])
        x1 = abs(a_value[i] - a_rest[0])
        x2 = abs(a_value[i] - a_rest[1])
        x3 = abs(a_value[i] - a_rest[2])
        delta_angle = max(x1, x2, x3) - min(x1, x2, x3)
        if x1 > delta_angle and x2 > delta_angle and x3 > delta_angle:
            if delta_angle < 2*angle_diff_max:
                del a_sin[i]
                del a_cos[i]
                break
    variance_degree = np.var(a_value)
    # Circular mean: average the unit vectors, then decode the angle.
    sin_value = sum(a_sin) / len(a_sin)
    cos_value = sum(a_cos) / len(a_cos)
    angle_value = determine_angle(sin_value, cos_value)
    if to_display:
        print(a_value)
        # print(a_diff_value)
    return angle_value*180/PI, variance_degree
def find_second_largest_element(a):
    """Return the second-largest element of `a`.

    NOTE(review): destructive — the largest element is overwritten with 0
    in the caller's sequence, and the approach only yields the true
    second-largest for non-negative data without duplicate maxima; both
    quirks match the original behaviour.
    """
    top = np.argmax(a)
    a[top] = 0  # clobber the max so the next argmax finds the runner-up
    return a[np.argmax(a)]
# each line has 16 elements
# 0: object class
# 1: if truncated
# 2: if blocked
# 3: observed angle
# 4~7: 2D bounding box: xmin, ymin, xmax, ymax
# 8~10: 3D bounding box dimensions: height, width, length
# 11~13: 3D bounding box location: x, y, z
# 14: yaw angle
# 15: detection confidence
def get_txt_data(filename):
    """Parse one KITTI label file, keeping only 'Pedestrian' objects.

    Each kept object is the subset of whitespace-split columns selected
    by the module-level `target_element` index list (class, alpha, 2D
    bbox, 3D dimensions).  All values are returned as strings.
    """
    with open(filename, 'r') as label_file:
        pedestrians = []
        for raw_line in label_file:
            fields = raw_line.strip().split(" ")
            if fields[0] == 'Pedestrian':
                pedestrians.append([fields[i] for i in target_element])
        return pedestrians
def organize_train_data():
    """Crop every pedestrian from the training images and shuffle them.

    Images 2000..3480 are held out for validation (see organize_val_data).
    Each labelled object is cropped with its 2D bbox (label columns
    2-5 = xmin, ymin, xmax, ymax as strings), scaled to 224x224, and
    paired with a deep-copied label row.  Returns shuffled parallel
    tuples (images, labels).
    """
    img_epoch = []
    label_epoch = []
    for i in range(num_train_img):
        if i < 2000 or i > 3480:  # skip the validation slice
            seqname = str(i).zfill(6)
            img = scipy.misc.imread(img_path + seqname + '.png')
            label = get_txt_data(label_path + seqname + '.txt')
            for j in range(len(label)):
                # Crop rows ymin:ymax+1, cols xmin:xmax+1 from the frame.
                target_img = copy.deepcopy(img[int(float(label[j][3])):int(float(label[j][5]))+1,int(float(label[j][2])):int(float(label[j][4]))+1])
                target_img = target_img.astype(np.float32) / 255.0
                target_img = scipy.misc.imresize(target_img, [224, 224])
                target_label = copy.deepcopy(label[j])
                img_epoch.append(target_img)
                label_epoch.append(target_label)
        print('train data', i, 'complete')
    dataset = list(zip(img_epoch, label_epoch))
    random.shuffle(dataset)
    img_epoch, label_epoch = zip(*dataset)
    return img_epoch, label_epoch
def organize_val_data():
    """Crop every pedestrian from validation images 2000..2000+num_val_img.

    Same cropping/resizing pipeline as organize_train_data, applied to
    the held-out image range.  Returns shuffled parallel tuples
    (images, labels).
    """
    img_epoch = []
    label_epoch = []
    for i in range(num_val_img):
        seqname = str(2000+i).zfill(6)
        img = scipy.misc.imread(img_path + seqname + '.png')
        label = get_txt_data(label_path + seqname + '.txt')
        for j in range(len(label)):
            # Crop rows ymin:ymax+1, cols xmin:xmax+1 from the frame.
            target_img = copy.deepcopy(img[int(float(label[j][3])):int(float(label[j][5]))+1,int(float(label[j][2])):int(float(label[j][4]))+1])
            target_img = target_img.astype(np.float32) / 255.0
            target_img = scipy.misc.imresize(target_img, [224, 224])
            target_label = copy.deepcopy(label[j])
            img_epoch.append(target_img)
            label_epoch.append(target_label)
        print('val data', i+2000, 'complete')
    dataset = list(zip(img_epoch, label_epoch))
    random.shuffle(dataset)
    img_epoch, label_epoch = zip(*dataset)
    return img_epoch, label_epoch
# Module-level dataset state, populated below depending on `toTrain`.
img_epoch_train = []
label_epoch_train = []
img_epoch_val = []
boxsize_epoch_val = []
d_epoch_val = []
c_epoch_val = []
a_epoch_val = []
a = []
if toTrain == 1:
    img_epoch_train, label_epoch_train = organize_train_data()
    num_train_data = len(label_epoch_train)
if toTrain == 0 or toTrain == 1:
    img_epoch_val, label_epoch_val = organize_val_data()
    num_val_data = len(label_epoch_val)
    # Pre-compute validation targets: 2D box sizes, 3D dimensions,
    # one-hot classes and observation angles.
    box_min_epoch_val = np.array([label[2:4] for label in label_epoch_val]).astype(np.float32)
    box_max_epoch_val = np.array([label[4:6] for label in label_epoch_val]).astype(np.float32)
    boxsize_epoch_val = box_max_epoch_val - box_min_epoch_val
    for label in label_epoch_val:
        c_epoch_val.append(generate_one_hot_list(class_dict[label[0]], 2))
        d_epoch_val.append(label[6:9])
        a_epoch_val.append([label[1]])
    # `a` is the shuffled index pool consumed by extract_random_val_batch.
    a = np.zeros(num_val_data)
    for i in range(num_val_data):
        a[i] += i
def get_train_data(batch_size):
    """Return the next `batch_size` training samples, wrapping around.

    Advances the module-level `train_batch_pointer` cursor.  Returns
    (images, 2D box sizes, 3D dimensions, one-hot classes, alphas).
    """
    global train_batch_pointer
    imgs = []
    labels = []
    for _ in range(batch_size):
        imgs.append(img_epoch_train[train_batch_pointer])
        labels.append(label_epoch_train[train_batch_pointer])
        train_batch_pointer += 1
        if train_batch_pointer >= num_train_data:
            train_batch_pointer = 0  # wrap to the start of the epoch
    box_mins = np.array([lab[2:4] for lab in labels]).astype(np.float32)
    box_maxs = np.array([lab[4:6] for lab in labels]).astype(np.float32)
    boxsizes = box_maxs - box_mins
    dims = [lab[6:9] for lab in labels]
    classes = [generate_one_hot_list(class_dict[lab[0]], 2) for lab in labels]
    alphas = [[lab[1]] for lab in labels]
    return imgs, boxsizes, dims, classes, alphas
def extract_random_val_batch(batch_size):
    """Sample `batch_size` random validation entries.

    Shuffles the module-level index pool `a` in place, then gathers the
    first `batch_size` indices from the pre-computed validation arrays.
    """
    random.shuffle(a)
    imgs = []
    sizes = []
    dims = []
    classes = []
    alphas = []
    for pick in a[:batch_size]:
        idx = int(pick)
        imgs.append(img_epoch_val[idx])
        sizes.append(boxsize_epoch_val[idx])
        dims.append(d_epoch_val[idx])
        classes.append(c_epoch_val[idx])
        alphas.append(a_epoch_val[idx])
    return imgs, sizes, dims, classes, alphas
| [
"charliezhao1999@outlook.com"
] | charliezhao1999@outlook.com |
01cfde38c8965eca7aaedb20680ee6be5f676118 | e1ffc5d925b990906ad47fd1d046036324451218 | /conversation/models.py | 16a7b98c878a5588620014601c3b03325c2eda91 | [] | no_license | nikadam/cartloop | ed04312cf88031352a745f9a01b11152936757e0 | 4d1a6a6c50b59589410c487d348ab4fd4f2cf7b4 | refs/heads/main | 2023-01-10T10:43:09.238164 | 2020-11-06T11:52:49 | 2020-11-06T11:52:49 | 309,955,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    # Custom auth user; `name` is an optional display name used by
    # __str__ when `username` is empty.
    name = models.CharField(
        max_length=50,
        null=True,
        blank=True
    )
    # Role flags.  NOTE(review): not mutually exclusive — an account can
    # be both (or neither); confirm that's intended.
    is_operator = models.BooleanField(default=False)
    is_client = models.BooleanField(default=False)
    def __str__(self):
        # Prefer the login name; fall back to the display name.
        if self.username:
            return f"{self.username}"
        return f"{self.name}"
class Store(models.Model):
    # A shop whose customer conversations are handled on the platform.
    name = models.CharField(max_length=50)
    def __str__(self):
        return f"{self.name}"
class Group(models.Model):
    # An operator team; see Operator.group.
    name = models.CharField(max_length=50)
    def __str__(self):
        return f"{self.name}"
class Client(models.Model):
    # 1:1 profile wrapper around a User acting as a client; shares the
    # user's primary key.
    user = models.OneToOneField(
        'User',
        on_delete=models.CASCADE,
        primary_key=True
    )
    def __str__(self):
        return f"{self.user.name}"
class Operator(models.Model):
    # 1:1 profile wrapper around a User acting as an operator; shares
    # the user's primary key.
    user = models.OneToOneField(
        'User',
        on_delete=models.CASCADE,
        primary_key=True
    )
    # Team the operator belongs to; reverse accessor: group.operators.
    group = models.ForeignKey(
        'Group',
        related_name='operators',
        on_delete=models.CASCADE
    )
    def __str__(self):
        return f"{self.user.name}({self.group})"
class Conversation(models.Model):
    # A chat thread between one operator and one client about one store.
    store = models.ForeignKey(
        'Store',
        on_delete=models.CASCADE
    )
    # Both ends reference plain Users (not the Operator/Client profile
    # models); reverse accessors: user.operator_conversations /
    # user.client_conversations.
    operator = models.ForeignKey(
        'User',
        on_delete=models.CASCADE,
        related_name='operator_conversations'
    )
    client = models.ForeignKey(
        'User',
        on_delete=models.CASCADE,
        related_name='client_conversations'
    )
    def __str__(self):
        # NOTE(review): the triple-quoted literal embeds a newline and
        # the source indentation in the rendered string — confirm intended.
        return f"""Conversation between {self.operator}
        and {self.client} for store {self.store}"""
class Chat(models.Model):
    # A single message inside a Conversation.
    NEW = 'N'
    SENT = 'S'
    STATUS_CHOICES = [
        (NEW, 'New'),
        (SENT, 'Sent'),
    ]
    # NOTE(review): class-body import — works, but conventionally this
    # belongs at module top with the other imports.
    from django.core.validators import RegexValidator
    # Message text, restricted by the regex validator to alphanumerics
    # plus a whitelist of punctuation characters.
    payload = models.CharField(
        max_length=300,
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9\{\}$%_\-\/~@#$%^&*()!? ]*$',
                message='Message must be Alphanumeric or must have any of these special char({}$%_-/~@#$%^&*()!?)',
                code='invalid_payload'
            )
        ])
    # Delivery state; messages start as NEW until dispatched.
    status = models.CharField(
        max_length=1,
        choices=STATUS_CHOICES,
        default=NEW
    )
    conversation = models.ForeignKey(
        'Conversation',
        on_delete=models.CASCADE,
        related_name='chats'
    )
    # Author of the message.
    user = models.ForeignKey(
        'User',
        on_delete=models.CASCADE,
        related_name='user_chats'
    )
    # NOTE(review): field is named `create_at` while ScheduleChat uses
    # `created_at` — inconsistent, but renaming requires a migration.
    create_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{self.conversation}"
class ScheduleChat(models.Model):
    # Links a Chat to its Conversation for deferred (scheduled) sending.
    conversation = models.ForeignKey(
        'Conversation',
        on_delete=models.CASCADE,
        related_name='schedule_chats'
    )
    chat = models.ForeignKey(
        'Chat',
        on_delete=models.CASCADE,
        related_name='schedule_chats'
    )
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return f"{self.conversation}"
| [
"nishantkumarkadam@gmail.com"
] | nishantkumarkadam@gmail.com |
56b0642ed03574de0e00e8f080073b4c32e42527 | 7df6d8f83b3359c6013c6e2be60b3ad0c54fae70 | /CraneOffer/Crane_offer_excel_to_Json.py | 72b86f1bbdbe06ac046426c78ffb635fa63ecff9 | [] | no_license | wangshuai917/Gamedev_Tools | 3bdf9e81d4596f52086ab3b49cd419e3ef52fb9c | 911d351e9bfd8bb3ac440c7d1f0c984e0c9f99da | refs/heads/master | 2023-07-07T19:37:24.870409 | 2021-08-11T10:15:54 | 2021-08-11T10:15:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,614 | py | from openpyxl import load_workbook
import json
import os
# Resolve workbook and output paths relative to the working directory.
work_dir = os.getcwd()
xlsx_dir = "CraneOfferconfigs.xlsx"
workbook_dir = os.path.join(work_dir, xlsx_dir)
print(workbook_dir)
json_file_name = "CraneOfferConfig.json"
json_dir = os.path.join(work_dir, json_file_name)
print(json_dir)
wb = load_workbook(workbook_dir)
ws = wb.active  # every ws.cell(...) below reads the first/active sheet
# Helpers
def clean_null(input_value):
    """Normalize empty spreadsheet cells.

    openpyxl returns None for blank cells; map that to "" so the JSON
    output never contains null for plain-text fields.  Any other value
    — including falsy ones such as 0 — is passed through unchanged.
    """
    # `is None` instead of `== None`: identity is the correct idiom
    # (PEP 8 E711) and avoids invoking custom __eq__ implementations.
    if input_value is None:
        return ""
    return input_value
def get_bool(input_string):
    """Convert the sheet's 'true'/'false' strings to a real bool.

    Exactly the string 'true' yields True; every other value ('false',
    None, typos, different casing) yields False — matching the original
    if/elif/else ladder.
    """
    return input_string == "true"
container_dic = {}
# Global event config (fixed sheet layout: rows 4-10, column B).
container_dic.update({"id": ws.cell(4, 2).value})
container_dic.update({"name": ws.cell(5, 2).value})
container_dic.update({"coin_rate": ws.cell(6, 2).value})
container_dic.update({"start_time": ws.cell(7, 2).value})
container_dic.update({"end_time": ws.cell(8, 2).value})
container_dic.update({"begin_time": ws.cell(9, 2).value})
container_dic.update({"refresh_interval": ws.cell(10, 2).value})
print(" 总配置完成 ")
# Small (ticket) reward config: up to 3 stage bands laid out 10 columns
# apart; each band's level table starts at row 17.
ticket_conf_list = []
container_dic.update({"ticket_conf_list": ticket_conf_list})
for x in range(3):
    if (ws.cell(17,1+x*10).value!= None):
        diff = x*10
        ticket_conf_list_unit_dic = {}
        ticket_conf_list_unit_dic.update({"min_stage": ws.cell(14,2+diff).value})
        ticket_conf_list_unit_dic.update({"max_stage": ws.cell(14,9+diff).value})
        ticket_levels_list = []
        ticket_conf_list_unit_dic.update({"ticket_levels": ticket_levels_list})
        row_index = 17
        # NOTE(review): the loop condition always tests column 1 (not
        # 1+diff), so every band's row count follows the first band —
        # confirm that matches the sheet layout.
        while ws.cell(row_index,1).value!= None:
            ticket_levels_unit_dict = {}
            ticket_levels_unit_dict.update({"level": ws.cell(row_index, 1+diff).value})
            ticket_levels_unit_dict.update({"min": ws.cell(row_index, 2+diff).value})
            ticket_levels_unit_dict.update({"max": ws.cell(row_index, 3+diff).value})
            ticket_levels_unit_dict.update({"grade": ws.cell(row_index, 4+diff).value})
            reward_dic = {}
            ticket_levels_unit_dict.update({"reward": reward_dic})
            reward_dic.update({"prop_type": ws.cell(row_index, 5+diff).value})
            reward_dic.update({"prop_id": ws.cell(row_index, 6+diff).value})
            reward_dic.update({"prop_num": ws.cell(row_index, 7+diff).value})
            reward_dic.update({"prop_color": ws.cell(row_index, 8+diff).value})
            reward_dic.update({"chest_type": ws.cell(row_index, 9+diff).value})
            ticket_levels_list.append(ticket_levels_unit_dict)
            row_index = row_index + 1
        ticket_conf_list.append(ticket_conf_list_unit_dic)
print("小奖励配置完成")
# Big reward config: up to 3 stage bands 10 columns apart; reward rows
# start at row 60, stage bounds live in row 56.
big_reward_list = []
container_dic.update({"big_reward_list": big_reward_list})
for x in range(3):
    if(ws.cell(60,1+x*10).value!= None):
        diff = x*10
        big_reward_list_unit_dic = {}
        big_reward_list_unit_dic.update({"min_stage": ws.cell(56, 2+diff).value})
        big_reward_list_unit_dic.update({"max_stage": ws.cell(56, 6+diff).value})
        big_rewards_list = []
        big_reward_list_unit_dic.update({"big_rewards": big_rewards_list})
        row_index = 60
        # NOTE(review): condition reads column 1 regardless of band
        # (no +diff) — same pattern as the ticket table above.
        while ws.cell(row_index,1).value!= None:
            big_reward_unit_dic = {}
            big_reward_unit_dic.update({"unlock_level": ws.cell(row_index,1+diff).value})
            reward_dic = {}
            reward_dic.update({"prop_type": ws.cell(row_index, 2+diff).value})
            reward_dic.update({"prop_id": ws.cell(row_index, 3+diff).value})
            reward_dic.update({"prop_num": ws.cell(row_index, 4+diff).value})
            reward_dic.update({"prop_color": ws.cell(row_index, 5+diff).value})
            reward_dic.update({"chest_type": ws.cell(row_index, 6+diff).value})
            big_reward_unit_dic.update({"reward": reward_dic})
            big_rewards_list.append(big_reward_unit_dic)
            row_index = row_index + 1
        big_reward_list.append(big_reward_list_unit_dic)
print("大奖励配置完成")
# Offer config: one offer per row starting at row 80; up to 3 reward
# bundles per offer in 5-column strides beginning at column 5.
offer_list = []
container_dic.update({"offer_list": offer_list})
row_index = 80
while ws.cell(row_index, 1).value!= None:
    offer_list_unit_dic = {}
    offer_list_unit_dic.update( {"type": ws.cell(row_index, 1).value})
    offer_list_unit_dic.update( {"offer_id": ws.cell(row_index, 2).value})
    offer_list_unit_dic.update( {"consume_type": ws.cell(row_index, 3).value})
    offer_list_unit_dic.update( {"consume_num": ws.cell(row_index, 4).value})
    if ws.cell(row_index,5).value!=None:
        reward_list = []
        offer_list_unit_dic.update({"reward_list": reward_list})
        for x in range(3):
            if ws.cell(row_index, 5+x*5).value!= None:
                diff = x*5
                reward_list_unit_dic = {}
                reward_list_unit_dic.update({"prop_type": ws.cell(row_index, 5+diff).value})
                reward_list_unit_dic.update({"prop_id": ws.cell(row_index, 6+diff).value})
                reward_list_unit_dic.update({"prop_num": ws.cell(row_index, 7+diff).value})
                reward_list_unit_dic.update({"prop_color": ws.cell(row_index, 8+diff).value})
                reward_list_unit_dic.update({"chest_type": ws.cell(row_index, 9+diff).value})
                reward_list.append(reward_list_unit_dic)
    print(row_index)
    offer_list.append(offer_list_unit_dic)
    row_index = row_index + 1
print("Offer配置完成")
# UI-related config
# ticket_reward_bg_list: per-stage-band skin assets and color tables
# (rows 98-113, 5-column strides starting at column 4).
ticket_reward_bg_list = []
container_dic.update({"ticket_reward_bg_list": ticket_reward_bg_list})
column_index = 4
for x in range(3):
    diff = x*5
    if ws.cell( 98, column_index+diff).value != None:
        ticket_reward_bg_list_unt_dic = {}
        ticket_reward_bg_list_unt_dic.update({"min_stage": ws.cell(98, column_index+diff).value})
        ticket_reward_bg_list_unt_dic.update({"max_stage": ws.cell(99, column_index+diff).value})
        ticket_reward_bg = {}
        ticket_reward_bg_list_unt_dic.update({"ticket_reward_bg": ticket_reward_bg})
        ticket_reward_bg.update({"main_bg_icon": ws.cell(100,column_index+diff).value})
        ticket_reward_bg.update({"title_bg_icon": ws.cell(101,column_index+diff).value})
        ticket_reward_bg.update({"tab_btn_icon_normal": ws.cell(102,column_index+diff).value})
        ticket_reward_bg.update({"tab_btn_icon_select": ws.cell(103,column_index+diff).value})
        ticket_reward_bg.update({"tiao_fu_icon": ws.cell(104,column_index+diff).value})
        ticket_reward_bg.update({"big_reward_progress_effcet": ws.cell(105,column_index+diff).value})
        ticket_reward_bg.update({"big_reward_progress_bg": ws.cell(106,column_index+diff).value})
        # Color tables: each row 107-113 holds a 4-component tuple
        # (presumably RGBA — confirm against the client code).
        tabRGB = []
        tabRGB_sel = []
        tabRGB_outline_sel = []
        outlineRGB = []
        projectionRGB = []
        Gradienttop = []
        Gradientdown = []
        tabRGB = [ws.cell(107, column_index+diff).value, ws.cell(107, column_index+diff+1).value, ws.cell(107, column_index+diff+2).value, ws.cell(107, column_index+diff+3).value]
        tabRGB_sel = [ws.cell(108, column_index+diff).value, ws.cell(108, column_index+diff+1).value, ws.cell(108, column_index+diff+2).value, ws.cell(108, column_index+diff+3).value]
        tabRGB_outline_sel = [ws.cell(109, column_index+diff).value, ws.cell(109, column_index+diff+1).value, ws.cell(109, column_index+diff+2).value, ws.cell(109, column_index+diff+3).value]
        outlineRGB = [ws.cell(110, column_index+diff).value, ws.cell(110, column_index+diff+1).value, ws.cell(110, column_index+diff+2).value, ws.cell(110, column_index+diff+3).value]
        projectionRGB = [ws.cell(111, column_index+diff).value, ws.cell(111, column_index+diff+1).value, ws.cell(111, column_index+diff+2).value, ws.cell(111, column_index+diff+3).value]
        Gradienttop = [ws.cell(112, column_index+diff).value, ws.cell(112, column_index+diff+1).value, ws.cell(112, column_index+diff+2).value, ws.cell(112, column_index+diff+3).value]
        Gradientdown = [ws.cell(113, column_index+diff).value, ws.cell(113, column_index+diff+1).value, ws.cell(113, column_index+diff+2).value, ws.cell(113, column_index+diff+3).value]
        ticket_reward_bg.update({"tabRGB": tabRGB})
        ticket_reward_bg.update({"tabRGB_sel": tabRGB_sel})
        ticket_reward_bg.update({"tabRGB_outline_sel": tabRGB_outline_sel})
        ticket_reward_bg.update({"outlineRGB": outlineRGB})
        ticket_reward_bg.update({"projectionRGB": projectionRGB})
        ticket_reward_bg.update({"Gradienttop": Gradienttop})
        ticket_reward_bg.update({"Gradientdown": Gradientdown})
        ticket_reward_bg_list.append(ticket_reward_bg_list_unt_dic)
# notify: user-facing notification texts plus the pre-end reminder offset.
notify_dic = {}
container_dic.update({"notify": notify_dic})
notify_dic.update({"start_content": ws.cell(118, 3).value})
notify_dic.update({"refresh_content": ws.cell(119, 3).value})
notify_dic.update({"end_content": ws.cell(120, 3).value})
notify_dic.update({"time_before_end": ws.cell(121, 3).value})
# ui_conf: token art assets.
ui_conf_dic = {}
container_dic.update({"ui_conf": ui_conf_dic})
ui_conf_dic.update({"token_icon": ws.cell(125, 3).value})
ui_conf_dic.update({"token_icon_dui": ws.cell(126, 3).value})
ui_conf_dic.update({"token_model_effect": ws.cell(127, 3).value})
# ear_open: per-A/B-variant entry visibility switches.
ear_open_dic = {}
container_dic.update({"ear_open": ear_open_dic})
ear_open_dic.update({"A": ws.cell(130, 3).value})
ear_open_dic.update({"B": ws.cell(131, 3).value})
# popup_config_ab: auto-popup trigger settings per A/B variant.  The
# temporary dicts (after_club_upgrade_dic, after_open_chest_dic,
# login_dic) are deliberately rebound between the A and B sections so
# each variant gets its own nested objects.
popup_config_ab_dic = {}
container_dic.update({"popup_config_ab": popup_config_ab_dic})
A_dic = {}
popup_config_ab_dic.update({"A": A_dic})
after_club_upgrade_dic = {}
A_dic.update({"after_club_upgrade": after_club_upgrade_dic})
after_club_upgrade_dic.update({"available": get_bool(ws.cell(136, 5).value)})
after_club_upgrade_dic.update({"days": ws.cell(137, 5).value})
after_club_upgrade_dic.update({"limit": ws.cell(138, 5).value})
after_open_chest_dic = {}
A_dic.update({"after_open_chest": after_open_chest_dic})
after_open_chest_dic.update({"available": get_bool(ws.cell(139, 5).value)})
after_open_chest_dic.update({"days": ws.cell(140, 5).value})
after_open_chest_dic.update({"limit": ws.cell(141, 5).value})
login_dic = {}
A_dic.update({"login": login_dic})
login_dic.update({"available": get_bool(ws.cell(142, 5).value)})
login_dic.update({"days": ws.cell(143, 5).value})
login_dic.update({"limit": ws.cell(144, 5).value})
B_dic = {}
popup_config_ab_dic.update({"B": B_dic})
after_club_upgrade_dic = {}
B_dic.update({"after_club_upgrade": after_club_upgrade_dic})
after_club_upgrade_dic.update({"available": get_bool(ws.cell(145, 5).value)})
after_club_upgrade_dic.update({"days": ws.cell(146, 5).value})
after_club_upgrade_dic.update({"limit": ws.cell(147, 5).value})
after_open_chest_dic = {}
B_dic.update({"after_open_chest": after_open_chest_dic})
after_open_chest_dic.update({"available": get_bool(ws.cell(148, 5).value)})
after_open_chest_dic.update({"days": ws.cell(149, 5).value})
after_open_chest_dic.update({"limit": ws.cell(150, 5).value})
login_dic = {}
B_dic.update({"login": login_dic})
login_dic.update({"available": get_bool(ws.cell(151, 5).value)})
login_dic.update({"days": ws.cell(152, 5).value})
login_dic.update({"limit": ws.cell(153, 5).value})
print("UI配置完成")
# Write the assembled config out as pretty-printed JSON.
with open(json_dir, "w") as json_file:
    json_str = json.dumps(container_dic, indent=4)
    json_file.write(json_str)
| [
"1446598178@qq.com"
] | 1446598178@qq.com |
593e53c60f0516295d431c9288b9ff7fae2deb8e | 15e586540892790d42a1ecb930f7fc5c7e50328e | /backend/util/__init__.py | 634cd9575dd342fe1d771e4f68d1e140a10adfe0 | [
"Apache-2.0"
] | permissive | felixu1992/testing-platform | f6a73ffcfd839e4d79e0bd5eb1d0c1292bcc5b57 | d7008343c25ec7f47acb670ae5c9b9b5f0593d63 | refs/heads/main | 2023-07-03T08:57:33.272071 | 2021-08-13T12:16:36 | 2021-08-13T12:16:36 | 310,037,662 | 0 | 0 | null | 2020-12-24T06:40:00 | 2020-11-04T15:09:15 | Python | UTF-8 | Python | false | false | 175 | py | from backend.util.jwt_token import Security, UserHolder
from backend.util.resp_data import Response
from backend.util.utils import *
from backend.util.execute import Executor
| [
"xufei_0320@163.com"
] | xufei_0320@163.com |
55cf1f4ce50995a9d07d8447d478a6db0d5bb5be | bb198232df12a1adb9e8a6164ff2a403bf3107cf | /wifi-dump-parser-3/template-parsing.py | 5ebdb33d76793457a8eace4b4066f6e2e0ee8ee9 | [] | no_license | vanello/wifi-arsenal | 9eb79a43dfdd73d3ead1ccd5d2caf9bad9e327ee | 1ca4c5a472687f8f017222893f09a970652e9a51 | refs/heads/master | 2021-01-16T22:00:37.657041 | 2015-09-03T03:40:43 | 2015-09-03T03:40:43 | 42,060,303 | 1 | 0 | null | 2015-09-07T15:24:11 | 2015-09-07T15:24:11 | null | UTF-8 | Python | false | false | 21,786 | py | #Author : Abhinav Narain
#Date : May 6, 2013
#Purpose : To read the binary files with data from BISmark deployment in homes
# Gives the frames: transmitted and received by the Access point in human readable form
# To test the output of the files with the dumps on clients; and understanding the trace
#
# Contention delay
#
import os,sys,re
import gzip
import struct
from header import *
from mac_parser import *
from stats import *
from rate import *
try:
import cPickle as pickle
except ImportError:
import pickle
# Accumulators for per-frame records parsed out of each capture file
# (tx/rx, split by data/management/control frame type).  Python 2 file.
missing_files=[]
tx_time_data_series=[]
tx_time_mgmt_series=[]
tx_time_ctrl_series=[]
rx_time_data_series=[]
rx_time_mgmt_series=[]
rx_time_ctrl_series=[]
# CLI: <data dir> <mgmt dir> <ctrl dir> <output type>
if len(sys.argv) !=5 :
    print len(sys.argv)
    print "Usage : python station-process.py data/<data.gz> mgmt/<mgmt.gz> ctrl/<ctrl.gz> <outputfile> "
    sys.exit(1)
#compare regular expression for filenameif argv[1] for the lexicographic /time ordering so that we load them in order in the first place
data_f_dir=sys.argv[1]
mgmt_f_dir=sys.argv[2]
ctrl_f_dir=sys.argv[3]
output_type=sys.argv[4]
data_fs=os.listdir(data_f_dir)
ctrl_fs=os.listdir(ctrl_f_dir)
# Running parse state shared by the main per-file loop below.
data_file_header_byte_count=0
ctrl_file_header_byte_count=0
mgmt_file_header_byte_count=0
file_counter=0
file_timestamp=0
filename_list=[]
unix_time=set()
# Split capture names like "<id>-<unixtime>-d-<seq>..." into fields.
for data_f_n in data_fs :
    filename_list.append(data_f_n.split('-'))
    unix_time.add(int(data_f_n.split('-')[1]))
    # NOTE(review): the name is appended *before* this type check, and
    # `continue` is the last statement of the loop body, so non-"d"
    # files are still kept in filename_list — confirm this is intended.
    if not (data_f_n.split('-')[2]=='d'):
        print "its not a data file ; skip "
        continue
# Sort by sequence number, then (stable sort) by capture timestamp.
filename_list.sort(key=lambda x : int(x[3]))
filename_list.sort(key=lambda x : int(x[1]))
for data_f_name_list in filename_list : #data_fs :
data_f_name="-".join(data_f_name_list)
data_f= gzip.open(data_f_dir+data_f_name,'rb')
data_file_content=data_f.read()
data_f.close()
data_file_current_timestamp=0
data_file_seq_n=0
bismark_id_data_file=0
start_64_timestamp_data_file=0
for i in xrange(len(data_file_content )):
if data_file_content[i]=='\n':
bismark_data_file_header = str(data_file_content[0:i])
ents= bismark_data_file_header.split(' ')
bismark_id_data_file=ents[0]
start_64_timestamp_data_file= int(ents[1])
data_file_seq_no= int(ents[2])
data_file_current_timestamp=int(ents[3])
data_file_header_byte_count =i
break
data_contents=data_file_content.split('\n----\n')
header_and_correct_data_frames = data_contents[0]
err_data_frames = data_contents[1]
correct_data_frames_missed=data_contents[2]
err_data_frames_missed=data_contents[3]
ctrl_f_name = data_f_name
ctrl_f_name =re.sub("-d-","-c-",ctrl_f_name)
try :
ctrl_f= gzip.open(ctrl_f_dir+ctrl_f_name,'rb')
ctrl_file_content=ctrl_f.read()
except :
print "CTRL file not present ", ctrl_f_name
missing_files.append([ctrl_f_name,data_file_current_timestamp])
continue
ctrl_f.close()
mgmt_f_name = data_f_name
mgmt_f_name = re.sub("-d-","-m-",mgmt_f_name)
try :
mgmt_f= gzip.open(mgmt_f_dir+mgmt_f_name,'rb')
mgmt_file_content=mgmt_f.read()
except :
print "MGMT file not present ",mgmt_f_name
missing_files.append([mgmt_f_name,data_file_current_timestamp])
continue
mgmt_f.close()
mgmt_file_current_timestamp=0
mgmt_file_seq_no=0
bismark_id_mgmt_file=0
start_64_timestamp_mgmt_file=0
ctrl_file_current_timestamp=0
ctrl_file_seq_no=0
bismark_id_ctrl_file=0
start_64_timestamp_ctrl_file=0
for i in xrange(len(mgmt_file_content )):
if mgmt_file_content[i]=='\n':
bismark_mgmt_file_header = str(mgmt_file_content[0:i])
ents= bismark_mgmt_file_header.split(' ')
bismark_id_mgmt_file=ents[0]
start_64_timestamp_mgmt_file=int(ents[1])
mgmt_file_seq_no= int(ents[2])
mgmt_file_current_timestamp= int(ents[3])
mgmt_file_header_byte_count =i
break
mgmt_contents=mgmt_file_content.split('\n----\n')
header_and_beacon_mgmt_frames = mgmt_contents[0]
common_mgmt_frames = mgmt_contents[1]
err_mgmt_frames=mgmt_contents[2]
beacon_mgmt_frames_missed=mgmt_contents[3]
common_mgmt_frames_missed=mgmt_contents[4]
err_mgmt_frames_missed=mgmt_contents[5]
for i in xrange(len(ctrl_file_content )):
if ctrl_file_content[i]=='\n':
bismark_ctrl_file_header = str(ctrl_file_content[0:i])
ents= bismark_ctrl_file_header.split(' ')
bismark_id_ctrl_file= ents[0]
start_64_timestamp_ctrl_file= int(ents[1])
ctrl_file_seq_no= int(ents[2])
ctrl_file_current_timestamp=int(ents[3])
ctrl_file_header_byte_count =i
break
ctrl_contents=ctrl_file_content.split('\n----\n')
header_and_correct_ctrl_frames = ctrl_contents[0]
err_ctrl_frames = ctrl_contents[1]
correct_ctrl_frames_missed=ctrl_contents[2]
err_ctrl_frames_missed=ctrl_contents[3]
#done with reading the binary blobs from file ; now check for timestamps are correct
if (not (ctrl_file_current_timestamp == mgmt_file_current_timestamp == data_file_current_timestamp )) :
print "timestamps don't match "
sys.exit(1)
else :
file_timestamp=ctrl_file_current_timestamp
if (not (ctrl_file_seq_no == mgmt_file_seq_no == data_file_seq_no)):
print "sequence number don't match "
sys.exit(1)
if (len(ctrl_contents) != 4 or len(data_contents) != 4 or len(mgmt_contents) !=6) :
print "for ctrl " ,len (ctrl_contents) ,"for data", len(data_contents), "for mgmt", len(mgmt_contents)
print "file is malformed or the order of input folders is wrong "
continue
'''
if (data_file_current_timestamp < t1-1):
continue
if (data_file_current_timestamp >t2-1):
break
print t1, data_file_current_timestamp, t2
'''
#The following code block parses the data file
#print "----------done with missed .. now with actual data "
correct_data_frames=header_and_correct_data_frames[data_file_header_byte_count+1:]
data_index=0
for idx in xrange(0,len(correct_data_frames)-DATA_STRUCT_SIZE ,DATA_STRUCT_SIZE ):
global file_timestamp
frame=correct_data_frames[data_index:data_index+DATA_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem=defaultdict(list)
monitor_elem=defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success:
for key in frame_elem.keys():
tsf=key
parse_data_frame(frame,radiotap_len,frame_elem)
temp=frame_elem[tsf]
temp.insert(0,tsf)
#print temp
if radiotap_len == RADIOTAP_RX_LEN:
rx_time_data_series.append(temp)
elif radiotap_len ==RADIOTAP_TX_LEN :
tx_time_data_series.append(temp)
else :
print "impossible radiotap len detected ; Report CERN", radiotap_len
else:
print "success denied; correct data frame"
data_index=data_index+DATA_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
data_index=0
for idx in xrange(0,len(err_data_frames)-DATA_ERR_STRUCT_SIZE,DATA_ERR_STRUCT_SIZE ):
frame=err_data_frames[data_index:data_index+DATA_ERR_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem=defaultdict(list)
monitor_elem=defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success:
for key in frame_elem.keys():
tsf=key
parse_err_data_frame(frame,radiotap_len,frame_elem)
temp=frame_elem[tsf]
temp.insert(0,tsf)
if radiotap_len == RADIOTAP_RX_LEN:
rx_time_data_series.append(temp)
elif radiotap_len ==RADIOTAP_TX_LEN :
tx_time_series.append(temp)
print "wrong err data tx frame "
else :
print "impossible radiotap len detected ; Report CERN"
else :
print "success denied; incorrect data frame"
data_index= data_index+DATA_ERR_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
#The following code block parses the mgmt files
beacon_mgmt_frames=header_and_beacon_mgmt_frames[mgmt_file_header_byte_count+1:]
mgmt_index=0
for idx in xrange(0,len(beacon_mgmt_frames)-MGMT_BEACON_STRUCT_SIZE ,MGMT_BEACON_STRUCT_SIZE ):
global file_timestamp
frame=beacon_mgmt_frames[mgmt_index:mgmt_index+MGMT_BEACON_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem,monitor_elem=defaultdict(list),defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
if not( radiotap_len ==RADIOTAP_RX_LEN or radiotap_len == RADIOTAP_TX_LEN) :
print "the radiotap header is not correct "
sys.exit(1)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success :
for key in frame_elem.keys():
tsf=key
temp=frame_elem[tsf]
temp.insert(0,tsf)
parse_mgmt_beacon_frame(frame,radiotap_len,frame_elem)
if radiotap_len== RADIOTAP_TX_LEN :
tx_time_mgmt_series.append(temp)
print temp
else :
print "beacon success denied"
mgmt_index=mgmt_index+MGMT_BEACON_STRUCT_SIZE
del frame_elem
del monitor_elem
mgmt_index=0
for idx in xrange(0,len(common_mgmt_frames)-MGMT_COMMON_STRUCT_SIZE,MGMT_COMMON_STRUCT_SIZE ):
global file_timestamp
frame=common_mgmt_frames[mgmt_index:mgmt_index+MGMT_COMMON_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem,monitor_elem=defaultdict(list),defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
if not( radiotap_len ==RADIOTAP_RX_LEN or radiotap_len == RADIOTAP_TX_LEN) :
print "the radiotap header is not correct "
sys.exit(1)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success==1 :
for key in frame_elem.keys():
tsf=key
temp=frame_elem[tsf]
temp.insert(0,tsf)
parse_mgmt_common_frame(frame,radiotap_len,frame_elem)
if radiotap_len== RADIOTAP_TX_LEN :
tx_time_mgmt_series.append(temp)
else :
print "common mgmt success denied"
mgmt_index= mgmt_index+MGMT_COMMON_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
mgmt_index=0
for idx in xrange(0,len(err_mgmt_frames)-MGMT_ERR_STRUCT_SIZE,MGMT_ERR_STRUCT_SIZE ):
global file_timestamp
frame=err_mgmt_frames[mgmt_index:mgmt_index+MGMT_ERR_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem,monitor_elem=defaultdict(list),defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
if not( radiotap_len ==RADIOTAP_RX_LEN or radiotap_len == RADIOTAP_TX_LEN) :
print "the radiotap header is not correct "
sys.exit(1)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success==1 :
for key in frame_elem.keys():
tsf=key
temp=frame_elem[tsf]
temp.insert(0,tsf)
parse_mgmt_err_frame(frame,radiotap_len,frame_elem)
if radiotap_len== RADIOTAP_TX_LEN :
tx_time_mgmt_common_series.append(temp)
print "err: mgmt tx frame"
else:
print "success denied"
mgmt_index= mgmt_index+MGMT_ERR_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
#print "----------done with missed .. now with actual ctrl data "
correct_ctrl_frames=header_and_correct_ctrl_frames[ctrl_file_header_byte_count+1:]
ctrl_index=0
for idx in xrange(0,len(correct_ctrl_frames)-CTRL_STRUCT_SIZE ,CTRL_STRUCT_SIZE ):
global file_timestamp
frame=correct_ctrl_frames[ctrl_index:ctrl_index+CTRL_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem, monitor_elem=defaultdict(list),defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
if not( radiotap_len ==RADIOTAP_RX_LEN or radiotap_len == RADIOTAP_TX_LEN) :
print "the radiotap header is not correct "
sys.exit(1)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success :
for key in frame_elem.keys():
tsf=key
parse_ctrl_frame(frame,radiotap_len,frame_elem)
temp=frame_elem[tsf]
temp.insert(0,tsf)
if radiotap_len ==RADIOTAP_TX_LEN :
tx_time_ctrl_series.append(temp)
elif radiotap_len ==RADIOTAP_RX_LEN :
rx_time_ctrl_series.append(temp)
else :
print "ctrl success denied"
ctrl_index=ctrl_index+CTRL_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
ctrl_index=0
for idx in xrange(0,len(err_ctrl_frames)-CTRL_ERR_STRUCT_SIZE,CTRL_ERR_STRUCT_SIZE):
global file_timestamp
frame=err_ctrl_frames[ctrl_index:ctrl_index+CTRL_ERR_STRUCT_SIZE]
offset,success,tsf= 8,-1,0
header = frame[:offset]
frame_elem,monitor_elem=defaultdict(list),defaultdict(list)
(version,pad,radiotap_len,present_flag)=struct.unpack('<BBHI',header)
if not( radiotap_len ==RADIOTAP_RX_LEN or radiotap_len == RADIOTAP_TX_LEN) :
print "the radiotap header is not correct "
sys.exit(1)
(success,frame_elem,monitor_elem)=parse_radiotap(frame,radiotap_len,present_flag,offset,monitor_elem,frame_elem)
if success ==1:
for key in frame_elem.keys():
tsf=key
parse_ctrl_err_frame(frame,radiotap_len,frame_elem)
temp=frame_elem[tsf]
temp.insert(0,tsf)
if radiotap_len == RADIOTAP_RX_LEN:
rx_time_ctrl_series.append(temp)
elif radiotap_len ==RADIOTAP_TX_LEN :
tx_time_ctrl_series.append(temp)
print "wrong: ctrl frame "
else :
print "success denied"
ctrl_index= ctrl_index+CTRL_ERR_STRUCT_SIZE
del frame_elem
del monitor_elem
'''
file_counter +=1
if file_counter %10 == 0:
print file_counter
print "now processing the files to calculate time "
tx_time_data_series.sort(key=lambda x:x[0])
tx_time_ctrl_series.sort(key=lambda x:x[0])
tx_time_mgmt_series.sort(key=lambda x:x[0])
mgmt_zero_queue_tx=[]
mgmt_retx_count=[]
ctrl_zero_tx=[]
ctrl_retx_count=[]
Station_list=list(Station)
Station_tx_retx_count = defaultdict(list)
for i in range(0,len(tx_time_ctrl_series)):
frame=tx_time_ctrl_series[i]
c_frame_tx_flags_radiotap=frame[1]
c_frame_retx=frame[2]
c_frame_mpdu_queue_size=frame[5]
c_frame_retx=frame[2]
if c_frame_tx_flags_radiotap[0]==0 :
ctrl_retx_count.append(c_frame_retx)
if c_frame_mpdu_queue_size ==0 and c_frame_retx==0 :
ctrl_zero_tx.append()
for i in range(0,len(tx_time_mgmt_series)):
frame=tx_time_mgmt_series[i]
c_frame_tx_flags_radiotap=frame[1]
c_frame_retx=frame[2]
c_frame_mpdu_queue_size=frame[5]
c_frame_retx=frame[2]
c_frame_total_time=frame[4]
if c_frame_tx_flags_radiotap[0]==0 :
ctrl_retx_count.append(c_frame_retx)
if c_frame_mpdu_queue_size ==0 and c_frame_retx==0 :
ctrl_zero_tx.append(c_frame_total_time)
print "in tx looping "
Station_tx_series=defaultdict(list)
for j in range(0,len(Station_list)):
for i in range(0,len(tx_time_data_series)):
frame = tx_time_data_series[i]
if frame[11]==Station_list[j] :
width, half_gi, shortPreamble= 0,0,0
phy,kbps=frame[7],temp[0]*500
prop_time=(frame[-1]*8.0 *1000000)/ (frame[3] *1000000) #frame[-1] is the size of frame in bytes
temp= frame[10]
abg_rix,pktlen= temp[0], frame[17]
airtime,curChan=-1,-23
if abg_rix == -1 :
n_rix=temp[-1][0]
airtime=ath_pkt_duration(n_rix, pktlen, width, half_gi,shortPreamble)
else :
airtime= -1 #ath9k_hw_computetxtime(phy,kbps,pktlen,abg_rix,shortPreamble,curChan)
Station_tx_series[frame[11]].append([frame[0],frame[1],frame[2],frame[3],frame[4],frame[5],frame[6],frame[8],frame[7],frame[9],frame[13],frame[14],frame[15],frame[16],frame[17],airtime,prop_time])
# 0 ,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11
#time [0],txflags[1],retx[2],success_rate[3],total_time[4],Q len [5],A-Q len [6], Q-no[7],phy_type[8],retx_rate_list[9],seq no[13],fragment no[14],mac-layer-flags[15], frame-prop-type[16], framesize[17],prop time,temp
# 12 ,13 ,14 ,16, 17
print"format:tsf, txflags, retx, successful bitrate, total time,Qlen,AMPDU-Q len,Q no, phy-type,retx rate list,seq no, frag no, mac-layer flags, frame prop type,frame size, frame-prop time"
for j in Station_tx_series.keys():
#j is the station name
print "TX Station :", j
list_of_frames= Station_tx_series[j]
for i in range(1,len(list_of_frames)):
frame=list_of_frames[i]
''' #used when looking for consecutive frames
previous_frame=list_of_frames[i-1]
c_frame_departure=frame[0]
p_frame_departure=previous_frame[0]
c_frame_total_time=frame[4]
p_frame_total_time=previous_frame[4]
c_frame_mpdu_queue_size=frame[5]
p_frame_mpdu_queue_size=previous_frame[5]
c_frame_ampdu_queue_size=frame[6]
p_frame_ampdu_queue_size=previous_frame[6]
c_frame_queue_no=frame[7]
p_frame_queue_no=previous_frame[7]
c_frame_phy_flag=frame[8]
p_frame_phy_flag=previous_frame[8]
c_frame_seq_no=frame[10]
p_frame_seq_no=previous_frame[10]
c_frame_frag_no=frame[11]
p_frame_frag_no=previous_frame[11]
c_frame_size= frame[-1]
p_frame_size= previous_frame[-1]
c_frame_tx_flags=frame[1]
'''
c_tx_flags_radiotap=frame[1]
#if c_tx_flags_radiotap[0]==0 :
c_frame_mpdu_queue_size= frame[5]
c_frame_retx= frame[2]
if c_frame_mpdu_queue_size ==0 and c_frame_retx==0 :
print frame
# 0 ,1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11
#time [0],txflags[1],retx[2],success_rate[3],total_time[4],Q len [5],A-Q len [6], Q-no[7],phy_type[8],retx_rate_list[9],seq no[12],fragment no[13],mac-layer-flags[14], frame-prop-type[15], framesize[16],prop time
# 12 ,13 ,14 ,16
print "done with a station "
print "in rx_looping "
Station_rx_series=defaultdict(list)
print "RECIVED FRAMES "
print "format : time,flags,freq, rx_flags,success rate, rx_queue_time,framesize , signal,RSSI, seq number,frag no,retry frame,prop time"
for i in range(0,len(rx_time_data_series)):
frame = rx_time_data_series[i]
for i in range(0,len(Station_list)):
if frame[12]==Station_list[i] :
prop_time=(frame[10]*8.0 *1000000)/ (frame[8] *1000000)
Station_rx_series[frame[12]].append([frame[0],frame[1],frame[2],frame[7],frame[8],frame[9] ,frame[10],frame[4],frame[11],frame[14],frame[15],frame[16][1],prop_time])
#print frame[12],frame[0],frame[1],frame[2],frame[7],frame[8],frame[9],frame[10],frame[4],frame[11],frame[14],frame[15],frame[16][1],prop_time
#time [0],flags[1],freq[2], rx_flags[7],success rate [8], rx_queue_time[9],framesize [10], signal [4],RSSI [11], seq number [14], fragment no [15],retry frame [16][1],prop time
'''
for j in Station_rx_series.keys():
list_of_frames= Station_rx_series[j]
print "RX Station ",j
for i in range(1,len(list_of_frames)):
frame= list_of_frames[i]
print frame
'''
for i in range(0,len(missing_files)):
print missing_files[i]
print "number of files that can't be located ", len(missing_files)
| [
"oleg.kupreev@gmail.com"
] | oleg.kupreev@gmail.com |
f2e7f0e94bba710d8fdae5692b1f3256e1ae55d1 | 0bfb55b41282803db96b90e7bba73d86be7e8553 | /submissions/migrations/0002_auto_20161028_0540.py | cd60356ef018a13dc5711524a56d9a60a4a3a77a | [
"MIT"
] | permissive | OpenFurry/honeycomb | eebf2272f8ae95eb686ad129555dbebcf1adcd63 | c34eeaf22048948fedcae860db7c25d41b51ff48 | refs/heads/master | 2021-01-11T01:52:40.978564 | 2016-12-29T18:08:38 | 2016-12-29T18:08:38 | 70,649,821 | 2 | 2 | null | 2016-12-29T18:08:39 | 2016-10-12T01:22:38 | Python | UTF-8 | Python | false | false | 2,187 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-28 05:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('usermgmt', '0001_initial'),
('submissions', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='submission',
name='allowed_groups',
field=models.ManyToManyField(blank=True, to='usermgmt.FriendGroup'),
),
migrations.AddField(
model_name='submission',
name='folders',
field=models.ManyToManyField(blank=True, through='submissions.FolderItem', to='submissions.Folder'),
),
migrations.AddField(
model_name='submission',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='folderitem',
name='folder',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Folder'),
),
migrations.AddField(
model_name='folderitem',
name='submission',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Submission'),
),
migrations.AddField(
model_name='folder',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='folder',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='submissions.Folder'),
),
migrations.AddField(
model_name='folder',
name='submissions',
field=models.ManyToManyField(through='submissions.FolderItem', to='submissions.Submission'),
),
]
| [
"madison.scott-clary@canonical.com"
] | madison.scott-clary@canonical.com |
510abea419d84515b47f26c995b7cd11ec16b305 | d105e84f7c5546bd2a0cc3d73f8660acb35436e1 | /dbsn_gray/gray_kd_test.py | 55b87c187124b1811ff359fcea3515b1d91f97dd | [] | no_license | lvjj18/DBSN | 3ccf1947e313fbd319df8838be3b85e7a983caf6 | 4cf51b0c3b9ba7b0d3b2ab9e881a73d51058451c | refs/heads/master | 2022-12-12T00:31:19.319266 | 2020-08-31T11:53:28 | 2020-08-31T11:53:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,921 | py | #
import os
import random
import datetime
import time
from pathlib import Path
import math
from scipy.optimize import curve_fit
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from data import create_dataset
from gray_kd_options import opt
from net.backbone_net import DBSN_Model
from net.sigma_net import Sigma_mu_Net, Sigma_n_Net
from net.mwcnn_gray import MWCNN
from util.utils import batch_psnr,findLastCheckpoint
# Seed every RNG (python, numpy, torch CPU and CUDA) for reproducible runs.
seed=0
random.seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
# NOTE(review): the next two CUDA seeding calls duplicate the seeding done
# just above -- harmless but redundant.
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if seed == 0:
    # Deterministic cuDNN kernels: trades speed for bit-exact reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def _load_matched_state_dict(model, state_dict):
    """Load `state_dict` into `model`, asserting an exact one-to-one key match.

    The original code repeated this filter/assert/update sequence four times,
    once per sub-network; behaviour is unchanged, only de-duplicated. The
    asserts make a mismatched checkpoint fail loudly instead of loading
    partially.
    """
    model_dict = model.state_dict()
    matched = {k: v for k, v in state_dict.items() if k in model_dict}
    assert len(state_dict) == len(matched)
    assert len(matched) == len(model_dict)
    model_dict.update(matched)
    model.load_state_dict(model_dict)


def main(args):
    """Evaluate the gray-scale knowledge-distillation pipeline on args.valset.

    Builds the DBSN backbone, the two sigma estimation nets and the MWCNN
    student denoiser, restores all four from `args.last_ckpt`, then reports
    per-image and average PSNR for:
      * mu            -- the raw blind-spot network output,
      * dbsn          -- the MAP fusion of mu with the noisy input,
      * cnn_denoiser  -- the distilled MWCNN output.
    """
    # Validation dataset.
    val_setname = args.valset
    dataset_val = create_dataset(val_setname, 'val', args).load_data()

    # Network architectures.
    dbsn_net = DBSN_Model(in_ch=args.input_channel,
                          out_ch=args.output_channel,
                          mid_ch=args.middle_channel,
                          blindspot_conv_type=args.blindspot_conv_type,
                          blindspot_conv_bias=args.blindspot_conv_bias,
                          br1_block_num=args.br1_block_num,
                          br1_blindspot_conv_ks=args.br1_blindspot_conv_ks,
                          br2_block_num=args.br2_block_num,
                          br2_blindspot_conv_ks=args.br2_blindspot_conv_ks,
                          activate_fun=args.activate_fun)
    sigma_mu_net = Sigma_mu_Net(in_ch=args.middle_channel,
                                out_ch=args.sigma_mu_output_channel,
                                mid_ch=args.sigma_mu_middle_channel,
                                layers=args.sigma_mu_layers,
                                kernel_size=args.sigma_mu_kernel_size,
                                bias=args.sigma_mu_bias)
    sigma_n_net = Sigma_n_Net(in_ch=args.sigma_n_input_channel,
                              out_ch=args.sigma_n_output_channel,
                              mid_ch=args.sigma_n_middle_channel,
                              layers=args.sigma_n_layers,
                              kernel_size=args.sigma_n_kernel_size,
                              bias=args.sigma_n_bias)
    cnn_denoiser_net = MWCNN()

    # Move to GPU (teacher-side nets wrapped for multi-GPU evaluation).
    dbsn_model = nn.DataParallel(dbsn_net, args.device_ids).cuda()
    sigma_mu_model = nn.DataParallel(sigma_mu_net, args.device_ids).cuda()
    sigma_n_model = nn.DataParallel(sigma_n_net, args.device_ids).cuda()
    cnn_denoiser_model = cnn_denoiser_net.cuda()

    # Restore every sub-network from the last checkpoint with strict key checks.
    tmp_ckpt = torch.load(args.last_ckpt, map_location=torch.device('cuda', args.device_ids[0]))
    training_params = tmp_ckpt['training_params']
    start_epoch = training_params['start_epoch']  # kept for parity with the training script; unused here
    _load_matched_state_dict(dbsn_model, tmp_ckpt['state_dict_dbsn'])
    _load_matched_state_dict(sigma_mu_model, tmp_ckpt['state_dict_sigma_mu'])
    _load_matched_state_dict(sigma_n_model, tmp_ckpt['state_dict_sigma_n'])
    _load_matched_state_dict(cnn_denoiser_model, tmp_ckpt['state_dict_cnn_denoiser'])

    # --------------------------------------------
    # Evaluation
    # --------------------------------------------
    print("Evaluating on "+str(val_setname[0]))
    dbsn_model.eval()
    sigma_mu_model.eval()
    sigma_n_model.eval()
    cnn_denoiser_model.eval()
    with torch.no_grad():
        psnr_val = 0
        psnr_val_dbsn = 0
        psnr_val_cnn_denoiser = 0
        for count, data in enumerate(dataset_val):
            # Clean/noisy pair for this validation image.
            img_val = data['clean'].cuda()
            img_noise_val = data['noisy'].cuda()
            # Crop one row/column so spatial dims suit MWCNN's downsampling.
            img_val = img_val[:, :, :-1, :-1]
            img_noise_val = img_noise_val[:, :, :-1, :-1]
            _, C, H, W = img_noise_val.shape
            # Forward passes through all branches.
            cnn_denoiser_out_val = cnn_denoiser_model(img_noise_val)
            mu_out_val, mid_out_val = dbsn_model(img_noise_val)
            sigma_mu_out_val = sigma_mu_model(mid_out_val)
            sigma_mu_val = sigma_mu_out_val ** 2
            if args.noise_type == 'gaussian':
                # Gaussian noise: one spatially-constant sigma_n per image.
                sigma_n_out_val = sigma_n_model(img_noise_val)
                sigma_n_out_val = sigma_n_out_val.mean(dim=(2, 3), keepdim=True).repeat(1, 1, H, W)
            else:
                sigma_n_out_val = sigma_n_model(mu_out_val)
            # Softplus keeps the noise estimate positive; +1e-3 avoids zeros.
            noise_est_val = F.softplus(sigma_n_out_val - 4) + (1e-3)
            sigma_n_val = noise_est_val ** 2
            # MAP fusion of the noisy observation and the blind-spot estimate.
            map_out_val = (img_noise_val * sigma_mu_val + mu_out_val * sigma_n_val) / (sigma_mu_val + sigma_n_val)
            # Per-image PSNR for each branch, clamped to the valid [0,1] range.
            psnr_mu = batch_psnr(mu_out_val.clamp(0., 1.), img_val.clamp(0., 1.), 1.)
            psnr_val += psnr_mu
            psnr_dbsn = batch_psnr(map_out_val.clamp(0., 1.), img_val.clamp(0., 1.), 1.)
            psnr_val_dbsn += psnr_dbsn
            psnr_cnn_denoiser = batch_psnr(cnn_denoiser_out_val.clamp(0., 1.), img_val.clamp(0., 1.), 1.)
            psnr_val_cnn_denoiser += psnr_cnn_denoiser
            print("Image[%d]: psnr_mu=%.4f, psnr_dbsn=%.4f, psnr_cnn_denoiser=%.4f " % (count, psnr_mu, psnr_dbsn, psnr_cnn_denoiser))
        psnr_val /= len(dataset_val)
        psnr_val_dbsn /= len(dataset_val)
        psnr_val_cnn_denoiser /= len(dataset_val)
        print("VAL avg psnr_mu: %.4f, avg psnr_dbsn: %.4f, avg psnr_cnn_denoiser: %.4f \n" % (psnr_val, psnr_val_dbsn, psnr_val_cnn_denoiser))
if __name__ == "__main__":
    # Run evaluation with the options parsed in gray_kd_options.
    main(opt)
    exit(0)
| [
"xhwu.cpsl.hit@gmail.com"
] | xhwu.cpsl.hit@gmail.com |
ee3dbda8b19a10b1e5348fd84e2fbaa94ac30ee0 | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/minPressureSetpoint.py | 37a8d2471523d2fe28bceff3606f5ef910265dfe | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | """minPressureSetpoint standard property type, originally defined in resource
file set standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.datapoints.press
from pylon.resources.standard import standard
class minPressureSetpoint(pylon.resources.datapoints.press.press):
"""minPressureSetpoint standard property type. Minimum pressure.
Setpoint for the operational low pressure limit."""
def __init__(self):
super().__init__(
)
self._default_bytes = b'\x00\x00'
self._original_name = 'SCPTminPressureSetpoint'
self._property_scope, self._property_key = 0, 234
self._definition = standard.add(self)
if __name__ == '__main__':
# unit test code.
item = minPressureSetpoint()
pass
| [
"lcoppa@rocketmail.com"
] | lcoppa@rocketmail.com |
2a97d4fde1b262d7d7571c5622491d16841bed3f | 313bb88c43d74995e7426f9482c6c8e670fdb63c | /07-modules/example6_module.py | 8926b868c6f45aa8be74c33928f9bfcea9bd86be | [] | no_license | martakedzior/python-course | 8e93fcea3e9e1cb51920cb1fcf3ffbb310d1d654 | 3af2296c2092023d91ef5ff3b4ef9ea27ec2f227 | refs/heads/main | 2023-05-06T07:26:58.452520 | 2021-05-26T16:50:26 | 2021-05-26T16:50:26 | 339,822,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import example6_module
import shapes
if __name__ == '__ main__':
print(shapes2.rectangle_aera(3, 4))
else:
print('Jestem teraz modulem')
print('wartość zmiennej __name__:', __name__)
print('moduł - wartość zmiennej __name__:', __name__) | [
"marta.kedzior@wp.pl"
] | marta.kedzior@wp.pl |
84fc8575d753c1931e2f5b8c06106cebe611a00b | de5d6ddaf21b60608294fd7d434850631664051e | /gobou/bin/django-admin.py | cde98064ddc44372a8aab2665d97d9044f9ff72b | [] | no_license | okumura-shinij/my-first-blog | ea9a80d19382bd6d3a5b062476b3399d6b866d03 | 782e0d2ef1a6a8219e646101ee33e26c13fd136d | refs/heads/master | 2022-11-27T19:42:14.091497 | 2020-07-30T15:40:14 | 2020-07-30T15:40:14 | 283,203,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | #!/Users/okumurashinji/django/gobou/bin/python
from django.core import management
if __name__ == "__main__":
    # Delegate to Django's command-line utility (same entry point as manage.py).
    management.execute_from_command_line()
| [
"okumura.womb@gmail.com"
] | okumura.womb@gmail.com |
6f8f5bf67bebaee6514119e5b95fa511517820e1 | 5b3beb37c5285caccde895e0c6c8c091bb8c4a29 | /app/app.py | 01518818fdc6b1a3e9c75d7575d4e0bc3c1ee744 | [] | no_license | igaurab/align_image_flask_server | 51ead5844afd4552b46c2cd27d0be26b53c84ddd | 14a9f281eb39662e6392813593980fc07236dcec | refs/heads/master | 2023-04-25T11:10:00.990589 | 2021-05-03T03:42:11 | 2021-05-03T03:42:11 | 363,807,532 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from flask import Flask, jsonify, request, send_from_directory
from align_image import align_image
import cv2
from PIL import Image as Image
import numpy as np
import os
# Flask application; aligned results are written under <app root>/output/.
app = Flask(__name__)
UPLOAD_FOLDER = os.path.join(app.root_path, 'output/')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/uploads/<path:filename>', methods=['GET', 'POST'])
def download(filename):
    """Serve a previously generated output file from the upload folder."""
    uploads = UPLOAD_FOLDER
    # NOTE(review): newer Flask versions renamed `filename=` to `path=` in
    # send_from_directory -- confirm against the pinned Flask version.
    return send_from_directory(directory=uploads, filename=filename)
@app.route('/')
@app.route('/uploads')
def upload_image():
    """Render a minimal HTML form that POSTs an image to /alignment."""
    return """
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form method=post action="/alignment" enctype=multipart/form-data>
      <input type=file name=image>
      <input type=submit value=Upload>
    </form>
    """
@app.route('/alignment', methods=['POST'])
def alignment():
    """Align the uploaded image and return a JSON payload with a download URL."""
    file = request.files['image']
    # Decode the raw upload bytes into an OpenCV BGR image.
    npimg = np.frombuffer(file.read() , np.uint8)
    image = cv2.imdecode(npimg,cv2.IMREAD_COLOR)
    result = align_image(image)
    image_name = file.filename
    # NOTE(review): `result.save(filename=...)` is a Wand-style API, not PIL's
    # `save(fp)` -- confirm what align_image actually returns.
    result.save(filename=app.config['UPLOAD_FOLDER'] + 'output-' + image_name)
    # NOTE(review): hard-coded host/port; consider request.host_url instead.
    download_url = f"http://localhost:5000/uploads/output-{image_name}"
    return jsonify({'data': download_url})
if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network -- disable debug outside development.
    app.run(host='0.0.0.0', debug=True)
"gaurab.panthee@docsumo.com"
] | gaurab.panthee@docsumo.com |
7af99e7d5fd22934c2b33436ca2f37d167428b35 | 441c216de6c2ca3f1e9f80317e950ca4fc2fc27d | /hackerrank/solutions/fibonacci_modified.py | 880c1e1eac2c77d4ff7d1f988517ec7d4b1dcfd4 | [] | no_license | briansu2004/Unscience-Computer-Hackerrank | 53bc926ff68500b7df06672ac075bf19d5a7537b | 23e2a7239d110d51af4f9e093bd36dfd52d38943 | refs/heads/master | 2023-03-18T07:45:07.296229 | 2020-06-15T00:21:08 | 2020-06-15T00:21:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | #!/bin/python3
#
# Problem Author: PRASHANTB1984 @ hackerrank
# Difficulty: Medium
# link: https://www.hackerrank.com/challenges/fibonacci-modified/problem
#
# Solution
# Author: Byung Il Choi (choi@byung.org)
#
import math
import os
import random
import re
import sys
# Complete the fibonacciModified function below.
def fibonacciModified(t1, t2, n):
    """Return the n-th term of the series t(i+2) = t(i) + t(i+1)**2.

    t1 and t2 are the first two terms and n is 1-based. Python's arbitrary
    precision integers absorb the doubly-exponential growth of the terms.

    Bug fix: the original loop assigned ``t2 = ti`` -- an undefined name --
    so every call with n >= 3 raised NameError; the new term is ``t_i``.
    """
    if n == 1:
        return t1
    if n == 2:
        return t2
    t_i = 0
    for _ in range(3, n + 1):
        t_i = t1 + t2 ** 2
        t1 = t2
        t2 = t_i
    return t_i
if __name__ == '__main__':
    # HackerRank harness: read "t1 t2 n" from stdin and write the result to
    # the file path supplied in the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t1T2n = input().split()
    t1 = int(t1T2n[0])
    t2 = int(t1T2n[1])
    n = int(t1T2n[2])
    result = fibonacciModified(t1, t2, n)
    fptr.write(str(result) + '\n')
    fptr.close()
| [
"choi@byung.org"
] | choi@byung.org |
b77adeb1ec4a34291041ac77541134a2b7debf75 | 5abb082407832749f418c803d875b1798a457be6 | /processing/cluster.py | 8cf97e283b6e0a117ebba5ad784592f566a2a773 | [] | no_license | darkflake/ASDAS | 63b4593d77d09132a7365518a1e10db3a40e1c75 | 9dbb63c7f0279270dcd321b7a4456ec55f70bf6f | refs/heads/master | 2022-05-26T11:26:26.497155 | 2020-04-27T15:58:35 | 2020-04-27T15:58:35 | 259,531,939 | 0 | 0 | null | 2020-04-28T04:34:55 | 2020-04-28T04:34:55 | null | UTF-8 | Python | false | false | 2,934 | py | from tslearn.metrics import dtw
from tslearn.barycenters import dtw_barycenter_averaging
class Cluster:
    """One cluster of time series: its DBA centroid plus the per-member
    distance and silhouette bookkeeping used by the surrounding clustering
    code (tslearn DTW metrics, Sakoe-Chiba band of radius 3 throughout).
    """
    def __init__(self, data_count: int, data_points: list, instance: int):
        # center: unused placeholder kept for compatibility (cluster_center
        # below holds the actual centroid).
        self.center = []
        self.count = data_count
        self.data_points = data_points
        self.instance = instance
        self.cluster_center = self.compute_center()
        self.intracluster = self.center_distance()
        # self.threshold = self.calculate_threshold()
        # intercluster must be populated by the caller (distance to the
        # nearest other centroid) before compute_silhouettes is used.
        self.intercluster = []
        self.silhouette_list = []
    def compute_center(self):
        r"""
        Perform DTW-Barycenter Averaging Algorithm and get the centroid of the cluster.
        :return: Cluster Center curve
        """
        return dtw_barycenter_averaging(self.data_points, metric_params={"global_constraint": "sakoe_chiba", "sakoe_chiba_radius": 3},
                                        max_iter=3).tolist()
    def center_distance(self):
        r"""
        Get DTW distance between all the points of the cluster from the centroid of the cluster.
        :return: list of distances of all cluster data points
        """
        center_distances = []
        for point in range(self.count):
            distance = dtw(self.data_points[point], self.cluster_center,
                           global_constraint="sakoe_chiba", sakoe_chiba_radius=3)
            center_distances.append(distance)
        return center_distances
    def compute_silhouettes(self):
        r"""
        Calculate the silhouette score for every point. Uses the formula : (B-A)/max(B, A).
        Where, B = intercluster distance. (distance from nearest cluster centroid other than the one it belongs to.)
              A = intracluster distance. (distance from its cluster centroid.)
        :return: list of silhouette scores for all the data points
        """
        # Requires self.intercluster to have one entry per member; it is
        # empty after __init__ and must be filled externally first.
        silhouettes = []
        for data_index in range(self.count):
            silhouette_score = (self.intercluster[data_index] - self.intracluster[data_index]) / \
                               max(self.intercluster[data_index], self.intracluster[data_index])
            silhouettes.append(silhouette_score)
        self.silhouette_list = silhouettes
    def calculate_threshold(self):
        r"""
        Compute the 90 percentile threshold value for the distance from the cluster centroid.
        (Threshold value to classify a test data point into this cluster)
        :return: single number between the min and max of distances list
        """
        if self.count > 1:
            distances_list = self.intracluster.copy()
            distances_list = sorted(distances_list)
            index = 0.9 * (len(distances_list) + 1)
            # NOTE(review): distances_list[int(index + 1)] can exceed the last
            # valid index for some cluster sizes -- verify the interpolation
            # bounds before re-enabling the threshold in __init__.
            threshold = (index % 1) * (
                    distances_list[int(index + 1)] - distances_list[int(index)]) + distances_list[int(index)]
        else:
            threshold = self.intracluster[0]
        return threshold
| [
"sudhanshu.1.k@gmail.com"
] | sudhanshu.1.k@gmail.com |
0a5eb3316e0630c70e252f7cdd69aefd417c9da0 | 84e952d130dc2fc417caab8f504fcd245fde3d12 | /cnn/activators.py | 68de7879161fd3085591c49588861e8cd0b4087f | [] | no_license | ErZhouEr/mnist | 974b95cdbe25d5fff96558739ebb8fa9fcb6a8e2 | 2f247526098f2c5e316cef89bdbd0aba12d0cfee | refs/heads/master | 2021-01-02T02:54:11.322613 | 2020-03-30T10:00:39 | 2020-03-30T10:00:39 | 239,457,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | #!usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@author:ZHOUCE
@file: activators.py
@time: 2020/03/10
"""
import numpy as np
class ReluActivator(object):
    """Rectified linear unit: f(x) = max(0, x)."""

    @staticmethod
    def ori_func(x):
        # Pass positive values through unchanged; clip everything else to zero.
        return x if x > 0 else 0

    @staticmethod
    def d_func(y):
        # Derivative: 1 on the active side of the hinge, 0 otherwise.
        return int(y > 0)
class SigmodActivator(object):
    """Logistic sigmoid: f(x) = 1 / (1 + e**(-x))."""

    @staticmethod
    def ori_func(x):
        denominator = 1 + np.exp(-x)
        return 1 / denominator

    @staticmethod
    def d_func(y):
        # Derivative expressed through the activation value: y * (1 - y).
        complement = 1 - y
        return y * complement
class IdentityActivator(object):
    """Identity activation: f(x) = x (typically used on linear output layers)."""

    @staticmethod
    def ori_func(x):
        # The value passes through untouched.
        return x

    @staticmethod
    def d_func(y):
        # d/dx of x is the constant 1.
        return 1
class TanhActivator(object):
    """Hyperbolic tangent activation: f(x) = tanh(x).

    Bug fix: ``ori_func`` previously ended with a bare ``return`` and always
    yielded ``None``.  It now returns ``np.tanh(x)``, which is consistent
    with the derivative ``1 - y**2`` implemented in ``d_func``.
    """
    @staticmethod
    def ori_func(x):
        return np.tanh(x)
    @staticmethod
    def d_func(y):
        # tanh'(x) = 1 - tanh(x)**2, written in terms of the output y.
        return 1-y*y
"917846003@qq.com"
] | 917846003@qq.com |
b9c0c5a09024bde9c8a11e132d1b4bd3dc3b41f2 | b5025befdf74fff3071252abaa4db09479f2d763 | /Govind_Gopal/tinkter/std_cl.py | 411265e5cbbd962b9aab9b0d3fe6350c6dee4796 | [] | no_license | sidv/Assignments | d2fcc643a2963627afd748ff4d690907f01f71d8 | d50d668264e2a31581ce3c0544f9b13de18da2b3 | refs/heads/main | 2023-07-30T02:17:19.392164 | 2021-09-23T04:47:56 | 2021-09-23T04:47:56 | 392,696,356 | 1 | 20 | null | 2021-09-23T08:14:11 | 2021-08-04T13:20:43 | Python | UTF-8 | Python | false | false | 166 | py | class Students:
def __init__(self,st_id = 0, name = "", age = 0, st_class = ""):
self.st_id = st_id
self.name = name
self.age = age
self.st_class = st_class
| [
"govind@gmail.com"
] | govind@gmail.com |
14b78e042a17e9782e6aabb2c3817989c186c63e | 7defbcf4dd1b82d1c98b4a7b82f627269fec38e3 | /ExponentialDecay.py | f37194d62c0921f73554744feab4c0f47d1653da | [] | no_license | ZhangLeiCharles/abo_py | 6798b109968483792fcdd953f184e8f69b126efc | 57c3290b25113dc783088a59cd5a4685f6ba145c | refs/heads/master | 2020-09-24T14:42:10.071120 | 2019-08-21T19:28:37 | 2019-08-21T19:28:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py | from GPy.kern import Kern
import numpy as np
class ExponentialDecay(Kern):
    """Rational-quadratic style "exponential decay" kernel for GPy.

    k(x, x') = variance * (1 + (x - x')**2 / (2 * lengthscale**2)) ** (-power)

    Only one-dimensional inputs are supported (``input_dim == 1``).
    """
    def __init__(self,input_dim,variance=1.,lengthscale=1.,power=1.,active_dims=None):
        # Bug fix: ``Param`` was referenced but never imported; import it
        # locally (per the GPy "defining new kernels" tutorial) so the class
        # is actually constructible.
        from GPy.core.parameterization import Param
        super(ExponentialDecay, self).__init__(input_dim, active_dims, 'exponential_decay')
        assert input_dim == 1, "For this kernel we assume input_dim=1"
        self.variance = Param('variance', variance)
        # Bug fix: the parameter display name previously carried a typo
        # ('lengtscale'); it now matches the attribute name.
        self.lengthscale = Param('lengthscale', lengthscale)
        self.power = Param('power', power)
        self.add_parameters(self.variance, self.lengthscale, self.power)

    def K(self,X,X2):
        """Covariance matrix between X and X2 (``X2=None`` means ``X2=X``)."""
        if X2 is None:
            X2 = X
        dist2 = np.square((X-X2.T)/self.lengthscale)
        return self.variance*(1 + dist2/2.)**(-self.power)

    def Kdiag(self,X):
        """Diagonal of K(X, X): the constant ``variance`` for every input."""
        return self.variance*np.ones(X.shape[0])

    def update_gradients_full(self, dL_dK, X, X2):
        """Accumulate gradients of the objective w.r.t. the kernel parameters.

        NOTE(review): the ``dl`` expression below divides by ``self.power``
        inside the base term, which does not match ``K`` as implemented
        above -- confirm against the intended kernel definition before
        relying on these gradients.
        """
        if X2 is None:
            X2 = X
        dist2 = np.square((X-X2.T)/self.lengthscale)

        dvar = (1 + dist2/2.)**(-self.power)
        dl = self.power * self.variance * dist2 * self.lengthscale**(-3) * (1 + dist2/2./self.power)**(-self.power-1)
        dp = - self.variance * np.log(1 + dist2/2.) * (1 + dist2/2.)**(-self.power)

        self.variance.gradient = np.sum(dvar*dL_dK)
        self.lengthscale.gradient = np.sum(dl*dL_dK)
        self.power.gradient = np.sum(dp*dL_dK)

    def update_gradients_diag(self, dL_dKdiag, X):
        """Gradients for the diagonal case; only ``variance`` contributes."""
        self.variance.gradient = np.sum(dL_dKdiag)
        # here self.lengthscale and self.power have no influence on Kdiag so target[1:] are unchanged

    def gradients_X(self,dL_dK,X,X2):
        """derivative of the covariance matrix with respect to X.

        NOTE(review): the base term divides by ``self.lengthscale``, which is
        inconsistent with ``K`` above -- verify before use.
        """
        if X2 is None:
            X2 = X
        dist2 = np.square((X-X2.T)/self.lengthscale)
        dX = -self.variance*self.power * (X-X2.T)/self.lengthscale**2 * (1 + dist2/2./self.lengthscale)**(-self.power-1)
        return np.sum(dL_dK*dX,1)[:,None]

    def gradients_X_diag(self,dL_dKdiag,X):
        # no diagonal gradients
        pass
| [
"fmnyikosa@gmail.com"
] | fmnyikosa@gmail.com |
2af4c73e584fc0be42678e2c99f8a8a70638146a | 4ce4902847b2d8d1c92daf89b35ca8aaff141090 | /Data/Alignment_etc_for_ExaML/examl_constraint_only_ml.py | 75c35c29258cf08487eebc8afa37f78b674b69a9 | [] | no_license | mpnelsen/Lecanoromycetes_megaphylogeny | 0e672615e0d74745a72865d056619c9056b1f45b | 343e0abefdb4853858be3ae680850823818f2ec0 | refs/heads/master | 2022-12-02T01:26:37.628073 | 2020-08-07T17:52:54 | 2020-08-07T17:52:54 | 282,992,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | import random
import subprocess
import os
# Runs a family-constrained maximum-likelihood tree search with ExaML.
#os.mkdir('~/path...')
#file path and name (original alignment)
f="concat.phy"
# Family-level topological constraint tree, passed to ExaML via -g.
cons="family_constraints.tre"
#number of bootstrap replicates
#n=100
#number of processors to use
proc=10
#original data set - make binary, get starting tree, find ml tree
#subprocess.call(args="~/ExaML/parser/parser -s {0} -q 19may2014_concat_partfile.txt -m DNA -n forexaml".format(f), shell=True);
#y=random.randint(1,1000000)
#subprocess.call(args="~/standard-RAxML/raxmlHPC-PTHREADS-SSE3 -T {0} -y -m GTRCAT -q 19may2014_concat_partfile.txt -s {1} -n MLparstart.tre -p {2}".format(proc, f, y), shell=True);
#subprocess.call(args="mpirun.openmpi -np {0} ~/ExaML/examl/examl -s forexaml.binary -D -m PSR -t RAxML_parsimonyTree.MLparstart.tre -n ML_TREE".format(proc), shell=True);
#now...for constraint...
# Convert the alignment + partition file into ExaML's binary format.
subprocess.call(args="~/ExaML/parser/parser -s {0} -q 19may2014_concat_partfile.txt -m DNA -n forexaml_family_cons1".format(f), shell=True);
# Random seed for ExaML's parsimony starting tree (-p).
y=random.randint(1,1000000)
# Run the constrained ML search under MPI on `proc` processors.
subprocess.call(args="mpirun.openmpi -np {0} ~/ExaML/examl/examl -s forexaml_family_cons1.binary -D -m PSR -g {1} -p {2} -n subclass_family_constraint_ML_TREE".format(proc,cons,y), shell=True);
| [
"mpnelsen@gmail.com"
] | mpnelsen@gmail.com |
420c082ad3b9a653211cb49b4e86c1bbf9b0682e | c8542b4b474758c4f0b0e553c5a56e79830a36a9 | /scripts/wof-exportify-repo | 55997157b5140d2831272e2a8c5f155d8c4d207c | [
"BSD-3-Clause"
] | permissive | whosonfirst/py-mapzen-whosonfirst-export | f9b7d569f2135afcbc79ff3cd51f0a4f58606619 | d64fc4a5f6fbe54a35504ad55d8aa41092b6b106 | refs/heads/master | 2022-09-02T08:49:53.205825 | 2020-08-13T17:31:58 | 2020-08-13T17:31:58 | 38,065,450 | 1 | 5 | BSD-3-Clause | 2020-01-31T17:21:01 | 2015-06-25T18:06:06 | Python | UTF-8 | Python | false | false | 1,102 | #!/usr/bin/env python
# -*-python-*-
import os
import sys
import logging
import mapzen.whosonfirst.export
import mapzen.whosonfirst.utils
if __name__ == '__main__':
    import optparse
    # Command line: -R/--repo must point at a Who's On First repository checkout.
    opt_parser = optparse.OptionParser()
    opt_parser.add_option('-R', '--repo', dest='repo', action='store', default=None, help='')
    opt_parser.add_option('-v', '--verbose', dest='verbose', action='store_true', default=False, help='Be chatty (default is false)')
    options, args = opt_parser.parse_args()
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    repo = os.path.abspath(options.repo)
    data = os.path.join(repo, "data")
    # Bail out early if the repo or its data/ directory is missing.
    for path in (repo, data):
        if not os.path.exists(path):
            logging.error("%s does not exist" % path)
            sys.exit(1)
    # Walk data/, inflating each feature, and re-export it back to disk.
    exporter = mapzen.whosonfirst.export.flatfile(data)
    crawl = mapzen.whosonfirst.utils.crawl(data, inflate=True)
    for feature in crawl:
        exporter.export_feature(feature)
    sys.exit(0)
| [
"aaron@mapzen.com"
] | aaron@mapzen.com | |
d3341f8f3b280ac55142cb872ec1aff5fa734881 | f140cb021cc0f402ed388a1a991f1e150e7e55df | /Python_DL_Project/Source Code/age_gender_detector.py | fdbd0ac47b4c1fbb29c397ff5e851011b5cc249d | [] | no_license | rupadoppalapudi/Python_DeepLearning-2021Spring | c8183bbaed2acb7878dc5d395eb0b4def26f31cb | 7432dccb9e3158c74e13cf115978a304d8bc4ea2 | refs/heads/main | 2023-04-11T13:51:27.673075 | 2021-05-01T02:01:01 | 2021-05-01T02:01:01 | 332,598,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,712 | py | # Age and Gender detection of a person based on the image by passing it to the machine learning algorithm
# importing required modules
import cv2 as cv
# import time
# extracting face of the person from image
def extract_face(net, image, conf_threshold=0.7):
    """Detect faces in ``image`` using the DNN detector ``net``.

    Returns a tuple ``(annotated, boxes)`` where ``annotated`` is a copy of
    the image with green rectangles drawn around each detection and ``boxes``
    is a list of ``[x1, y1, x2, y2]`` pixel coordinates, one per face whose
    confidence exceeds ``conf_threshold``.
    """
    annotated = image.copy()
    height = annotated.shape[0]
    width = annotated.shape[1]
    # Pre-process into a 300x300 mean-subtracted blob, as the detector expects.
    blob = cv.dnn.blobFromImage(annotated, 1.0, (300, 300), [104, 117, 123], True, False)
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    thickness = int(round(height / 150))
    for idx in range(detections.shape[2]):
        score = detections[0, 0, idx, 2]
        if score <= conf_threshold:
            continue
        # Detection coordinates are normalised; scale back to pixels.
        corners = [
            int(detections[0, 0, idx, 3] * width),
            int(detections[0, 0, idx, 4] * height),
            int(detections[0, 0, idx, 5] * width),
            int(detections[0, 0, idx, 6] * height),
        ]
        boxes.append(corners)
        cv.rectangle(annotated, (corners[0], corners[1]), (corners[2], corners[3]),
                     (0, 255, 0), thickness, 8)
    return annotated, boxes
# Model definition/weight files for the three Caffe/TF networks.
face_Proto = "models/opencv_face_detector.pbtxt" # protocol buffer
face_Model = "models/opencv_face_detector_uint8.pb"
age_Proto = "models/age_deploy.prototxt" # deploys age model
age_Model = "models/age_net.caffemodel" # defines the internal states of layer parameters
gender_Proto = "models/gender_deploy.prototxt" # deploys gender model
gender_Model = "models/gender_net.caffemodel" # defines the internal states of layer parameters
# Per-channel mean values subtracted from the face crop before classification.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# set age and gender category
# Labels are index-aligned with the networks' softmax outputs.
age_category = ['(0-3)', '(4-7)', '(8-15)', '(16-23)', '(24-33)', '(34-45)', '(46-54)', '(55-100)']
gender_category = ['Male', 'Female']
# loading the network - face, age and gender network
face_network = cv.dnn.readNet(face_Model, face_Proto)
age_network = cv.dnn.readNet(age_Model, age_Proto)
gender_network = cv.dnn.readNet(gender_Model, gender_Proto)
# Extra pixels added around each detected face before classification.
padding = 20
# age and gender detection of the person based on the image
def age_gender_detector(image):
    """Annotate ``image`` with an "age,gender" label for every detected face.

    Runs the module-level face detector, then classifies each padded face
    crop with the age and gender networks, printing the raw predictions to
    the console. Returns the annotated frame.
    """
    annotated, boxes = extract_face(face_network, image)
    img_h = image.shape[0]
    img_w = image.shape[1]
    for box in boxes:
        left, top, right, bottom = box[0], box[1], box[2], box[3]
        # Crop the face with some padding, clamped to the image borders.
        face = image[max(0, top - padding):min(bottom + padding, img_h - 1),
                     max(0, left - padding):min(right + padding, img_w - 1)]
        blob = cv.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
        # Gender classification.
        gender_network.setInput(blob)
        gender_pred = gender_network.forward()
        gender = gender_category[gender_pred[0].argmax()]
        print("Gender Output: {}, conf = {:f}".format(gender, gender_pred[0].max()))
        # Age classification (same blob).
        age_network.setInput(blob)
        age_pred = age_network.forward()
        age = age_category[age_pred[0].argmax()]
        print("Age Output : {}".format(age_pred))
        print("Age : {}, conf = {:f}".format(age, age_pred[0].max()))
        # Draw the "age,gender" label just above the face box.
        cv.putText(annotated, "{},{}".format(age, gender), (left, top - 10),
                   cv.FONT_ITALIC, 0.8, (0, 0, 255), 2,
                   cv.FILLED)
    return annotated
# displaying the output image along with age and gender indication
input_image = cv.imread("rh.PNG")
output_image = age_gender_detector(input_image)
# Show the annotated frame and wait for any key press before exiting.
cv.imshow("image", output_image)
cv.waitKey(0)
"70282754+rupadoppalapudi@users.noreply.github.com"
] | 70282754+rupadoppalapudi@users.noreply.github.com |
e32c252e8271fa08227d5821a55cb10ebdfc74ad | e30b1e04fc5471ff3996def08ec58e6c91b7b523 | /migrations/versions/ab1e57abf110_adds_customer_model.py | 0de9deee0b44ad1b696c56c1c9a61906c1330808 | [] | no_license | lisa1501/retro-video-store | bae33ad1357ac271711e627f5ea609ae14cfc7d4 | ea0456007febeeeab41b23e3adae39473213ae74 | refs/heads/master | 2023-08-08T08:01:30.096798 | 2021-05-21T23:07:50 | 2021-05-21T23:07:50 | 368,365,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | """adds customer model
Revision ID: ab1e57abf110
Revises:
Create Date: 2021-05-18 12:47:26.828064
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ab1e57abf110'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``customer`` and ``video`` tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('customer',
    sa.Column('customer_id', sa.Integer(), nullable=False),
    sa.Column('customer_name', sa.String(), nullable=True),
    sa.Column('customer_postal_code', sa.String(), nullable=True),
    sa.Column('customer_phone', sa.String(), nullable=True),
    sa.Column('customer_registered_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('customer_id')
    )
    op.create_table('video',
    sa.Column('video_id', sa.Integer(), nullable=False),
    sa.Column('video_title', sa.String(), nullable=True),
    sa.Column('video_release_date', sa.DateTime(), nullable=True),
    sa.Column('video_total_inventory', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('video_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables created in ``upgrade`` (no dependency ordering needed)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('video')
    op.drop_table('customer')
    # ### end Alembic commands ###
| [
"halisasaipulla@gmail.com"
] | halisasaipulla@gmail.com |
6038802b027f5c0482af3d4095a3a38ed01c354b | 90fe944f97b868e38cffcbbe94fa47286d6d1e68 | /setDataset.py | 6cf5a098f1acbf36cff87ba4cf8397c915bca74e | [] | no_license | weichen283/FaceRecognition-pytorch | 8b8f76f36097c8bdc3173ebdd03c202b49a48bce | 1dc8a633302239cf6b3a7595b54e768c68d6603b | refs/heads/main | 2023-07-03T02:21:36.840918 | 2021-08-08T06:44:24 | 2021-08-08T06:44:24 | 393,675,726 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | import sys
import os
import cv2
import dlib
input_dir = './photo_download'
output_dir = './face_photos/other_faces'
size = 64
if not os.path.exists(output_dir):
os.makedirs(output_dir)
detector = dlib.get_frontal_face_detector()
index = 1
for (path, dirnames, filenames) in os.walk(input_dir):
for filename in filenames:
if filename.endswith('.jpg'):
print('Being processed picture %s' % index)
img_path = path+'/'+filename
img = cv2.imread(img_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
dets = detector(gray_img, 1)
for i, d in enumerate(dets):
x1 = d.top() if d.top() > 0 else 0
y1 = d.bottom() if d.bottom() > 0 else 0
x2 = d.left() if d.left() > 0 else 0
y2 = d.right() if d.right() > 0 else 0
face = img[x1:y1, x2:y2]
face = cv2.resize(face, (size,size))
cv2.imshow('image', face)
cv2.imwrite(output_dir+'/'+str(index)+'.jpg', face)
index += 1
key = cv2.waitKey(30) & 0xff
if key == 27:
sys.exit(0)
| [
"xwchcooo@gmail.com"
] | xwchcooo@gmail.com |
4c344ba89021d350f8aa299a728e88ef44bb7a78 | b44498e47fb5b9766920af01d6af4890c6dd054a | /python27/1.0/Lib/site-packages/pypm/common/repository.py | e9dd0ff821cf352c994447ea5d8e293f52e576cf | [] | no_license | zhaoxiaohui/xunlei_vip | 4489dde49716640bb58cd2a210bce06e607ea1fb | 465a9d937ce04c784dca14cc60dffecce4041d32 | refs/heads/master | 2020-03-30T06:21:13.865619 | 2015-10-15T05:13:09 | 2015-10-15T05:13:09 | 38,826,640 | 23 | 17 | null | null | null | null | UTF-8 | Python | false | false | 28,875 | py | # Copyright (c) 2010 ActiveState Software Inc. All rights reserved.
"""
pypm.common.repository
~~~~~~~~~~~~~~~~~~~~~~
Code related to repository functions
"""
import os
from os import path as P
import logging
import gzip
from hashlib import md5
from contextlib import closing
from collections import namedtuple
import tempfile
import shutil
import re
import json
from fnmatch import fnmatch
import pkg_resources
import six.moves
from applib import sh
from applib import log
from applib import textui
from applib import _cmdln as cmdln
from applib import _simpledb
# from applib.misc import require_option
from applib.misc import xjoin
from pypm.common import net
from pypm.common import python
from pypm.common import supported
from pypm.common.util import BareDateTime
from pypm.common.util import ConfigParserNamedLists
from pypm.common.util import dlocked
from pypm.common.util import url_join
from pypm.common.util import path_to_url
from pypm.common.net import URLProperties
from pypm.common.package import BinaryPackage
from pypm.common.package import RepoPackage
from pypm.common.package import PackageFile
LOG = logging.getLogger(__name__)
class MultiRepositoryConfig(object):
    """Represent a config file with multiple repositories
    see src/pypm/client/client.conf[repository] for an example; also see the
    class docstring for pypm.common.util.ConfigParserNamedLists
    """
    # (name, location) pair describing a single configured repository.
    RepositoryLocation = namedtuple('RepositoryLocation', 'name location')
    def __init__(self, *configfiles):
        """Read ``configfiles`` and build the name -> locations mapping."""
        self.cfg = six.moves.configparser.SafeConfigParser()
        self.cfg.read(configfiles)
        self.namedlists = ConfigParserNamedLists(
            self.cfg.items('repository'),
            self.RepositoryLocation,
            self._is_location)
    def get_locations(self, repository_locations):
        """Resolve a separator-delimited string of names and/or paths/URLs.
        Entries that look like a path or URL become unnamed locations; every
        other entry must be a repository name declared in the config file,
        otherwise ValueError is raised.
        """
        assert isinstance(repository_locations, str)
        locations = []
        for l in ConfigParserNamedLists.VALUE_SEP.split(repository_locations):
            if self._is_location(l):
                locations.append(self.RepositoryLocation('<unnamed>', l))
            elif l not in self.namedlists.mapping:
                raise ValueError(
                    'repository name "{0}" is not found; available ones are: {1}'.format(
                        l, self.namedlists.mapping.keys()))
            else:
                locations.extend(self.namedlists.mapping[l])
        return locations
    def get_urls(self, repository_locations):
        """Like ``get_locations`` but return URLs; local paths are converted."""
        return [path_to_url(l.location) if '://' not in l.location else l.location
                for l in self.get_locations(repository_locations)]
    def _is_location(self, l):
        """Return True if `l` is a path or url"""
        return '/' in l or '\\' in l
class MultiRepositorySet(object):
    """A group of mutually-exclusive repository sets (``RepositorySet``)
    Example::
        rex/free
        rex/be
        rex/testing
        rex/stable
    Each repository set contains packages that are not in the others. The
    pattern specifying the mapping for packages -> repository-set is defined in
    a config file (see etc/activestate.conf:[packages]mapping). The same config
    file also defines the names ('free', 'be', ..) for the repository sets.
    """
    def __init__(self, path, configfile):
        """
        - path: base path to the multirepositoryset
        - configfile: config file defining repository names and mapping
        """
        self.path = path
        self.mrc = MultiRepositoryConfig(configfile)
        self.reposets = {} # name -> RepositorySet
        self.mapping = [] # [(pattern, reponame), ...]
        self._init()
    def _init(self):
        """Build ``self.reposets`` and the pattern -> name ``self.mapping``."""
        # One RepositorySet per configured name, rooted under self.path/<name>.
        for name, repository_locations in self.mrc.namedlists.mapping.items():
            assert len(repository_locations) == 1
            repository_location = repository_locations[0]
            self.reposets[name] = RepositorySet(
                P.join(self.path, name), name, repository_location.location)
        # Parse the [packages]mapping lines: "<glob-pattern> <reposet-name>".
        for line in self.mrc.cfg.get('packages', 'mapping').split('\n'):
            line = line.strip()
            if line and not line.startswith('#'):
                pattern, repo = line.split()
                self.mapping.append((pattern.strip(), repo.strip()))
    def get_repository(self, bpkg):
        """Return the repository where ``bpkg`` is mapped to.
        Pick the appropriate repository respected by the mapping
        (``self.mapping``). Name of the bpkg is used in matching the patterns
        in self.mapping. pyver and osarch of bpkg is finally used in picking up
        the underlying repository in the choosen reposet.
        - bpkg: An instance of ``pypm.common.package.BinaryPackage``
        """
        bpkg_fn = bpkg.make_filename()
        # First glob pattern matching the package filename wins.
        for pattern, name in self.mapping:
            if fnmatch(bpkg_fn, pattern):
                return self.reposets[name].get_repository(
                    bpkg.pyver, bpkg.osarch
                )
        # NOTE(review): implicitly returns None when no pattern matches.
    def __iter__(self):
        """Iter over available repositories"""
        for name in sorted(self.reposets.keys()):
            for repo in self.reposets[name]:
                yield repo
    def __str__(self):
        return "<MultiRepositorySet: reposets=\n %s\n/>" % '\n '.join(
            [str(x) for x in self.reposets.items()])
#
# RepositorySet
#
class RepositorySet(object):
    """A set of repositories
    This set includes repositories for each platform/pyver combination. An
    example would be the 'free' repository set::
        $ tree -L 2 free/
        free/
        |-- 2.6
        |   |-- linux-x86
        |   |-- linux-x86_64
        |   |-- macosx
        |   |-- win32-x86
        |   `-- win64-x64
        |-- 2.7
        |   |-- linux-x86
        |   |-- linux-x86_64
        |   |-- macosx
        |   |-- win32-x86
        |   `-- win64-x64
        `-- 3.1
            |-- linux-x86
            |-- linux-x86_64
            |-- macosx
            |-- win32-x86
            `-- win64-x64
    """
    def __init__(self, path, name, url):
        self.path = path
        self.name = name
        self.url = url
    def create_repositories(self):
        """Create repositories for supported configuration"""
        for osarch in supported.os_architectures:
            for pyver in supported.py_versions:
                sh.mkdirs(P.join(self.path, pyver, osarch))
    def get_repository(self, pyver, osarch, autocreate=False):
        """Return the Repository for ``pyver``/``osarch`` under this set."""
        path = xjoin(self.path, pyver, osarch)
        url = '/'.join([self.url, pyver, osarch])
        if autocreate:
            # create, if does not already exist
            sh.mkdirs(path)
        return Repository(path, self.name, pyver, osarch, url)
    def __iter__(self):
        """Iter over all supported repositories
        If a supported repository does not exists, simply create the repository
        directory before returning it.
        """
        for pyver in supported.py_versions:
            for osarch in supported.os_architectures:
                yield self.get_repository(pyver, osarch, autocreate=True)
    def __str__(self):
        return '{0}<{1.path}, {1.url}>'.format(self.__class__.__name__, self)
    __repr__ = __str__
#
# Repository
#
class Repository(object):
    """Repository directory containing packages and index"""
    def __init__(self, path, name, pyver, osarch, url):
        from pypm.web.uptree import UpTree # XXX: put uptree out of 'web'
        self.path = path
        self.name = name
        self.pyver = pyver
        self.osarch = osarch
        self.url = url
        # UpTree caches per-package metadata files so repeated scans are cheap.
        self.uptree = UpTree(
            self.path,
            content_cache_filenames=['info.json', 'imports'],
            mtime_cache_filenames=['log'],
        )
    def _update_uptree(self):
        """Refresh the uptree cache; log only when something actually changed."""
        counters = self.uptree.update() # potentially long-operation
        if any(counters.values()):
            LOG.info('Uptree was updated: %s', counters)
        return counters
    def find_packages(self):
        """Return available packages in the repository"""
        LOG.info("Updating uptree for %s", self)
        self._update_uptree()
        LOG.info("Retrieving files via uptree for %s", self)
        return [p for p in self.uptree.get_files() if p.endswith('.pypm')]
    def find_all_packages(self):
        """Return all packages whether succeeded or failed
        Return a list of tuples of the form:
            (pkgfile, pkgdir, succeeded)
        where:
            pkgfile - path to the package file (may not exist)
            pkgdir - path to the package dir (.d/ directory)
            succeeded - True of the pkgfile exists
        """
        self._update_uptree()
        processed = set()
        for fil in textui.ProgressBar.iterate(self.uptree.get_files(), note='Files'):
            if fil.endswith('.pypm'):
                pkgfile, pkgdir, succeeded = fil, fil + '.d', True
            elif fil.endswith('.pypm.d'):
                # A .d directory may exist even when the build failed and the
                # .pypm file itself is absent.
                pkgfile, pkgdir = fil[:-2], fil
                succeeded = self.uptree.exists(pkgfile)
            else:
                continue
            # A package may be seen twice (via .pypm and via .pypm.d).
            if pkgfile not in processed:
                processed.add(pkgfile)
                yield pkgfile, pkgdir, succeeded
    def __str__(self):
        return '{0}<{1.path}>'.format(self.__class__.__name__, self)
#
# RepositoryIndex
#
class RepoPackageDatabase(_simpledb.SimpleDatabase):
    """A database containing instances of ``pypm.common.package.RepoPackage``"""
# Bind the RepoPackage model to the database with its composite primary key.
_simpledb.setup(RepoPackageDatabase, RepoPackage,
                primary_keys=['name', 'version',
                              'pyver', 'osarch',
                              'pkg_version'])
class RepositoryIndex(object):
    """Index of packages in a repository
    Repositories can optionally have index files .. which are especially useful
    when the repository is only available remotely over the wire.
    The index file contains a list of all packages along with their metadata and
    the relative location of the package file.
    There is just one index file:
    - index | index.gz - list of packages that are available (sqlite)
    """
    def __init__(self, repository):
        assert isinstance(repository, Repository)
        self.repository = repository
    def get_index(self):
        """Return an existing index as ``RepoPackageDatabase``
        Returned index database corresponds to a temporary file (as the index
        file is originally compressed; it needs to be extracted to a temporary
        location) .. hence any attempts to "write" on the returned index
        database will be futile.
        """
        return RepoPackageDatabase(_ungzip(xjoin(self.repository.path, 'index.gz')))
    def generate_index(self):
        """Generated the repository index file (`index.gz`)
        index.gz is the compressed sqlite index containing all of the succeeded
        packages in the repository pool.
        Return the number of packages added to the repository index.
        """
        from pypm.grail.package import PackageShare
        assert P.exists(self.repository.path)
        idx_path = xjoin(self.repository.path, 'index')
        idx_gz_path = idx_path + '.gz'
        # Start from scratch: remove any stale uncompressed index.
        sh.rm(idx_path)
        db = RepoPackageDatabase(idx_path, touch=True)
        # Tag BE packages; so client may use it to determine if a package is
        # available only to BE customers or not.
        # See also: RepoPackage.requires_be_license property
        pkgtags = 'be' if self.repository.name == 'be' else ''
        # Load package-specific data from share/p/*
        pkgdata = dict([(s.name, s) for s in PackageShare.all()])
        with closing(db):
            LOG.debug('finding packages in %s', self.repository.path)
            packages = self.repository.find_packages()
            LOG.debug('processing %d packages', len(packages))
            rpkg_list = [
                RepoPackage.create_from(
                    BinaryPackage(**self._read_info_json(pkgfile)),
                    relpath=P.relpath(pkgfile, self.repository.path),
                    tags=pkgtags)
                for pkgfile in textui.ProgressBar.iterate(packages, note="Package")
            ]
            for rpkg in rpkg_list:
                # Optimize index size by removing the "description" field.
                # PyPI's descriptions are typically very long - see
                # http://pypi.python.org/pypi/zc.buildout for example - hence we
                # must remove them from the index.
                rpkg.description = ''
                if rpkg.name in pkgdata:
                    # Add package notes to the description^Wextra field
                    # See pypm.common.package.RepoPackage.FIELDS to understand
                    # why we are abusing this field.
                    notes = list(pkgdata[rpkg.name].get_notes_for(
                        pyver=rpkg.pyver, osarch=rpkg.osarch))
                    rpkg.description = json.dumps({
                        'notes': notes
                    })
                    LOG.debug('Patching "description" field for %s', rpkg)
            # keep only the latest pkg_version in index
            LOG.debug("pruning older pkg_version's")
            rpkg_list = _prune_older_binary_releases(rpkg_list)
            LOG.debug('.. resulting in %d packages', len(rpkg_list))
            LOG.info(' writing index (please wait) ...')
            with db.transaction() as session:
                session.add_all(rpkg_list)
                session.commit()
                session.close()
            LOG.info(' compressing index: ...%s%s',
                     os.path.basename(idx_gz_path),
                     (' (%d)' % len(rpkg_list)) if rpkg_list else '')
            sh.rm(idx_gz_path)
            with closing(gzip.open(idx_gz_path, 'wb')) as f:
                f.write(open(idx_path, 'rb').read())
        # The uncompressed working copy is no longer needed.
        sh.rm(idx_path)
        return len(rpkg_list)
    def _read_info_json(self, pypm_file):
        """Read cached info.json (as dict) from the .d/ directory
        If cached version is missing, read from the package file itself, which
        would be an expensive operation.
        """
        info_json_loc = xjoin(pypm_file + '.d', 'info.json')
        try:
            s = self.repository.uptree.open_and_read(info_json_loc)
        except IOError as e:
            # There seems to be no .d/info.json file; perhaps this is a
            # 'custom' that is not managed by pypm-builder. So let's extract
            # info.json from the package file (.pypm) even though that is
            # expensive (so we will also warn the user)
            LOG.info(
                'ALERT: Cache file (.d/info.json) missing; extracting from %s', pypm_file)
            s = PackageFile(pypm_file).retrieve_info_json()
        d = json.loads(s)
        # It is not clear whether info.json's "name" field is canonical (i.e.,
        # lower case safe version of name, that is guaranteed to be same).
        # Therefore, we do one final conversion there.
        d['name'] = pkg_resources.safe_name(d['name']).lower()
        return d
def _prune_older_binary_releases(packages):
"""Prune all older releases (pkg_version) of the package"""
releases = {}
for pkg in packages:
key = (pkg.full_name, pkg.version, pkg.pyver, pkg.osarch)
if key in releases:
prevrel = releases[key]
if pkg.pkg_version == prevrel.pkg_version:
raise IOError('duplicate packages in repository: %s; %s' % \
(prevrel.relpath, pkg.relpath))
elif pkg.pkg_version > prevrel.pkg_version:
releases[key] = pkg
else:
releases[key] = pkg
return releases.values()
#
# classes for managing remote repositories
#
class RemoteRepositorySet(object):
    """A RepositorySet that lives at a remote URL."""

    def __init__(self, url):
        self.url = url

    def get_repository(self, pyenv, osarch):
        """Return the RemoteRepository serving ``pyenv``/``osarch``."""
        assert isinstance(pyenv, python.BasePythonEnvironment)
        # ActiveState-hosted repositories are addressed by the full
        # ActivePython version (e.g. 2.6.6.16 instead of 2.6); the server
        # handles the redirection. This lets the repository URL be
        # controlled per ActivePython release.
        hosted_by_activestate = re.search(r'pypm.*\.activestate\.com', self.url)
        ver = pyenv.apyver if hosted_by_activestate else pyenv.pyver
        return RemoteRepository(url_join(self.url, [ver, osarch]))
class RemoteRepository(object):
    """Represent a remotely available Repository"""
    # Filename of the actual remote index file
    REMOTE_INDEX_FILENAME = "index.gz"
    def __init__(self, url):
        self.url = url
    def download_index(self, target_file, force, verbose=True, interactive=True):
        """Download repository index unless it was downloaded recently (Etag)
        - force: Do not use cache; always download
        - verbose: If False, try not to print (LOG.info) anything to console
          unless an actual download happens.
        Return True if download actually happened.
        """
        def start_info(status):
            # Suppress cache-hit messages unless verbose output was requested.
            if status == 'Hit' and not verbose:
                return None
            # Bug fix: ``six.moves.urlparse`` does not exist; the function
            # lives at ``six.moves.urllib.parse.urlparse``.
            return '%s: [%s] :repository-index:' % (
                status,
                six.moves.urllib.parse.urlparse(self.url).netloc)
        index_url = url_join(self.url, [self.REMOTE_INDEX_FILENAME])
        try:
            idxgz_file, downloaded = net.download_file(index_url, P.dirname(target_file), {
                'use_cache': not force,
                'save_properties': True,
                'start_info': start_info,
            }, interactive=interactive)
            if not downloaded:
                # index was not updated (cache hit / unmodified ETag)
                return False
        # Bug fix: ``six.moves.HTTPError`` does not exist; the exception
        # class is ``six.moves.urllib.error.HTTPError``.
        except six.moves.urllib.error.HTTPError as e:
            if e.code == 404: # Not Found
                raise ValueError(
                    '{0.url} does not appear to be a valid repository '
                    'because {1} is missing.'.format(self, index_url))
            raise
        # extract index.gz (REMOTE_INDEX_FILENAME) to index (target_file)
        try:
            with closing(gzip.open(idxgz_file, 'rb')) as f:
                with open(target_file, 'wb') as f2:
                    f2.write(f.read())
        except:
            # If an error occurs during extraction, simply delete the index file
            # (so that it will get properly synced during next sync)
            sh.rm(target_file)
            sh.rm(idxgz_file)
            raise
        return True
    def get_unique_id(self):
        """Return an alpha-numeric ID unique to this repository (URL)"""
        return md5(self.url.encode('utf8')).hexdigest()
    def __str__(self):
        return '{0.__class__.__name__}<{0.url}>'.format(self)
class RemoteRepositoryManager(object):
    """Manage multiple remote repositories with a local cache"""
    def __init__(self, path):
        # local cache directory where repository indexes will be stored
        self.path = path
        sh.mkdirs(path)
    def get_index_db(self, remote_repository):
        """Return the index database for remote repository
        If necessary, download the index automatically
        """
        return RepoPackageDatabase(
            self.get_local_index_path(
                remote_repository))
    def sync_repository(self, remote_repository, force, verbose=True, interactive=True):
        """Sync the cache for a remote repository"""
        # Serialize concurrent syncs against the same cache directory.
        with dlocked(self.path):
            assert isinstance(remote_repository, RemoteRepository)
            idx_path = self.get_local_index_path(remote_repository)
            sh.mkdirs(P.dirname(idx_path))
            return remote_repository.download_index(
                idx_path, force, verbose, interactive=interactive)
    def get_remote_index_last_download_attempt_time(self, remote_repository):
        """Return the UTC datetime when the index file was last *attempted* to
        download
        The download may not have happened, however, due to unmodified ETag.
        If no index is available (as in, 'pypm sync' or an equivalent was never
        run in the first place), return None
        """
        original_index_file = P.join(
            P.dirname(self.get_local_index_path(remote_repository)),
            remote_repository.REMOTE_INDEX_FILENAME)
        urlprops = URLProperties(original_index_file).load()
        if urlprops:
            return BareDateTime.to_datetime(
                urlprops.custom['last_attempt_utc'])
        # Falls through to an implicit None when no properties were saved.
    def get_local_index_path(self, remote_repository):
        """Path of the cached (uncompressed) index for ``remote_repository``."""
        return xjoin(
            self.path,
            remote_repository.get_unique_id(),
            'index')
#
# cmdln
#
@cmdln.option('-r', '--repository-path',
help='Local repository path')
@cmdln.option('-R', '--multi-repository-set-path',
help='MultiRepositorySet path')
class Commands(log.LogawareCmdln):
name = "pypm-repository"
    def initialize(self):
        """Validate the mutually-exclusive -r/-R repository options."""
        # require_option(self.options, 'configfile')
        # require_option(self.options, 'multi_repository_set_path')
        # Exactly one of --repository-path (-r) and
        # --multi-repository-set-path (-R) must be supplied.
        if self.options.multi_repository_set_path and self.options.repository_path:
            raise ValueError('must pass either, not both, of -r or -R')
        elif not (self.options.multi_repository_set_path or self.options.repository_path):
            raise ValueError('must pass either of -r or -R')
    @cmdln.option('', '--filter',
                  help='Filter repo paths by a glob filter; eg: free; free/2.?; macosx')
    @cmdln.option('', '--only-new', action="store_true",
                  help="Only writes for packages with no 'files' or 'imports'")
    def _do_write_files_list(self, subcmd, opts):
        """${cmd_name}: Generate .d/files and .d/imports files for all packages
        ${cmd_usage}
        ${cmd_option_list}
        """
        with self.bootstrapped():
            mreposet = MultiRepositorySet(
                self.options.multi_repository_set_path,
                self.options.configfile
            )
            skipped = 0
            LOG.info('Generating .d/<files,imports> for repositories in: %s', mreposet.path)
            with textui.longrun(LOG):
                for repo in mreposet:
                    # --filter matches a glob against the repository path.
                    if opts.filter and not fnmatch(repo.path, '*'+opts.filter+'*'):
                        skipped += 1
                        continue
                    LOG.info('')
                    LOG.info('->> {0.name:6}{0.pyver:6}{0.osarch:15}'.format(repo))
                    LOG.info('   %s', repo.path)
                    from pypm.builder.mason import _extract_files_list
                    for pypm_file in textui.ProgressBar.iterate(repo.find_packages()):
                        # --only-new: skip packages whose caches already exist.
                        if opts.only_new:
                            if all([P.exists(pypm_file + '.d/imports'),
                                    P.exists(pypm_file + '.d/files')]):
                                continue
                        modules = _extract_files_list(pypm_file)
                        LOG.info('P: %s -> %s', os.path.basename(pypm_file), modules)
            if skipped:
                LOG.warn('skipped %d repositories', skipped)
def do_dirty(self, subcmd, opts, *paths):
"""${cmd_name}: Mark the given paths are dirty (for uptree)
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
mreposet = MultiRepositorySet(
self.options.multi_repository_set_path,
self.options.configfile
)
for path in paths:
for repo in mreposet:
if path.startswith(repo.path + '/'):
cnt = repo.uptree.mark_dirty(path)
LOG.info('%d dirty marks set for: %s', cnt, path)
break
else:
LOG.error('Not a path in any repository: %s', path)
@cmdln.option('-n', '--dry-run', action="store_true", default=False,
help="Perform a dry-run (grep for potential overwrites)")
@cmdln.option('-f', '--force', action="store_true", default=False,
help="Allow overwrites (disabled by default)")
def do_copy_custom(self, subcmd, opts, *paths):
"""${cmd_name}: Copy packages into the appropriate repository
Use this command to *manually* copy the *custom* PyPM packages that
won't be built by the *automated* pypm-builder/grail. This includes the
following cases,
1. "extra" PyPM packages available in the "GoldBits" directory of
ActivePython (eg: as.openssl)
2. Proprietary packages (eg: pyimsl from VNI)
Use ``MultiRepositoryConfig`` (etc/activestate.conf) to configure how to
allocate the custom packages, i.e., where to put them in "free" or "be"
repo.
Example::
$ bin/pypm-repository -c etc/activestate.conf copy_custom \
$NAS/ActivePython/2.7.0.2/GoldBits/internal/extra/*.pypm
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
mreposet = MultiRepositorySet(
self.options.multi_repository_set_path,
self.options.configfile
)
for path in paths:
bpkg = PackageFile(path).to_binary_package()
repo = mreposet.get_repository(bpkg)
target_path = P.join(
repo.path, 'pool', bpkg.name[0], bpkg.name[:2],
os.path.basename(path))
sh.mkdirs(P.dirname(target_path))
action = 'OVERWRITE' if P.exists(target_path) else 'CREATE'
LOG.info('%s %s %s', action, repo.name, target_path)
if not opts.dry_run:
if P.exists(target_path) and not opts.force:
raise IOError('cannot overwrite: %s' % target_path)
sh.cp(path, target_path)
repo.uptree.mark_dirty(target_path)
@cmdln.option('', '--filters', default='free,be',
help='Filter repo paths by a glob filter; eg: free; free/2.?; macosx')
def do_genidx(self, subcmd, opts):
"""${cmd_name}: Generate the index file for all available repositories
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
if self.options.multi_repository_set_path:
self.genidx_mreposet(opts.filters.split(','))
else:
self.genidx_repository()
def genidx_mreposet(self, filters):
logsdir = xjoin(self.options.multi_repository_set_path, '_logs')
with log.archivedby(logging.getLogger('pypm'),
logsdir,
'repository_genidx',
level=logging.INFO,
formatter=logging.Formatter('%(asctime)s %(message)s')):
mreposet = MultiRepositorySet(
self.options.multi_repository_set_path,
self.options.configfile
)
skipped = 0
LOG.info('Generating indices for repositories in: %s', mreposet.path)
with textui.longrun(LOG):
for repo in mreposet:
if filters and not any([fnmatch(repo.path, '*'+f+'*') for f in filters]):
skipped += 1
continue
LOG.info('')
LOG.info('-> {0.name:6}{0.pyver:6}{0.osarch:15}'.format(repo))
LOG.info(' %s', repo.path)
idx = RepositoryIndex(repo)
idx.generate_index()
if skipped:
LOG.info('ALERT: skipped %d repositories', skipped)
def genidx_repository(self):
repo = Repository(
self.options.repository_path, '<unnamed>',
pyver='<multiple>', osarch='<multiple>', url='<nourl>')
idx = RepositoryIndex(repo)
idx.generate_index()
def main():
    """Console entry point: run the pypm-repository command dispatcher."""
    Commands(install_console=True, default_verbosity=1).main()
#
# -- internal --
#
def _ungzip(path):
"""Ungzip a compressed to a temporary location
Return the extracted location
"""
with closing(gzip.open(path, 'rb')) as f:
with tempfile.NamedTemporaryFile(delete=False) as f2:
shutil.copyfileobj(f, f2)
return f2.name
| [
"fire.guy@163.com"
] | fire.guy@163.com |
f8dc0b1aee58d7979e7f5d482179c4af4a3c1ae0 | a937504aac6f6b954473b25dd1da8cca9f90be4e | /sphere_drop.py | c155de9f73444e761ebb59414f3497e369974b4d | [] | no_license | zizochan/blender_python | b822a2288e29766d5eb7b58a7b8856ee4e9f3be6 | aa519db917a94768f3ba2bb02e374f9069395b51 | refs/heads/master | 2021-04-28T01:06:56.560046 | 2018-05-27T11:26:39 | 2018-05-27T11:30:22 | 122,267,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,400 | py | # -*- coding: utf-8 -*-
# import
import bpy
# config
FRAME = {"start": 0, "end": 200}  # animation frame range
BLOCK_NUMBER = {"x": 6, "y": 6, "z": 30}  # blocks per axis in the tower
BLOCK_SIZE = {"x": 0.5, "y": 0.5, "z": 1.5}  # size of one block (Blender units)
# color (0-255 RGB; normalized to 0.0-1.0 when assigned to materials)
START_COLOR = {"r": 75, "g": 0, "b": 130}  # gradient start
END_COLOR = {"r": 25, "g": 25, "b": 112}  # gradient end
FLOOR_COLOR = {"r": 0, "g": 250, "b": 154}
SPHERE_COLOR = {"r": 255, "g": 215, "b": 0}
# mass (rigid-body masses)
BLOCK_MASS = 1000
SHERE_MASS = 100000  # NOTE(review): likely a typo for SPHERE_MASS; kept for compatibility
# const - overall tower dimensions derived from block counts and sizes
FIELD_SIZE = {"x": BLOCK_NUMBER["x"] * BLOCK_SIZE["x"], "y": BLOCK_NUMBER["y"] * BLOCK_SIZE["y"], "z": BLOCK_NUMBER["z"] * BLOCK_SIZE["z"]}
def set_frame_size(start_frame, end_frame):
    """Set the scene's animation range to [start_frame, end_frame]."""
    scene = bpy.context.scene
    scene.frame_start = start_frame
    scene.frame_end = end_frame
def delete_all_objects():
    """Unlink every object from the current scene (Blender 2.7x API)."""
    for item in bpy.context.scene.objects:
        bpy.context.scene.objects.unlink(item)
def create_cube(x, y, z):
    """Create one block at grid position (x, y, z) and color it."""
    cube = create_primitive_cube(x, y, z)
    add_color_material(cube, x, y, z)
def create_primitive_cube(x, y, z):
    """Add a rigid-body cube at grid position (x, y, z); return the object."""
    pos_x = x * BLOCK_SIZE["x"] - BLOCK_SIZE["x"] * 0.5
    pos_y = y * BLOCK_SIZE["y"] - BLOCK_SIZE["y"] * 0.5
    pos_z = z * BLOCK_SIZE["z"] * 0.98 + BLOCK_SIZE["z"] * 0.5 # multiply by 0.98: a small vertical gap appears between blocks otherwise (cause unknown)
    bpy.ops.mesh.primitive_cube_add(location=(pos_x, pos_y, pos_z), rotation=(0,0,0))
    bpy.ops.rigidbody.object_add()
    obj = bpy.context.scene.objects.active
    obj.rigid_body.angular_damping = 0
    obj.rigid_body.mass = BLOCK_MASS
    # The default cube is 2 units across, so scale by half the block size.
    obj.scale = (BLOCK_SIZE["x"] * 0.5, BLOCK_SIZE["y"] * 0.5, BLOCK_SIZE["z"] * 0.5)
    return obj
def add_color_material(obj, x, y, z):
    """Attach a new diffuse material whose color follows the position gradient."""
    new_colors = {}
    for key in ["r", "g", "b"]:
        new_colors[key] = color_with_gradation(key, x, y, z)
    mat = bpy.data.materials.new('Cube')
    mat.diffuse_color = (new_colors["r"], new_colors["g"], new_colors["b"])
    obj.data.materials.append(mat)
def color_with_gradation(key, x, y, z):
    """Return the normalized (0.0-1.0) color component *key* for grid cell
    (x, y, z), linearly interpolated from START_COLOR to END_COLOR by the
    cell's Manhattan depth (x + y + z) in the grid.
    """
    now_step = x + y + z
    max_step = BLOCK_NUMBER["x"] + BLOCK_NUMBER["y"] + BLOCK_NUMBER["z"]
    if max_step < 1:
        # Bug fix: this early return previously yielded the raw 0-255 value,
        # while the normal path returns a 0.0-1.0 value.  Normalize here too.
        return START_COLOR[key] / 255
    color_diff = END_COLOR[key] - START_COLOR[key]
    return (START_COLOR[key] + (color_diff / max_step) * now_step) / 255
def create_floor():
    """Add a large passive (static) plane under the tower to catch the blocks."""
    pos_x = FIELD_SIZE["x"] * 0.5
    pos_y = FIELD_SIZE["y"] * 0.5
    pos_z = 0
    bpy.ops.mesh.primitive_plane_add(location=(pos_x, pos_y, pos_z))
    # PASSIVE: the floor participates in collisions but never moves.
    bpy.ops.rigidbody.object_add(type='PASSIVE')
    obj = bpy.context.scene.objects.active
    obj.scale = (FIELD_SIZE["x"] * 3, FIELD_SIZE["y"] * 3, 1)
    obj.rigid_body.collision_margin = 0
    mat = bpy.data.materials.new('Floor')
    mat.diffuse_color = (FLOOR_COLOR["r"] / 255, FLOOR_COLOR["g"] / 255, FLOOR_COLOR["b"] / 255)
    obj.data.materials.append(mat)
def create_sphere():
    """Add the heavy bouncing sphere above the center of the tower."""
    pos_x = FIELD_SIZE["x"] * 0.5
    pos_y = FIELD_SIZE["y"] * 0.5
    # Start a few block-heights above the top of the tower.
    pos_z = FIELD_SIZE["z"] + BLOCK_SIZE["z"] * 3
    bpy.ops.mesh.primitive_uv_sphere_add(location=(pos_x, pos_y, pos_z))
    bpy.ops.rigidbody.object_add()
    obj = bpy.context.scene.objects.active
    obj.rigid_body.angular_damping = 0
    obj.rigid_body.mass = SHERE_MASS
    obj.rigid_body.restitution = 1  # fully elastic bounce
    scale = BLOCK_NUMBER["x"] * 0.8
    obj.scale = (scale, scale, scale)
    mat = bpy.data.materials.new('Shpere')
    mat.diffuse_color = (SPHERE_COLOR["r"] / 255, SPHERE_COLOR["g"] / 255, SPHERE_COLOR["b"] / 255)
    obj.data.materials.append(mat)
def set_camera():
    """Add a camera looking at the tower and make it the scene camera."""
    # Place the camera proportionally to the tower height.
    camera_range = FIELD_SIZE["z"] * 1.2
    x_pos = camera_range
    y_pos = camera_range * -1
    z_pos = camera_range
    bpy.ops.object.camera_add(
        location=(x_pos, y_pos, z_pos),
        rotation=(1.2, 0, 0.75)  # radians; roughly aimed back at the origin
    )
    obj = bpy.context.scene.objects.active
    bpy.context.scene.camera = obj
def set_lamp():
    """Add a sun lamp at the origin, tilted to light the scene."""
    x_pos = 0
    y_pos = 0
    z_pos = 0
    bpy.ops.object.lamp_add(
        location=(x_pos, y_pos, z_pos),
        rotation=(0.79, 0, 0),  # ~45 degrees in radians
        type = "SUN"
    )
def create_blocks():
    """Build the hollow tower: full-height block columns along the grid perimeter."""
    for x in range(1, BLOCK_NUMBER["x"] + 1):
        for y in range(1, BLOCK_NUMBER["y"] + 1):
            # Only create blocks on the outer perimeter (hollow tower).
            if x > 1 and x < BLOCK_NUMBER["x"] and y > 1 and y < BLOCK_NUMBER["y"]:
                continue
            for z in range(0, BLOCK_NUMBER["z"]):
                create_cube(x, y, z)
# main - build the scene: frame range, empty scene, tower, sphere, floor, camera, light
set_frame_size(FRAME["start"], FRAME["end"])
delete_all_objects()
create_blocks()
create_sphere()
create_floor()
set_camera()
set_lamp()
| [
"zizo.toiawase@gmail.com"
] | zizo.toiawase@gmail.com |
bdf6f258d52e429ac7db157c730375ffbb5a503b | 4fdee88add8859003daed07f907052f05b106fe9 | /py_toolbox/copy.py | 89d813553a5b243d6532087afb0847061ad2ca2e | [
"MIT"
] | permissive | koya-ken/py-toolbox | f7db82cc57ae90b428a200ba4f487142ce998757 | 6c5dbda3197d47a1263aef8e30528b1bf1d8862c | refs/heads/master | 2020-12-03T07:09:29.355933 | 2020-02-15T16:02:00 | 2020-02-15T16:02:00 | 231,237,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | import os
from tqdm import tqdm
def copyfileobj(fromfile, tofile, callback=None, length=16*1024, overwrite=False):
    """Copy *fromfile* to *tofile* in chunks of *length* bytes.

    If *tofile* is a directory, the copy goes into it under the source's
    basename.  Unless *overwrite* is true, an existing destination (or a
    matching file size) aborts the copy.  *callback*, when given, is
    invoked with the number of bytes written for each chunk.
    """
    if os.path.isdir(tofile):
        tofile = os.path.join(tofile, os.path.basename(fromfile))
    src_size = os.path.getsize(fromfile)
    dst_size = os.path.getsize(tofile) if os.path.exists(tofile) else 0
    if not overwrite:
        if os.path.exists(tofile):
            return
        if src_size == dst_size:
            return
    with open(fromfile, 'rb') as fsrc, open(tofile, 'wb') as fdst:
        # Read fixed-size chunks until EOF (empty bytes sentinel).
        for chunk in iter(lambda: fsrc.read(length), b''):
            fdst.write(chunk)
            if callback is not None:
                callback(len(chunk))
def copyfileprogress(fromfile, tofile, length=16*1024, overwrite=False, showmessage=True):
    """Copy *fromfile* to *tofile*, showing a tqdm progress bar.

    NOTE(review): the destination/overwrite pre-checks below duplicate the
    ones inside copyfileobj(); they are repeated here so the message and
    progress bar are only shown when a copy will actually happen.
    """
    if os.path.isdir(tofile):
        tofile = os.path.join(tofile, os.path.basename(fromfile))
    size = os.path.getsize(fromfile)
    tosize = os.path.getsize(tofile) if os.path.exists(tofile) else 0
    if os.path.exists(tofile) and not overwrite:
        return
    if not overwrite and size == tosize:
        return
    if showmessage:
        fromname = os.path.basename(fromfile)
        toname = os.path.basename(tofile)
        print(fromname, '=>', toname)
    # Each chunk copied advances the bar by the chunk's byte count.
    progress = tqdm(total=size)
    copyfileobj(fromfile, tofile, lambda x: progress.update(x), length)
| [
"koya.smp@gmail.com"
] | koya.smp@gmail.com |
52f0dad9a36e69dae81cc0744cf62af43ac22ba0 | 4c2572e33eb5858487dd85cda48515ad642974d3 | /python/tvm/relay/testing/tf.py | 0f98f2033e27a7ab86bc31cf616a0fa1551de2a3 | [] | no_license | jack16888/tvm-yolov3 | abb430e0e11261f0e54f890f25a7e554340ff9f1 | c53477c0f40cade632344d1484a9079f5f2deb1a | refs/heads/master | 2022-04-07T23:11:42.441089 | 2020-03-20T01:18:28 | 2020-03-20T01:18:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,255 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
"""
Tensorflow Model Helpers
========================
Some helper definitions for tensorflow models.
"""
import re
import os.path
import collections
import numpy as np
# Tensorflow imports
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tvm.contrib.download import download_testdata
try:
tf_compat_v1 = tf.compat.v1
except ImportError:
tf_compat_v1 = tf
######################################################################
# Some helper functions
# ---------------------
def ProcessGraphDefParam(graph_def):
    """Type-checks and possibly canonicalizes `graph_def`.

    Parameters
    ----------
    graph_def : Obj
        tensorflow graph definition.

    Returns
    -------
    graph_def : Obj
        tensorflow graph definition
    """

    if not isinstance(graph_def, graph_pb2.GraphDef):
        # `graph_def` could be a dynamically-created message, so try a duck-typed
        # approach
        try:
            old_graph_def = graph_def
            graph_def = graph_pb2.GraphDef()
            graph_def.MergeFrom(old_graph_def)
        except TypeError:
            raise TypeError('graph_def must be a GraphDef proto.')
    return graph_def
def AddShapesToGraphDef(session, out_node):
    """ Add shapes attribute to nodes of the graph.
        Input graph here is the default graph in context.

    Parameters
    ----------
    session : tf.Session
        Tensorflow session
    out_node : String or List[String]
        Final output node(s) of the graph.

    Returns
    -------
    graph_def : Obj
        tensorflow graph definition with shapes attribute added to nodes.
    """
    # convert_variables_to_constants expects a list of node names; normalize
    # a single name to a one-element list so both old branches collapse.
    out_nodes = out_node if isinstance(out_node, list) else [out_node]
    graph_def = tf_compat_v1.graph_util.convert_variables_to_constants(
        session,
        session.graph.as_graph_def(add_shapes=True),
        out_nodes,
    )
    return graph_def
class NodeLookup(object):
    """Converts integer node ID's to human readable labels."""

    def __init__(self,
                 label_lookup_path=None,
                 uid_lookup_path=None):
        # node_lookup: dict mapping integer node id -> human-readable label.
        self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

    def load(self, label_lookup_path, uid_lookup_path):
        """Loads a human readable English name for each softmax node.

        Parameters
        ----------
        label_lookup_path: String
            File containing String UID to integer node ID mapping .

        uid_lookup_path: String
            File containing String UID to human-readable string mapping.

        Returns
        -------
        node_id_to_name: dict
            dict from integer node ID to human-readable string.
        """
        if not tf_compat_v1.gfile.Exists(uid_lookup_path):
            tf.logging.fatal('File does not exist %s', uid_lookup_path)
        if not tf_compat_v1.gfile.Exists(label_lookup_path):
            tf.logging.fatal('File does not exist %s', label_lookup_path)

        # Loads mapping from string UID to human-readable string
        proto_as_ascii_lines = tf_compat_v1.gfile.GFile(uid_lookup_path).readlines()
        uid_to_human = {}
        p = re.compile(r'[n\d]*[ \S,]*')
        for line in proto_as_ascii_lines:
            parsed_items = p.findall(line)
            uid = parsed_items[0]
            human_string = parsed_items[2]
            uid_to_human[uid] = human_string

        # Loads mapping from string UID to integer node ID.
        node_id_to_uid = {}
        proto_as_ascii = tf_compat_v1.gfile.GFile(label_lookup_path).readlines()
        for line in proto_as_ascii:
            if line.startswith('  target_class:'):
                target_class = int(line.split(': ')[1])
            if line.startswith('  target_class_string:'):
                target_class_string = line.split(': ')[1]
                # Strip the surrounding quote and trailing newline/quote.
                node_id_to_uid[target_class] = target_class_string[1:-2]

        # Loads the final mapping of integer node ID to human-readable string
        node_id_to_name = {}
        for key, val in node_id_to_uid.items():
            if val not in uid_to_human:
                tf.logging.fatal('Failed to locate: %s', val)
            name = uid_to_human[val]
            node_id_to_name[key] = name

        return node_id_to_name

    def id_to_string(self, node_id):
        # Unknown ids map to the empty string rather than raising.
        if node_id not in self.node_lookup:
            return ''
        return self.node_lookup[node_id]
def get_workload_official(model_url, model_sub_path):
    """ Import workload from tensorflow official

    Parameters
    ----------
    model_url: str
        URL from where it will be downloaded.

    model_sub_path: str
        Sub path in extracted tar for the frozen protobuf file.

    Returns
    -------
    model_path: str
        Full path to saved model file
    """
    model_tar_name = os.path.basename(model_url)
    model_path = download_testdata(model_url, model_tar_name, module=['tf', 'official'])
    dir_path = os.path.dirname(model_path)

    import tarfile
    # endswith accepts a tuple of suffixes, and the context manager
    # guarantees the archive is closed even if extraction fails.
    if model_path.endswith(("tgz", "gz")):
        with tarfile.open(model_path) as tar:
            tar.extractall(path=dir_path)
    else:
        raise RuntimeError('Could not decompress the file: ' + model_path)
    return os.path.join(dir_path, model_sub_path)
def get_workload(model_path, model_sub_path=None):
    """ Import workload from frozen protobuf

    Parameters
    ----------
    model_path: str
        model_path on remote repository to download from.

    model_sub_path: str
        Model path in the compressed archive.

    Returns
    -------
    graph_def: graphdef
        graph_def is the tensorflow workload.
    """
    if model_sub_path:
        path_model = get_workload_official(model_path, model_sub_path)
    else:
        repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'
        model_url = os.path.join(repo_base, model_path)
        path_model = download_testdata(model_url, model_path, module='tf')

    # Creates graph from saved graph_def.pb.
    with tf_compat_v1.gfile.FastGFile(path_model, 'rb') as f:
        # Use the tf_compat_v1 alias (defined at module top) consistently:
        # plain tf.GraphDef / tf.import_graph_def are not available in TF2.
        graph_def = tf_compat_v1.GraphDef()
        graph_def.ParseFromString(f.read())
        tf_compat_v1.import_graph_def(graph_def, name='')
        return graph_def
#######################################################################
# PTB LSTMBlockCell Model
# -----------------------
class PTBSmallConfig(object):
    """Small config.
    These configurations are used when training the model
    """
    num_layers = 2      # number of stacked LSTM layers
    num_steps = 1       # unrolled sequence length per step
    hidden_size = 200   # LSTM hidden-state size
    batch_size = 1
    vocab_size = 10000  # PTB vocabulary size
    init_scale = 0.1    # weight-initialization range
def get_config():
    """Configuration used for training the model (small PTB setup)."""
    return PTBSmallConfig()
def pick_from_weight(weight, pows=1.0):
    """Identify token from Softmax output.

    Raises each probability to *pows*, then returns the index where the
    cumulative mass first reaches half of the total (the weighted median).
    This token will be mapped to word in the vocabulary.
    """
    scaled = np.power(weight, pows)
    cumulative = np.cumsum(scaled)
    half_total = 0.5 * scaled.sum()
    return int(np.searchsorted(cumulative, half_total))
def do_tf_sample(session, data, in_states, num_samples):
    """Sampled from the model

    Feeds *data* token-by-token to prime the LSTM state, then generates
    *num_samples* tokens, each drawn from the softmax output via
    pick_from_weight().  Returns (samples, final_state).
    """
    samples = []
    sample = None
    #Cell inputs c and h should be passed for each layer explicitly.
    state_input_name = ['Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros:0',
                        'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1:0',
                        'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros:0',
                        'Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1:0']
    state = session.run(state_input_name)

    #Graph nodes to be fetched as run output. Tensorflow LSTMBlockCell create internal
    #nodes for intermediate operations (gates) in the cell during run.
    #Cell state (c) is ':1'and cell output (h) is ':6' for each layer.
    fetches = [['Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1',
                'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6',
                'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1',
                'Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6'],
               'Model/Softmax:0']

    def _get_feed_dict(input_name, input_data):
        """Create feed dict"""
        feed_dict = {}
        if isinstance(input_data, list):
            for i, e in enumerate(input_name):
                feed_dict[e] = input_data[i]
        else:
            feed_dict[input_name] = input_data
        return feed_dict

    # Priming phase: run each input token through the model, carrying state.
    for x in data:
        feed_dict = _get_feed_dict(state_input_name, state)
        feed_dict['Model/Placeholder:0'] = [[x]]
        state, probs = session.run(fetches, feed_dict)
        sample = pick_from_weight(probs[0])
    if sample is not None:
        samples.append(sample)
    else:
        samples.append(0)

    # Generation phase: feed the previous sample back in each step.
    k = 1
    while k < num_samples:
        feed_dict = _get_feed_dict(state_input_name, state)
        feed_dict['Model/Placeholder:0'] = [[samples[-1]]]
        state, probs = session.run(fetches, feed_dict)
        sample = pick_from_weight(probs[0])
        samples.append(sample)
        k += 1
    return samples, state
def _create_ptb_vocabulary(data_dir):
    """Read the PTB sample data input to create vocabulary

    Returns (word_to_id, id_to_word) built from the PTB training file,
    with words ordered by descending frequency (ties broken alphabetically).
    """
    data_path = os.path.join(data_dir, 'simple-examples/data/')
    file_name = 'ptb.train.txt'

    def _read_words(filename):
        """Read the data for creating vocabulary"""
        with tf_compat_v1.gfile.GFile(filename, "r") as f:
            # Newlines become the <eos> sentence marker before tokenizing.
            return f.read().encode("utf-8").decode("utf-8").replace("\n", "<eos>").split()

    def _build_vocab(filename):
        """Create vocabulary"""
        data = _read_words(filename)
        counter = collections.Counter(data)
        # Sort by descending count, then alphabetically for stable ids.
        count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*count_pairs))
        word_to_id = dict(zip(words, range(len(words))))
        #for python 3.x
        id_to_word = dict((v, k) for k, v in word_to_id.items())
        return word_to_id, id_to_word

    def ptb_raw_data(data_path, file_name):
        """Read the sample data and create vocabulary"""
        train_path = os.path.join(data_path, file_name)
        word_to_id, id_2_word = _build_vocab(train_path)
        return word_to_id, id_2_word
    return ptb_raw_data(data_path, file_name)
def get_workload_ptb():
    """ Import ptb workload from frozen protobuf

    Parameters
    ----------
        Nothing.

    Returns
    -------
    graph_def: graphdef
        graph_def is the tensorflow workload for ptb.

    word_to_id : dict
        English word to integer id mapping

    id_to_word : dict
        Integer id to English word mapping
    """
    sample_repo = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
    sample_data_file = 'simple-examples.tgz'
    sample_url = sample_repo+sample_data_file
    ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'
    import tarfile
    file_path = download_testdata(sample_url, sample_data_file, module=['data', 'ptb_data'])
    dir_path = os.path.dirname(file_path)
    # Bug fix: the TarFile was never closed; use a context manager so the
    # archive handle is released even if extraction raises.
    with tarfile.open(file_path, 'r') as t:
        t.extractall(dir_path)

    word_to_id, id_to_word = _create_ptb_vocabulary(dir_path)
    return word_to_id, id_to_word, get_workload(ptb_model_file)
| [
"gary30404@gmail.com"
] | gary30404@gmail.com |
6f254e66ef72d1178913366f837aa9804f7e8752 | 5502979ea9649e4dc556544285f1f943f560655d | /collatz.py | 802327c1ce51233d0bb33244d6d8e5828b75d277 | [] | no_license | imcdonald2/Python | 36f7cb7370b3405ab4d6884fddcf7bbb3c692fe6 | 7baa4ef58f3abbd57c856b2c3eb90990224021f2 | refs/heads/master | 2021-01-19T13:38:56.140524 | 2017-08-20T10:11:29 | 2017-08-20T10:11:29 | 100,851,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def collatz(number):
while number != 1:
if number % 2 == 0:
number = number / 2
print(number)
elif number % 2 == 1:
number = number * 3 + 1
print(number)
collatz(53453) | [
"gobizku@gmail.com"
] | gobizku@gmail.com |
2c5fcb6a21f19430ac8de3c70be24f2e6b1711a8 | 90cad1df7b7d424feb8e71ff3d77e772d446afdf | /test/test_reebill/test_excel_export.py | 56fabef7c4bff8d7f0b9276c42aab2347c599839 | [] | no_license | razagilani/billing | acb8044c22b4075250c583f599baafe3e09abc2e | fd2b20019eeedf0fcc781e5d81ff240be90c0b37 | refs/heads/master | 2021-05-01T14:46:32.138870 | 2016-03-09T18:55:09 | 2016-03-09T18:55:09 | 79,589,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,940 | py | from copy import deepcopy
from datetime import date, datetime
from itertools import cycle
from StringIO import StringIO
import unittest
import mock
from core import init_config, init_model
from core.model import Register, Charge, Session, Utility, Address,\
Supplier, RateClass, UtilityAccount
from core.model.utilbill import UtilBill, Charge
from reebill.reports.excel_export import Exporter
from reebill.reebill_model import ReeBill, Payment
from reebill.reebill_dao import ReeBillDAO
from reebill.payment_dao import PaymentDAO
from test import create_tables
class ExporterSheetTest(unittest.TestCase):
def setUp(self):
#Set up the mock
self.mock_StateDB = mock.create_autospec(ReeBillDAO)
self.payment_dao = mock.Mock(autospec=PaymentDAO)
self.exp = Exporter(self.mock_StateDB, self.payment_dao)
def test_get_reebill_details_dataset(self):
def make_reebill(id, month):
result = mock.Mock(autospec=ReeBill)
result.id = id
result.sequence = 1
result.version = 0
result.issued = 1
result.utilbills = [mock.Mock(autospec=UtilBill)]
result.billing_address = 'Monroe Towers, Silver Spring, MD'
result.service_address = 'Monroe Towers, Silver Spring, MD'
result.issue_date = date(2013, 4, 1)
result.ree_value = 4.3
result.prior_balance = 2.20
result.payment_received = None
result.total_adjustment = 0.00
result.balance_forward = 62.29
result.ree_charge = 122.20
result.late_charge = 32.2
result.balance_due = 5.01
result.get_period.return_value = (
date(2011, month, 3), date(2011, month+1, 2))
result.get_total_actual_charges.return_value = 743.49
result.get_total_hypothetical_charges.return_value = 980.33
result.get_total_renewable_energy.return_value = 188.20
return result
def make_payment(date_applied, amount):
result = mock.Mock(autospec=Payment)
result.date_applied = date_applied
result.credit = amount
return result
def get_payments_for_reebill_id(id):
if id == 1:
return [
make_payment(datetime(2011, 1, 30, 0, 0, 0), 400.13), # '10003'
make_payment(datetime(2011, 2, 01, 0, 0, 0), 13.37)
]
else:
return []
self.payment_dao.get_payments_for_reebill_id.side_effect = \
get_payments_for_reebill_id
self.mock_StateDB.get_all_reebills_for_account.side_effect = cycle([
[make_reebill(1, 1)], # For account '10003'
[make_reebill(2, 2), make_reebill(3, 3), make_reebill(4, 4)] # 10004
])
# No start or end date
dataset = self.exp.get_export_reebill_details_dataset(
['10003', '10004'])
correct_data = [
('10003', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-01-03', '2011-02-02', '980.33', '743.49', '4.30',
'2.20', None, '2011-01-30', '400.13', '0.00', '62.29',
'122.20', 32.2, '5.01', '', '-117.90', '-117.90',
'188.20', '1.26'),
('10003', 1, 0, None, None, None, None, None, None, None,
None, None, None, '2011-02-01', '13.37', None, None,
None, None, None, None, None, None, None, None),
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-02-03', '2011-03-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-117.90', '188.20',
'1.26'),
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-03-03', '2011-04-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-235.80', '188.20',
'1.26'),
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-04-03', '2011-05-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-353.70', '188.20',
'1.26')
]
for indx,row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Only start date
dataset = self.exp.get_export_reebill_details_dataset(
['10003', '10004'], begin_date=date(2011, 4, 1))
correct_data = [
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-04-03', '2011-05-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-117.90', '188.20',
'1.26')
]
for indx,row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Only end date
dataset = self.exp.get_export_reebill_details_dataset(
['10003', '10004'], end_date=date(2011, 3, 5))
correct_data = [
('10003', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-01-03', '2011-02-02', '980.33', '743.49', '4.30',
'2.20', None, '2011-01-30', '400.13', '0.00', '62.29',
'122.20', 32.2, '5.01', '', '-117.90', '-117.90',
'188.20', '1.26'),
('10003', 1, 0, None, None, None, None, None, None, None,
None, None, None, '2011-02-01', '13.37', None, None,
None, None, None, None, None, None, None, None),
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-02-03', '2011-03-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-117.90', '188.20',
'1.26')
]
for indx,row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Start and end date
dataset = self.exp.get_export_reebill_details_dataset(
['10003', '10004'], begin_date=date(2011, 2, 1),
end_date=date(2011, 3, 5))
correct_data = [
('10004', 1, 0, u'Monroe Towers, Silver Spring, MD',
u'Monroe Towers, Silver Spring, MD', '2013-04-01',
'2011-02-03', '2011-03-02', '980.33', '743.49', '4.30',
'2.20', None, None, None, '0.00', '62.29', '122.20',
32.2, '5.01', '', '-117.90', '-117.90', '188.20',
'1.26')
]
for indx,row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
def test_account_charges_sheet(self):
def make_utilbill(month):
result = mock.Mock(autospec=UtilBill)
result.period_start = datetime(2013, month, 3)
result.period_end = datetime(2013, month+1, 4)
result.state = UtilBill.Complete
return result
def make_charge(group, desc, number):
result = mock.Mock(autospec=Charge)
result.total = number
result.group = group
result.description = desc
return result
def make_reebill(seq):
result = mock.Mock(autospec=ReeBill)
result.sequence = seq
ub = make_utilbill(seq)
result.utilbills = [ub]
result.utilbill = ub
return result
r1 = make_reebill(1)
r1.utilbill.charges = [make_charge(x,y,z) for x,y,z in [
('Group1', "Description1", 1.11),
('Group1', "Description2", 2.22),
('Group2', "Description3", 3.33),
]]
r2 = make_reebill(2)
r2.utilbill.charges = [make_charge(x,y,z) for x,y,z in [
('Group1', "Description1", 4.44),
('Group2', "Description2", 5.55),
('Group2', "Description3", 6.66),
]]
r3 = make_reebill(3)
r3.utilbill.charges = [make_charge(x,y,z) for x,y,z in [
('Group1', "Description1", 4.44),
('Group2', "Description2", 5.55),
('Group2', "Description3", 6.66),
]]
# No start date or end date
dataset = self.exp.get_account_charges_sheet('999999', [r1, r2, r3])
correct_data = [('999999', 1, '2013-01-03', '2013-02-04', '2013-01',
'No', '1.11', '2.22', '3.33', ''),
('999999', 2, '2013-02-03', '2013-03-04', '2013-02',
'No', '4.44', '', '6.66', '5.55'),
('999999', 3, '2013-03-03', '2013-04-04', '2013-03',
'No', '4.44', '', '6.66', '5.55')]
headers = ['Account', 'Sequence', 'Period Start', 'Period End',
'Billing Month', 'Estimated', 'Group1: Description1',
'Group1: Description2', 'Group2: Description3',
'Group2: Description2']
self.assertEqual(headers, dataset.headers)
for indx, row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Only start date
dataset = self.exp.get_account_charges_sheet(
'999999', [r1, r2, r3], start_date=datetime(2013, 2, 1))
correct_data = [('999999', 2, '2013-02-03', '2013-03-04', '2013-02',
'No', '4.44', '5.55', '6.66'),
('999999', 3, '2013-03-03', '2013-04-04', '2013-03',
'No', '4.44', '5.55', '6.66')]
headers = ['Account', 'Sequence', 'Period Start', 'Period End',
'Billing Month', 'Estimated', 'Group1: Description1',
'Group2: Description2', 'Group2: Description3']
self.assertEqual(headers, dataset.headers)
for indx, row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Only end date
dataset = self.exp.get_account_charges_sheet(
'999999', [r1, r2, r3], end_date=datetime(2013, 3, 5))
correct_data = [('999999', 1, '2013-01-03', '2013-02-04', '2013-01',
'No', '1.11', '2.22', '3.33', ''),
('999999', 2, '2013-02-03', '2013-03-04', '2013-02',
'No', '4.44', '', '6.66', '5.55')]
headers = ['Account', 'Sequence', 'Period Start', 'Period End',
'Billing Month', 'Estimated', 'Group1: Description1',
'Group1: Description2', 'Group2: Description3',
'Group2: Description2']
self.assertEqual(headers, dataset.headers)
for indx, row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
# Both start and end date
dataset = self.exp.get_account_charges_sheet(
'999999', [r1, r2], start_date=datetime(2013, 2, 1),
end_date=datetime(2013, 3, 5))
correct_data = [('999999', 2, '2013-02-03', '2013-03-04', '2013-02',
'No', '4.44', '5.55', '6.66')]
headers = ['Account', 'Sequence', 'Period Start', 'Period End',
'Billing Month', 'Estimated', 'Group1: Description1',
'Group2: Description2', 'Group2: Description3']
self.assertEqual(headers, dataset.headers)
for indx, row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
def test_get_energy_usage_sheet(self):
def make_charge(number):
result = mock.Mock(autospec=Charge)
result.total = number
result.group = str(number)
result.description = ''
return result
#Setup Mock
u1 = mock.Mock(autospec=UtilBill)
u1.utility_account.account = '10003'
u1.rate_class.name = 'DC Non Residential Non Heat'
u1.period_start = date(2011,11,12)
u1.period_end = date(2011,12,14)
u1.charges = [make_charge(x) for x in [3.37, 17.19, 43.7, 164.92,
23.14, 430.02, 42.08, 7.87, 11.2]]
# replacement for document above
register1 = mock.Mock(autospec=Register)
register1.description = ''
register1.quantity = 561.9
register1.unit = 'therms'
register1.estimated = False
register1.reg_type = 'total'
register1.register_binding = Register.TOTAL
register1.active_periods = None
u1._registers = [register1]
u2 = deepcopy(u1)
u2.period_start = date(2011,12,15)
u2.period_end = date(2012,01,14)
u2._registers = [deepcopy(register1)]
dataset = self.exp.get_energy_usage_sheet([u1, u2])
correct_data = [('10003', u'DC Non Residential Non Heat', 561.9, u'therms', '2011-11-12', '2011-12-14', 3.37, 17.19, 43.7, 164.92, 23.14, 430.02, 42.08, 7.87, 11.2),
('10003', u'DC Non Residential Non Heat', 561.9, u'therms', '2011-12-15', '2012-01-14', 3.37, 17.19, 43.7, 164.92, 23.14, 430.02, 42.08, 7.87, 11.2),]
headers = ['Account', 'Rate Class', 'Total Energy', 'Units',
'Period Start', 'Period End', '3.37: ', '17.19: ', '43.7: ',
'164.92: ', '23.14: ', '430.02: ', '42.08: ', '7.87: ',
'11.2: ']
self.assertEqual(headers, dataset.headers)
for indx,row in enumerate(dataset):
self.assertEqual(row, correct_data[indx])
self.assertEqual(len(dataset), len(correct_data))
class ExporterDataBookTest(unittest.TestCase):
    """Integration-style tests for Exporter's workbook-producing methods,
    backed by a real database built from the test configuration."""
    def setUp(self):
        """Initialise the test database and insert one utility account
        ('20001') with a single estimated utility bill.

        NOTE: the call order matters -- config must be loaded before tables
        are created and the model initialised before any Session is used.
        """
        init_config('test/tstsettings.cfg')
        create_tables()
        init_model()
        self.exp = Exporter(ReeBillDAO(), PaymentDAO())
        s = Session()
        # Minimal object graph a UtilityAccount requires: a utility, a
        # supplier and a rate class (plus addresses and an account number).
        utility = Utility(name='New Utility', address=Address())
        s.add(utility)
        supplier = Supplier(name='New Supplier', address=Address())
        s.add(supplier)
        rate_class = RateClass(name='New Rate Class', utility=utility,
                               service='electric')
        s.add(rate_class)
        utility_account = UtilityAccount(
            'some name',
            '20001',
            utility,
            supplier,
            rate_class,
            Address(),
            Address(),
            '1234567890'
        )
        s.add(utility_account)
        # One estimated bill spanning 2010-11-01 .. 2011-02-03, used by the
        # date-filtered export calls below.
        s.add(
            UtilBill(
                utility_account, utility,
                rate_class, supplier=supplier,
                period_start=date(2010, 11, 1), period_end=date(2011, 2, 3),
                date_received=datetime.utcnow().date(),
                state=UtilBill.Estimated,
            )
        )
    def test_exports_returning_binaries(self):
        """
        This test simply calls all export functions returning binaries. This
        way we can at least verify that the code in those functions is
        syntactically correct and calls existing methods
        """
        string_io = StringIO()
        # export_account_charges, with every supported filter combination
        self.exp.export_account_charges(string_io)
        self.exp.export_account_charges(string_io, '20001')
        self.exp.export_account_charges(string_io, '20001', date(2010, 11, 1))
        self.exp.export_account_charges(string_io, '20001',
                                        date(2010, 11, 1), date(2011, 2, 3))
        # export_energy_usage, unfiltered and filtered to the one account
        self.exp.export_energy_usage(string_io)
        the_account = Session().query(UtilityAccount).filter_by(
            account='20001').one()
        self.exp.export_energy_usage(string_io, the_account)
        # export_reebill_details, with every supported filter combination
        self.exp.export_reebill_details(string_io)
        self.exp.export_reebill_details(string_io, '20001')
        self.exp.export_reebill_details(string_io, '20001', date(2010, 11, 1))
        self.exp.export_reebill_details(string_io, '20001',
                                        date(2010, 11, 1), date(2011, 2, 3))
| [
"dklothe@skylineinnovations.com"
] | dklothe@skylineinnovations.com |
013860d7ff4bc86bddb06db3b24f6cdf9fc666b6 | 4e21ec3da6819145e4ff6826f921c419d9a1f20a | /canteenSystem.py | 01d629bd42c81221450f0e8ba13b3db43ef77c69 | [] | no_license | aneezJaheez/NTU-Cafeteria-System | bc6623471e9469eefb6bdbc470278c796d0248bb | a404f28ba3f276cd44a7a41a77f8481cfc0d48ec | refs/heads/master | 2023-03-21T18:48:28.769238 | 2021-02-24T15:50:23 | 2021-02-24T15:50:23 | 279,474,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,261 | py | #Program to view stores, menu, store timings for various eateries in the north spine canteen
from tkinter import *
from tkinter import ttk
from PIL import ImageTk, Image
from tkmacosx import Button
try:
import tkinter as tk
from tkinter import ttk
except ImportError:
import Tkinter as tk
import ttk
from tkcalendar import Calendar, DateEntry
from tkinter.messagebox import showinfo
#the above import statements deal with creation and design of GUI using tkinter
import sys
import datetime #python library to retrieve and manipulate system and user defined date and time
#=================================
#JONATHAN
#=================================
def popup_showinfo():
    """Show a modal error dialog telling the user their input was invalid."""
    # BUG FIX: corrected the typo "Plese" in the user-facing message.
    showinfo("Error", "Please enter a valid input!")
def calcWaitTime(*args):
    """Read the queue length the user typed into `numPeople` and publish the
    estimated wait (1.5 minutes per person) via `waitTime`.

    Pops an error dialog when the entry is not a non-negative integer.
    """
    try:
        queue_length = int(numPeople.get())
    except ValueError:
        # Entry was not an integer at all.
        popup_showinfo()
        return
    if queue_length < 0:
        popup_showinfo()
    else:
        waitTime.set(queue_length * 1.5)
#=================================
#JONATHAN
#=================================
def place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg):
    """Lay out the widgets shared by every storefront page at their fixed
    pixel coordinates on the 800x500 window."""
    # Background fills the whole window; the queue entry, calculate button
    # and result label form the wait-time calculator cluster.
    background_label.place(x=0, y=0, relwidth=1, relheight=1)
    qEntry.place(x=525, y=330, height=30, width=50)
    calcButton.place(x=590, y=328, height=36, width=70)
    waittime_result.place(x=530, y=375, height=20, width=40)
    # Navigation: back to the store list, and quit.
    backButton.place(x=30, y=400, height=50, width=50)
    endProg.place(x=740, y=20, height=34, width=40)
#=================================
#KIM
#=================================
#function to display the menu items of a particular store
def display_menu(f, itemDict):
    """Read "item:price" lines from the open file *f* into *itemDict* and
    show them in a label on the storefront page.

    Note: prices keep their trailing newline from the file, which is what
    puts each menu entry on its own line in the label.
    """
    for line in f:
        fields = line.split(":")
        itemDict[fields[0]] = fields[1]
    menu_text = "".join(name + " : " + price
                        for name, price in itemDict.items())
    itemLabel = ttk.Label(root, text=menu_text, font=('Trattatello'))
    itemLabel.place(x=280, y=180, width=250, height=125)
#=================================
#ANEEZ
#=================================
#Function to set the program to the current system date and time
def SetDateToCurr():
    """Point the module-level clock globals (day_str, time_str, hour_int)
    at the actual system date and time."""
    global day_str, time_str, hour_int
    current = datetime.datetime.now()
    day_str = datetime.datetime.today().strftime("%A")
    time_str = current.strftime("%H:%M:%S")
    hour_int = int(current.hour)
#=================================
#ANEEZ
#=================================
#Function to set the program to a user defined date and time
def SetDateToCustom(cus_day, cus_hour, cus_min):
    """Point the module-level clock globals at a user-chosen day and time.

    cus_day  -- weekday name, e.g. "Friday"
    cus_hour -- hour as a string, "0".."23"
    cus_min  -- minute as a string, "0".."59"
    """
    global day_str
    global time_str
    global hour_int
    # Zero-pad single-digit components so time_str is always HH:MM:SS.
    if int(cus_hour) < 10:
        cus_hour = "0" + cus_hour
    if int(cus_min) < 10:
        cus_min = "0" + cus_min
    day_str = cus_day
    time_str = cus_hour + ":" + cus_min + ":00"
    # BUG FIX: removed the dead store that first assigned the raw *string*
    # to hour_int before immediately overwriting it with the int below.
    hour_int = int(cus_hour)
#=================================
#KIM
#=================================
#Function to end program when user clicks exit
def leavePage():
    """Tear down the main window, which ends the Tk mainloop and lets the
    script run to completion.

    BUG FIX: the old body called exit() inside a bare `except:` block, so
    the SystemExit it raised was silently swallowed and exit() never did
    anything; the bare except also hid real errors.  Destroying the root is
    sufficient to end the program, and only the "already destroyed" TclError
    is worth ignoring.
    """
    try:
        root.destroy()
    except TclError:
        # Window already gone (e.g. the close button pressed twice).
        pass
#=================================
#KIM
#=================================
#noodle store GUI
def Noodle(*args):
    """Noodle-store page: background art, queue-wait calculator, and the
    day-appropriate menu during opening hours (08:00-20:00); a 'closed'
    screen outside them."""
    itemDict = {}  # menu item -> price, filled by display_menu()
    # Artwork for the open and closed views.
    noodle_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Noodles_main.png")
    closed_screen_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Noodles_close.png")
    Noodles_main = ImageTk.PhotoImage(noodle_back_img)
    Noodles_close = ImageTk.PhotoImage(closed_screen_img)
    # Widgets common to every storefront page.
    background_label = Label(root, image=Noodles_main)
    background_label.image = Noodles_main  # keep a reference so Tk doesn't GC the image
    calcButton = Button(root, text = "Calculate", command = calcWaitTime, image=EnterButton)
    waittime_result = ttk.Label(root, textvariable = waitTime)
    qEntry = ttk.Entry(root, textvariable = numPeople)
    backButton = Button(root, text = "", command = curStoresPage, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg)
    if hour_int >= 8 and hour_int < 20:
        # Open: the menu differs on Mon/Wed/Fri vs the other days.
        if day_str in ("Monday", "Wednesday", "Friday"):
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/noodle_mwf.txt"
        else:
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/noodle_tt.txt"
        # BUG FIX: display_menu() used to run unconditionally, raising a
        # NameError ('f' undefined) when the store was closed, and the file
        # was never closed.  The 'with' block fixes both.
        with open(menu_file, "r") as f:
            display_menu(f, itemDict)
    else:
        # Closed: cover the page with the 'closed' artwork, then re-create
        # the navigation buttons so they stack above the overlay.
        noodles_close_label = ttk.Label(root, image= Noodles_close)
        noodles_close_label.image = Noodles_close  # BUG FIX: keep the image alive
        noodles_close_label.place(x=0, y=0, relwidth=1, relheight=1)
        backButton = Button(root, text = "", command = curStoresPage, image = back_button)
        backButton.place(x=30, y=400, height=50, width=50)
        endProg = Button(root, text = "", command = leavePage, image = CloseButton)
        endProg.place(x=740, y=20, height =34, width=40)
    root.bind('<Return>', calcWaitTime)  # Enter key triggers the wait-time calculator
#=================================
#JONATHAN
#=================================
#western store GUI
def Western(*args):
    """Western-store page: background art, queue-wait calculator, and the
    day-appropriate menu during opening hours (08:00-20:00); a 'closed'
    screen outside them."""
    itemDict = {}  # menu item -> price, filled by display_menu()
    # Artwork for the open and closed views.
    western_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Western_main.png")
    store_closed_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Western_close.png")
    Western_main = ImageTk.PhotoImage(western_back_img)
    Western_close = ImageTk.PhotoImage(store_closed_img)
    # Widgets common to every storefront page.
    background_label = Label(root, image=Western_main)
    background_label.image = Western_main  # keep a reference so Tk doesn't GC the image
    calcButton = Button(root, text = "Calculate", command = calcWaitTime, image=EnterButton)
    waittime_result = ttk.Label(root, textvariable = waitTime)
    qEntry = ttk.Entry(root, textvariable = numPeople)
    backButton = Button(root, text = "", command = curStoresPage, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg)
    if hour_int >= 8 and hour_int < 20:
        # Open: the menu differs on Mon/Wed/Fri vs the other days.
        if day_str in ("Monday", "Wednesday", "Friday"):
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/western_mwf.txt"
        else:
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/western_tt.txt"
        # BUG FIX: display_menu() used to run unconditionally, raising a
        # NameError ('f' undefined) when the store was closed, and the file
        # was never closed.  The 'with' block fixes both.
        with open(menu_file, "r") as f:
            display_menu(f, itemDict)
    else:
        # Closed: cover the page with the 'closed' artwork, then re-create
        # the navigation buttons so they stack above the overlay.
        Western_close_label = ttk.Label(root, image= Western_close)
        Western_close_label.image = Western_close  # BUG FIX: keep the image alive
        Western_close_label.place(x=0, y=0, relwidth=1, relheight=1)
        backButton = Button(root, text = "", command = curStoresPage, image = back_button)
        backButton.place(x=30, y=400, height=50, width=50)
        endProg = Button(root, text = "", command = leavePage, image = CloseButton)
        endProg.place(x=740, y=20, height =34, width=40)
    root.bind('<Return>', calcWaitTime)  # Enter key triggers the wait-time calculator
#=================================
#ANEEZ
#=================================
#Chinese store GUI
def Chinese(*args):
    """Mini-wok (Chinese) store page: background art, queue-wait calculator,
    and the day-appropriate menu during opening hours (08:00-20:00); a
    'closed' screen outside them."""
    itemDict = {}  # menu item -> price, filled by display_menu()
    # Artwork for the open and closed views.
    miniwok_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Miniwok_Main.png")
    store_closed_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Miniwok_close.png")
    Miniwok_Main = ImageTk.PhotoImage(miniwok_back_img)
    Miniwok_close = ImageTk.PhotoImage(store_closed_img)
    # Widgets common to every storefront page.
    background_label = Label(root, image=Miniwok_Main)
    background_label.image = Miniwok_Main  # keep a reference so Tk doesn't GC the image
    calcButton = Button(root, text = "Calculate", command = calcWaitTime, image=EnterButton)
    waittime_result = ttk.Label(root, textvariable = waitTime)
    qEntry = ttk.Entry(root, textvariable = numPeople)
    backButton = Button(root, text = "", command = curStoresPage, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg)
    if hour_int >= 8 and hour_int < 20:
        # Open: the menu differs on Mon/Wed/Fri vs the other days.
        if day_str in ("Monday", "Wednesday", "Friday"):
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/chinese_mwf.txt"
        else:
            menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/chinese_tt.txt"
        # BUG FIX: display_menu() used to run unconditionally, raising a
        # NameError ('f' undefined) when the store was closed, and the file
        # was never closed.  The 'with' block fixes both.
        with open(menu_file, "r") as f:
            display_menu(f, itemDict)
    else:
        # Closed: cover the page with the 'closed' artwork, then re-create
        # the navigation buttons so they stack above the overlay.
        Miniwok_close_label = ttk.Label(root, image= Miniwok_close)
        Miniwok_close_label.image = Miniwok_close  # BUG FIX: keep the image alive
        Miniwok_close_label.place(x=0, y=0, relwidth=1, relheight=1)
        backButton = Button(root, text = "", command = curStoresPage, image = back_button)
        backButton.place(x=30, y=400, height=50, width=50)
        endProg = Button(root, text = "", command = leavePage, image = CloseButton)
        endProg.place(x=740, y=20, height =34, width=40)
    root.bind('<Return>', calcWaitTime)  # Enter key triggers the wait-time calculator
#=================================
#JONATHAN, ANEEZ
#=================================
#McD store GUI
def McDRegular(*args):
    """McDonald's page: breakfast menu 07:00-10:59, regular menu
    11:00-21:59, and a 'closed' screen otherwise."""
    itemDict = {}  # menu item -> price, filled by display_menu()
    # Artwork for the open and closed views.
    mcd_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/MCD_main.png")
    store_closed_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/MCD_close.png")
    MCD_main = ImageTk.PhotoImage(mcd_back_img)
    MCD_close = ImageTk.PhotoImage(store_closed_img)
    # Widgets common to every storefront page.
    background_label = Label(root, image=MCD_main)
    background_label.image = MCD_main  # keep a reference so Tk doesn't GC the image
    calcButton = Button(root, text = "Calculate", command = calcWaitTime, image=EnterButton)
    waittime_result = ttk.Label(root, textvariable = waitTime)
    qEntry = ttk.Entry(root, textvariable = numPeople)
    backButton = Button(root, text = "", command = curStoresPage, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg)
    # Pick the menu for the current hour; None means the store is closed.
    if hour_int >= 7 and hour_int <= 10:
        menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/mcd_breakfast.txt"
    elif hour_int >= 11 and hour_int < 22:
        menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/mcd_regular.txt"
    else:
        menu_file = None
        # Closed: cover the page with the 'closed' artwork, then re-create
        # the navigation buttons so they stack above the overlay.
        MCD_close_label = ttk.Label(root, image= MCD_close)
        MCD_close_label.image = MCD_close  # BUG FIX: keep the image alive
        MCD_close_label.place(x=0, y=0, relwidth=1, relheight=1)
        backButton = Button(root, text = "", command = curStoresPage, image = back_button)
        backButton.place(x=30, y=400, height=50, width=50)
        endProg = Button(root, text = "", command = leavePage, image = CloseButton)
        endProg.place(x=740, y=20, height =34, width=40)
    # BUG FIX: display_menu() used to run unconditionally, raising a
    # NameError ('f' undefined) when the store was closed, and the file was
    # never closed.  The guard plus 'with' block fixes both.
    if menu_file is not None:
        with open(menu_file, "r") as f:
            display_menu(f, itemDict)
    root.bind('<Return>', calcWaitTime)  # Enter key triggers the wait-time calculator
#=================================
#ANEEZ, KIM
#=================================
#kfc store GUI
def kfcRegular(*args):
    """KFC page: breakfast menu 06:00-10:59, regular menu 11:00-21:59, and
    a 'closed' screen otherwise.

    (The old redundant `global time_str` / `global day_str` declarations
    were removed -- the globals are only read here, never assigned.)
    """
    itemDict = {}  # menu item -> price, filled by display_menu()
    # Artwork for the open and closed views.
    kfc_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/KFC_main.png")
    store_closed_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/KFC_close.png")
    KFC_main = ImageTk.PhotoImage(kfc_back_img)
    KFC_close = ImageTk.PhotoImage(store_closed_img)
    # Widgets common to every storefront page.
    background_label = Label(root, image=KFC_main)
    background_label.image = KFC_main  # keep a reference so Tk doesn't GC the image
    calcButton = Button(root, text = "Calculate", command = calcWaitTime, image=EnterButton)
    waittime_result = ttk.Label(root, textvariable = waitTime)
    qEntry = ttk.Entry(root, textvariable = numPeople)
    backButton = Button(root, text = "", command = curStoresPage, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    place_widgets(background_label, calcButton, waittime_result, qEntry, backButton, endProg)
    # Pick the menu for the current hour; None means the store is closed.
    if hour_int >= 6 and hour_int <= 10:
        menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/kfc_breakfast.txt"
    elif hour_int >= 11 and hour_int < 22:
        menu_file = "/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/kfc_regular.txt"
    else:
        menu_file = None
        # Closed: cover the page with the 'closed' artwork, then re-create
        # the navigation buttons so they stack above the overlay.
        KFC_close_label = ttk.Label(root, image= KFC_close)
        KFC_close_label.image = KFC_close  # BUG FIX: keep the image alive
        KFC_close_label.place(x=0, y=0, relwidth=1, relheight=1)
        backButton = Button(root, text = "", command = curStoresPage, image = back_button)
        backButton.place(x=30, y=400, height=50, width=50)
        endProg = Button(root, text = "", command = leavePage, image = CloseButton)
        endProg.place(x=740, y=20, height =34, width=40)
    # BUG FIX: display_menu() used to run unconditionally, raising a
    # NameError ('f' undefined) when the store was closed, and the file was
    # never closed.  The guard plus 'with' block fixes both.
    if menu_file is not None:
        with open(menu_file, "r") as f:
            display_menu(f, itemDict)
    root.bind('<Return>', calcWaitTime)  # Enter key triggers the wait-time calculator
#=================================
#ANEEZ
#=================================
#Defines a page that allows users to input custom date and time preferences
def dateEntryPage(*args):
    """Page that lets the user pick a custom date (calendar) and time
    (hour/minute comboboxes), then browse the stores as of that moment."""
    hourString = StringVar()
    minString = StringVar()
    #Function to retrieve the custom date and time values from the user
    def getDateValues():
        """Read the calendar/combobox selections into the module clock
        globals via SetDateToCustom()."""
        try:
            day = cal.selection_get()
            hour = hourString.get()
            minute = minString.get()
            SetDateToCustom(day.strftime("%A"), hour, minute)
        except ValueError:
            # NOTE(review): a failed read is silently ignored but 'flag' is
            # still set below -- confirm this is intended.
            pass
        global flag
        flag = True
    global flag
    flag = True
    today = datetime.date.today()
    mindate = datetime.date(today.year, today.month, today.day)#sets minimum date on the calendar as todays date
    maxdate = datetime.date(year = 2030, month = 12, day = 30)#sets maximum date on the calendar
    #commands to retrieve an image from the system which will be used to design the various widgets present in the GUI
    selDate_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/OtherDates/selectdate.png")
    Enter_Button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/OtherDates/Enter_Button.png")
    set_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/OtherDates/Set_Button.png")
    selDate_Background = ImageTk.PhotoImage(selDate_back_img)
    Enter_Button = ImageTk.PhotoImage(Enter_Button_img)
    Set_Button = ImageTk.PhotoImage(set_button_img)
    s = ttk.Style()
    s.theme_use('clam')
    #initialization block for the widgets to be displayes to the user. Defines the various widgets and places them at an arbitrary location on the window
    customFrame = ttk.Frame(root, padding = "0")
    background_label = Label(root, image=selDate_Background)
    background_label.image = selDate_Background # this is to keep a copy of the image in the file
    enterButton = Button(root, text = "", command = curStoresPage, image = Enter_Button)
    setButton = Button(root, text = "Set", command = getDateValues, image = Set_Button)
    backButton = Button(root, text = "Go Back", command = mainScreen, image = back_button)
    cal = Calendar(root, font="Arial 14", selectmode='day', locale='en_US', mindate=mindate, maxdate=maxdate, disabledforeground='red', cursor="hand1", year=2018, month=2, day=5)
    hourEntry = ttk.Combobox(root, values = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23), width = 10, textvariable = hourString, state="readonly")
    hourEntry.current(9)#sets the default value of the combobox
    # NOTE(review): the minute list starts at 1, so minute 0 cannot be
    # selected -- confirm that is intended.
    minuteEntry = ttk.Combobox(root, values = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59), width = 10, textvariable = minString, state="readonly")
    minuteEntry.current(29)#sets default value of combobox
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    #Placement block for the widgets to be displayed to the user. Defines the coordinates on the window at which each widget should be placed
    customFrame.place()
    background_label.place(x=0, y=0, relwidth=1, relheight=1)
    enterButton.place(x=650, y=340, height = 146, width = 128)
    setButton.place(x=520, y=340, height=146, width=128)
    backButton.place(x=30, y=400, height=50, width=50)
    cal.pack(expand=False)
    cal.place(x = 250, y = 150)
    hourEntry.place(x=250, y=380)
    minuteEntry.place(x=420, y=380)
    endProg.place(x=740, y=20, height =34, width=40)
#=================================
#KIM
#=================================
def curStoresPage(*args):
    """Store-selection page: one button per eatery.

    If no custom date/time was chosen (flag is False) the clock globals are
    refreshed to the current system time before showing the stores.
    """
    global flag
    if flag == False:
        SetDateToCurr()
    else:
        pass
    #commands to retrieve an image from the system which will be used to design the various widgets present in the GUI
    availableStores_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/AvailStores.png")
    kfc_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/KFC.png")
    McD_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/McD.png")
    noodles_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/Noodles.png")
    western_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/Western.png")
    miniwok_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/CurrStores/Miniwok.png")
    AvailStores = ImageTk.PhotoImage(availableStores_back_img)
    KFC = ImageTk.PhotoImage(kfc_button_img)
    McD = ImageTk.PhotoImage(McD_button_img)
    Noodles = ImageTk.PhotoImage(noodles_button_img)
    PorkChop = ImageTk.PhotoImage(western_button_img)
    Miniwok = ImageTk.PhotoImage(miniwok_button_img)
    style = ttk.Style()#style tag to define the label style and font color
    style.configure("TLabel", foreground = 'black')
    background_label = Label(root, image=AvailStores)
    background_label.image = AvailStores # this is to keep a copy of the image in the file
    # One button per store; each command opens that store's page.
    store1 = Button(root, text = "KFC", command = kfcRegular, image = KFC)
    store2 = Button(root, text = "McDonald's", command = McDRegular, image = McD)
    store3 = Button(root, text = "Mini Wok", command = Chinese, image = Miniwok)
    store4 = Button(root, text = "Noodle", command = Noodle, image = Noodles)
    store5 = Button(root, text = "Western", command = Western, image = PorkChop)
    backButton = Button(root, text = "", command = mainScreen, image = back_button)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    #placement block for the widgets to be displayed to the user. Defines the coordinates on the window at which each widget should be placed
    background_label.place(x=0, y=0, relwidth=1, relheight=1)
    store1.place(x=350,y=175, height=46, width=190)
    store2.place(x=350,y=230, height=46, width=190)
    store3.place(x=350, y=405, height=46, width=190)
    store4.place(x=350, y=290, height=46, width=190)
    store5.place(x=350, y=348, height=46, width=190)
    backButton.place(x=30, y=400, height=50, width=50)
    endProg.place(x=740, y=20, height =34, width=40)
#=================================
#JONATHAN
#=================================
def mainScreen(*args):
    """Landing page: shows the current day/time and offers two paths --
    browse stores now, or browse as of a user-chosen date/time.

    Also resets 'flag' so the clock reverts to the system time, and starts
    the Tk mainloop on first entry.
    """
    global flag
    flag = False
    style = ttk.Style()#style tag to define the label style and font color
    style.configure("TLabel", foreground = 'white')
    #commands to retrive an image from the system which will be used to design the various widgets present in the GUI
    main_back_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/MainPage/MainBackground.png")
    currStores_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/MainPage/CurrentStore.png")
    otherDates_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/MainPage/OtherDates.png")
    image3 = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/MainPage/black.png")
    MainBackground = ImageTk.PhotoImage(main_back_img)
    CurrentStore = ImageTk.PhotoImage(currStores_button_img)
    OtherDates = ImageTk.PhotoImage(otherDates_button_img)
    black_background = ImageTk.PhotoImage(image3)
    current_time = time_str
    day = day_str
    #initialization block for the widgets to be displayes to the user. Defines the various widgets and places them at an arbitrary location on the window
    background_label = Label(root, image=MainBackground)
    background_label.image = MainBackground # this is to keep a copy of the image in the file
    dayLabel = ttk.Label(root, text = day, image = black_background, compound=CENTER)
    timeLabel = ttk.Label(root, text = current_time, image = black_background, compound=CENTER)
    currDate = Button(root, text = "Current Stores", command = lambda : curStoresPage(), image = CurrentStore)
    otherDate = Button(root, text = "View stores by other dates", command = lambda: dateEntryPage(), image = OtherDates)
    endProg = Button(root, text = "", command = leavePage, image = CloseButton)
    #placement block for the widgets to be displayed to the user. Defines the coordinates on the window at which each widget should be placed
    background_label.place(x=0, y=0, relwidth=1, relheight=1)
    dayLabel.place(x=100, y=20, height = 30, width = 100)
    timeLabel.place(x=400, y=20, height = 30, width = 100)
    currDate.place(x=252, y=270, height=54, width=292)
    otherDate.place(x=252, y=350, height=54, width=292)
    endProg.place(x=740, y=20, height =34, width=40)
    # Enter the Tk event loop; returns only after the root window is destroyed.
    root.mainloop()
# --- Module-level setup: clock globals, root window and shared images. ---
flag = False #flag variable to check if the store should display the current date and time or the custom date and time
now = datetime.datetime.now()
day_str = datetime.datetime.today().strftime("%A")#Returns the current day as a string
time_str = now.strftime("%H:%M:%S")#Returns the current time as HH:MM:SS
hour_int = int(now.hour)
root = Tk()#initial root window on which information and widgets will be displayed
root.title("NTU North Spine Canteen System")
root.geometry("800x504+325+175")#sets the position of the window on execution of the program
root.maxsize(800,500)#sets the maximum size of the GUI window to prevent resizing
root.minsize(800,500)#sets the minimum size of the GUI window to prevent resizing
waitTime = StringVar()#initializes a string that will store the waiting time at the store
numPeople = StringVar()#initializes a string that will retrieve the number of people in queue at the store
#public declaration and retrieval of images that will be used in every page of the GUI
back_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/back_button.png")
close_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/CloseButton.png")
enter_button_img = Image.open("/Users/aneez.jah/Desktop/Y1S1 Coursework/Introduction to Computational Thinking/Mini Project/NSCanteenSystem/Stores/Enter.png")
back_button = ImageTk.PhotoImage(back_button_img)
CloseButton = ImageTk.PhotoImage(close_button_img)
EnterButton = ImageTk.PhotoImage(enter_button_img)
# Show the landing page; this call blocks in root.mainloop() until exit.
mainScreen()
"noreply@github.com"
] | noreply@github.com |
86175487f19361d5a3638bd32f87a2f980377a3a | 3c29e2e1c8e2bcf962c8b31214f418ad7b1b5d11 | /readAndPlotSingle.py | da5035b21e5d8e674f6f7a6120513e4e23278ac0 | [] | no_license | pbarragan/mechIdent | 55cf7ce7f7d30d9e07367de1240b26fc039ce7ec | 63f18c4af8b5c1498d74ee96d561a5116d7691ce | refs/heads/master | 2021-01-10T20:32:14.278094 | 2014-02-27T04:56:29 | 2014-02-27T04:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,307 | py | from matplotlib import pyplot
import numpy
def skip_lines(f, lines):
    """Advance the open file *f* by *lines* lines, discarding their contents."""
    for _ in range(lines):
        f.readline()
def get_data(fileName):
    """Parse a trial log file and return the model-probability history.

    Returns (data, numSteps, numMPPairs, model):
      data       -- list of numMPPairs lists; data[i][k] is the probability
                    of model/parameter pair i after step k (index 0 = prior)
      numSteps   -- number of steps recorded in the file
      numMPPairs -- number of model/parameter pairs tracked
      model      -- integer id of the true model used in the trial
    """
    with open(fileName, 'r') as f:
        def _skip(count):
            # Discard 'count' lines of the log we don't need.
            for _ in range(count):
                f.readline()
        f.readline()                      # file header
        model = int(f.readline())
        _skip(5)
        numStates = int(f.readline())
        _skip(numStates * 6 + 2)
        numActions = int(f.readline())
        _skip(numActions + 2)
        numMPPairs = int(f.readline())
        _skip(numMPPairs * (6 + 1) + (1 + 1 + 1 + 1) + numStates)
        # Prior probability for every model/parameter pair.
        data = []
        for i in range(numMPPairs):
            data.append([float(f.readline())])
        _skip(3)
        numSteps = int(f.readline())
        # One updated probability per pair at every step, to end of file.
        for i in range(numSteps):
            _skip(11 + numStates + 1 + numMPPairs + 1)
            for j in range(numMPPairs):
                data[j].append(float(f.readline()))
    # BUG FIX: converted Python-2-only print statements to the call form,
    # which behaves identically for a single argument on Python 2 and 3.
    print(model)
    print(numStates)
    print(numActions)
    print(numMPPairs)
    print(numSteps)
    print(len(data))
    print(len(data[0]))
    return data, numSteps, numMPPairs, model
def get_data_w_actions(fileName):
    """Parse a trial log file, echoing the action taken at each step, and
    return the model-probability history.

    Same return contract as get_data(): (data, numSteps, numMPPairs, model).
    The per-step log layout differs -- each step records the chosen action
    line, which is printed as it is read.
    """
    with open(fileName, 'r') as f:
        def _skip(count):
            # Discard 'count' lines of the log we don't need.
            for _ in range(count):
                f.readline()
        f.readline()                      # file header
        model = int(f.readline())
        _skip(5)
        numStates = int(f.readline())
        _skip(numStates * 6 + 2)
        numActions = int(f.readline())
        _skip(numActions + 2)
        numMPPairs = int(f.readline())
        _skip(numMPPairs * (6 + 1) + (1 + 1 + 1 + 1) + numStates)
        # Prior probability for every model/parameter pair.
        data = []
        for i in range(numMPPairs):
            data.append([float(f.readline())])
        _skip(3)
        numSteps = int(f.readline())
        # Per step: skip to the action line, echo it, then read one updated
        # probability per pair.
        # BUG FIX: print statement converted to the call form, which behaves
        # identically for a single argument on Python 2 and 3.
        for i in range(numSteps):
            _skip(5)
            actionString = f.readline()
            print(actionString)
            _skip(5)
            _skip(numStates + 1 + numMPPairs + 1)
            for j in range(numMPPairs):
                data[j].append(float(f.readline()))
    return data, numSteps, numMPPairs, model
#main
#just to get the numbers we want
# Select which trial log to plot: model 5, 'entropy' action selection, trial 1.
model = 5
asTypes = ['simple','random','entropy']
asNum = 2
trialNum = 1
path = 'data/model'+str(model)+'/'+asTypes[asNum]+'/'
startFileName = path+'data'+str(model)+'_'+str(trialNum)+'.txt'
#startFileName = 'data/videoData/data0.txt'
#setup
startData, nSteps, nMPPairs, m = get_data_w_actions(startFileName)
totalData = numpy.array(startData)  # row i = probabilities for pair i over steps
inds = range(nSteps+1)  # x axis: step 0 (prior) .. nSteps
models = ['Free','Fixed','Rev','Pris','RevPrisL','PrisPrisL']
# one probability curve per model/parameter pair
for i in range(nMPPairs):
    pyplot.plot(inds,totalData[i,:],'-o')
pyplot.title(models[m]+' - '+asTypes[asNum])
pyplot.ylabel('Probability')
pyplot.xlabel('Step')
pyplot.legend(models)
pyplot.show()
## data, numSteps, numMPPairs, model = get_data('data5Sun_Sep_15_17_10_07_2013.txt')
## inds = range(numSteps+1)
## models = ['Free','Fixed','Rev','Pris','RevPrisL','PrisPrisL']
## #colors = ['r','g',
## for i in range(numMPPairs):
## pyplot.plot(inds,data[i],'-o')
## pyplot.title(models[model])
## pyplot.ylabel('Probability')
## pyplot.xlabel('Step')
## pyplot.legend(models)
## dataArray = numpy.array(data)
## print dataArray
## print dataArray[5,:]
## pyplot.show()
| [
"patrick.r.barragan@gmail.com"
] | patrick.r.barragan@gmail.com |
56c9a9f7223bf03212b85936315983875c71c2d4 | 292569c7576a0de7d2d7c7653f202b4d1189415e | /main.py | f8a1c3c34ff8441af2a35e613208be241bbe2366 | [] | no_license | zabbidou/Regex-to-epsilonNFA-to-DFA | 38aa3f7c6f22e7bb3c36bae1949fa937d77ab96f | c506e22aa2347ef17abae93fdd5c5a52f0c185f8 | refs/heads/main | 2023-03-17T18:35:56.938344 | 2021-03-08T22:54:38 | 2021-03-08T22:54:38 | 345,816,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | import sys
from nfa import *
from antlr4 import *
from RegexLexer import RegexLexer
from RegexParser import RegexParser
from RegexVisitor import RegexVisitor
# Driver: argv[1] = regex source file, argv[2] = NFA output file,
# argv[3] = DFA output file.
input = FileStream(sys.argv[1])  # NOTE(review): shadows the builtin input()
lexer = RegexLexer(input)
stream = CommonTokenStream(lexer)
parser = RegexParser(stream)
tree = parser.regex()
visitor = RegexVisitor()
# build the NFA from the parse tree
nfa = visitor.visit(tree)
# write the NFA to file
nfa.printToFile(sys.argv[2])
# perform the NFA -> DFA conversion
dfa = nfaToDfa(nfa)
# write the result to file
dfa.printDFA(sys.argv[3])
"noreply@github.com"
] | noreply@github.com |
e0827393ee09fc0e40318a821e8d87fb7743ad1d | daca019a9dac198390ae73588496e310f30362f1 | /mapReduce/lab2/ejercicio3/6.py | a743dc76277eb0cac42b6963d03080e96f0c1ff9 | [] | no_license | caladdd/BigData | 9fd9ffeb857f3dc97d03dcfbbd8854840c4f83df | e372ad6c3140a040ad33d7e87dd94ab6932ee5c1 | refs/heads/master | 2020-03-11T04:45:23.996813 | 2018-05-07T04:31:12 | 2018-05-07T04:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from mrjob.job import MRJob
from mrjob.step import MRStep
#import numpy as np
class MejorEval(MRJob):
    """Two-step MRJob: average the rating (CSV field 2) per key (CSV field 4),
    then emit the single key with the highest average."""

    def steps(self):
        """Step 1 computes per-key averages; step 2 reduces them to the maximum."""
        averaging = MRStep(mapper=self.mapper1, reducer=self.reducer1)
        picking_best = MRStep(reducer=self.reducer2)
        return [averaging, picking_best]

    def mapper1(self, _, line):
        """Emit (field 4, float(field 2)) for one comma-separated record."""
        fields = line.split(",")
        yield fields[4], float(fields[2])

    def reducer1(self, key, values):
        """Emit (None, (key, mean of values)); the None key groups all pairs
        into a single reducer call for the next step."""
        total = 0
        count = 0
        for rating in values:
            total += rating
            count += 1
        yield None, (key, total / count)

    def reducer2(self, key, values):
        """Emit the (name, average) pair with the strictly highest average."""
        best_avg = 0
        best_name = " "
        for name, avg in values:
            if avg > best_avg:
                best_avg = avg
                best_name = name
        yield best_name, best_avg
if __name__ == '__main__':
MejorEval.run()
| [
"juanpablo.calad@gmail.com"
] | juanpablo.calad@gmail.com |
775c98aff54bc0e91e9c3be0b4cb8fdbb0cecb40 | a4b38feb9a4502249babc679c38590a017a47d30 | /rohanblog/urls.py | 9c15e04b0ccd08107339ca522ca6acec3be4962d | [] | no_license | rohanNpradhan/Rohan_Blog-Django | 825bc5b57555d857655e14ec75ba62974ef20850 | e97ffc0a68e5b6abe9536cf7b953a99b7a06eb8c | refs/heads/master | 2021-08-10T20:12:07.579942 | 2017-11-12T22:49:51 | 2017-11-12T22:49:51 | 110,465,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | """rohanblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import posts.views
import sitepages.views
from django.conf.urls.static import static
from django.conf import settings
# Routes: admin site, blog home, post detail (numeric id captured as post_id),
# and the about page; static() appends media-file routes (development only).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', posts.views.home, name = "home"),
    url(r'^posts/(?P<post_id>[0-9]+)/$', posts.views.post_details, name = "post_detail"),
    url(r'^about', sitepages.views.about, name = "about"),
] + static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| [
"rohanpradhan@gmail.com"
] | rohanpradhan@gmail.com |
64d45d9b34c9e2d7e84fae07e4afc49f2795317a | 0c9ba4d9c73fb3b4ee972aed0b6d844d8a9546a9 | /TerchaerCode/s13day3课上代码/day3/s1.py | 2b5de365e8984bdb16be174cabf9b4b954ffbc68 | [] | no_license | SesameMing/Python51CTONetwork | d38179122c8daaed83d7889f17e4c3b7d81e8554 | 76169c581245abf2bcd39ed60dc8c9d11698fd3a | refs/heads/master | 2020-04-15T12:47:15.234263 | 2018-06-02T15:04:04 | 2018-06-02T15:04:04 | 65,876,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Alex Li
# 1、set,无序,不重复序列
# li = [11,222]
# print(li)
# a. 创建
# li = []
# list((11,22,33,4))
# list __init__,内部执行for循环(11,22,33,4) [11,22,33,4]
# 原理,list
# dic = {"k1":123}
# se = {"123", "456"}
# s = set() # 创建空集合
# li = [11,22,11,22]
# s1 = set(li)
# print(s1)
# b. 功能
# set()\
# 创建集合
# s1 = {11,22}
# s2 = set()
# s3 = set([11,22,33,4])
## 操作集合
# s = set()
# print(s)
# s.add(123)
# s.add(123)
# s.add(123)
# print(s)
# s.clear()
# print(s)
# s1 = {11,22,33}
# s2 = {22,33,44}
# s3 = s1.difference(s2)
# A中存在,B中不存在
# s3 = s2.difference(s1)
# s3 = s1.symmetric_difference(s2)
# print(s1)
# print(s2)
# print(s3)
# s1.difference_update(s2)
# s1.symmetric_difference_update(s2)
# print(s1)
# s1 = {11,22,33}
# s1.discard(1111)
# s1.remove(11111)
# ret = s1.pop()
# print(s1)
# print(ret)
# s1 = {11,22,33}
# s2 = {22,33,44}
# s3 = s1.union(s2)
# print(s3)
# s3 = s1.intersection(s2)
# s1.intersection_update(s2)
# print(s3)
"""
s1 = {11,22,33}
s1.add(11)
s1.add(12)
s1.add(13)
# li = [11,22,3,11,2]
# li = (11,22,3,11,2)
li = "alexalex"
s1.update(li)
print(s1)
"""
# li = [11,22,33] # list __init__
# li() # list __call__
# li[0] # list __getitem__
# li[0] = 123 # list __setitem__
# def li[1] # list __delitem__
# Demo: compare two dicts' key sets to find what to delete, add and update.
old_dict = {
    "#1": 8,
    "#2": 4,
    "#4": 2,
}
new_dict = {
    "#1": 4,
    "#2": 4,
    "#3": 2,
}
# old_kyes = old_dict.keys()
# old_set = set(old_kyes)
new_set = set(new_dict.keys())
old_set = set(old_dict.keys())
remove_set = old_set.difference(new_set)  # keys only in old_dict -> delete
add_set = new_set.difference(old_set)  # keys only in new_dict -> add
update_set = old_set.intersection(new_set)  # keys in both -> update
import re
# NOTE(review): re.match() is called with no arguments and raises TypeError at
# import time -- this looks like unfinished scratch code; confirm intent.
re.match()
"admin@v-api.cn"
] | admin@v-api.cn |
aee4fd9a82016258b51396e9fcbb86c222086056 | 21f31e7894bd2620af08ff02b307afbf85551a22 | /DublinBus/DublinBusTest/migrations/0002_line9_lineone.py | 12652ebe993f635299ee5c8a482b41dc4698816b | [
"BSD-2-Clause"
] | permissive | atreanor/PaceApp | f3866993a82617d6b541d6c681523f53c43de2a9 | c7ad6f42d9442d99b48c03e3e6ad883f5d16bba9 | refs/heads/master | 2022-12-18T04:37:27.271179 | 2019-03-24T22:13:59 | 2019-03-24T22:13:59 | 177,114,952 | 0 | 0 | BSD-2-Clause | 2022-12-08T04:53:55 | 2019-03-22T09:52:23 | JavaScript | UTF-8 | Python | false | false | 1,464 | py | # Generated by Django 2.0.6 on 2018-07-10 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: declares the unmanaged models Line9 and
    Lineone over the existing tables '9' and 'LineOne' ('managed': False
    means Django will not create or drop these tables)."""
    dependencies = [
        ('DublinBusTest', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Line9',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('displaystopid', models.TextField(blank=True, null=True)),
                ('fullname', models.TextField(blank=True, null=True)),
                ('latitude', models.TextField(blank=True, null=True)),
                ('longitude', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': '9',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Lineone',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('displaystopid', models.TextField(blank=True, null=True)),
                ('fullname', models.TextField(blank=True, null=True)),
                ('latitude', models.TextField(blank=True, null=True)),
                ('longitude', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'LineOne',
                'managed': False,
            },
        ),
    ]
| [
"alan.treanor@ucdconnect.ie"
] | alan.treanor@ucdconnect.ie |
7010bdcc199ffe691d09e85fcec06ddaf77523bb | d9a839f241f65a6bb1ed0e900d8c12d407478360 | /Python_Practice/primetest.py | d515a92e55f456103d273d03015c9e07bc598f05 | [] | no_license | downstreamfish/Python_Trains | df9ad1ea78076a970a8236624aac2105de9e36e2 | 91b58a9e28eb7d42ab0afb693cc42ff3cfd10dea | refs/heads/master | 2020-03-27T03:09:33.509798 | 2019-01-16T12:48:48 | 2019-01-16T12:48:48 | 145,842,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | for n in range(2, 10):
for x in range(2,n):
if n % x == 0:
print(n, 'equals', x, '*', n/x)
break
else:
print(n, 'is a prime number')
| [
"honle_han1403@outlook.com"
] | honle_han1403@outlook.com |
f1225911a9c8b090f16e2d5cab0de30ef298ea31 | fc41dc76e08232e1115107f99fc004873e225a87 | /project_one/settings.py | 3b93108dc2191c5ddc912eb8a3a67265cda9fc8e | [] | no_license | jomapormentilla/django_project_one | 8206ea9bba728657b3aa1a6233ae58f3b2d3ea72 | 1d318d434b2269ce1c86e40a44d67cdf30a7f925 | refs/heads/main | 2023-06-17T21:17:51.018746 | 2021-07-07T18:43:31 | 2021-07-07T18:43:31 | 383,824,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | """
Django settings for project_one project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; in production it
# should be loaded from the environment instead of being hard-coded.
SECRET_KEY = 'django-insecure-2ew4&a10&onkra@6p=6z)syyk^s(1!d@f*n8(fbv77da6*pr_0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG = True Django allows only localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'birthday',
'hello',
'todolist',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_one.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_one.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"jomapormentilla@gmail.com"
] | jomapormentilla@gmail.com |
7ce2d665d3a2088faa37fe2203b59cecafdb1a2f | 620213ce023607fce767bd1cac0bdf9ceed97de9 | /servings/flask/utils/names.py | 1ca87f8600c791434928d9f1449b74b9323f11ff | [] | no_license | yusuf287/DataMigrator | bc7e4edb37ff243a204e69a9d9c6f4cc3443ed14 | 52198e2ee2e67acd425832ab09f5be99a2c3c9a3 | refs/heads/master | 2020-09-06T01:59:40.113394 | 2019-12-02T06:30:49 | 2019-12-02T06:30:49 | 220,280,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | test_case_proto_file_name = "test_cases.proto"
input_output_proto_file_name = "input_output.proto"
test_code_file = "test.py"
input_test_file_name = "testcases.txt" | [
"noreply@github.com"
] | noreply@github.com |
40cbfdfcdcbc0a80e3f1a4e234d9e3147910b169 | 02520432a2d109e3be3d701a76794735e642aa5f | /base_modules/pcam_pcomputing.py | 31547b64c88f4c0b23efe4aeb775807225d319f7 | [] | no_license | ronniejoshua/multi_threading_processing | af1918a810d5e5b1907e81b4364502ee2dda9e24 | 1f9437c4a8599078c6a909cda95a0ede3127fbf5 | refs/heads/master | 2022-12-12T03:51:02.884704 | 2020-08-26T15:42:28 | 2020-08-26T15:42:28 | 281,604,347 | 0 | 0 | null | 2022-12-08T11:14:21 | 2020-07-22T07:16:06 | Python | UTF-8 | Python | false | false | 11,035 | py |
"""
Partitioning
------------
Partitioning is about breaking down the problem into discrete chunks of work that can be distributed to
multiple tasks. At this beginning stage, we're not concerned with practical issues like the number of
processors in our computer. We'll consider that later. For now, our goal is to simply decompose the problem
at hand into as many small tasks as possible, and there are two basic ways to approach partitioning:
1. Domain Decomposition
2. Functional Decomposition
Domain Decomposition
--------------------
Domain, or data decomposition, focuses on dividing the data associated with the problem into lots of
small and, if possible, equally-sized partitions. The secondary focus is then to consider the computations
to be performed and associating them with that partition data.
Different ways of decomposing data can have different advantages and disadvantages depending on the problem and
hardware involved. Once we've partitioned the data, we can turn our focus towards the processing that needs to
be applied to each section.
Functional Decomposition
------------------------
The other form of decomposition, functional decomposition, provides a different, complimentary way to break down
the problem. Rather than focusing on the data being manipulated, functional decomposition begins by considering
all of the computational work that a program needs to do. And then divides that into separate tasks that perform
different portions of the overall work. The data requirements for those tasks are a secondary consideration.
Note
----
Keep in mind that domain and functional decomposition are complementary ways to approach a problem. And it's
natural to use a combination of the two. Programmers typically start with domain decomposition because it forms
a foundation for a lot of parallel algorithms. But sometimes taking a functional approach instead can provide
different ways of thinking about these problems. It's worth taking the time to explore alternative perspectives.
It can reveal problems or opportunities for better optimization that would be missed by considering data alone.
by considering data alone.
"""
"""
Communication
-------------
After decomposing the problem into separate tasks, the next step in our design process is to establish communication,
which involves figuring out how to coordinate execution and share data between the tasks.
Some problems can be decomposed in ways that do not require tasks to share data between them.
Although our separate tasks can execute concurrently, They are sometimes no longer completely independent
from each other. In this type of situation, we might establish a network of direct point-to-point communication
links between neighboring tasks. For each link, one task is acting as the sender, or producer of data, and the other
task that needs it is the receiver or consumer.
That type of local point-to-point communication can work when each task only needs to communicate with a small number
of other tasks. - But if your tasks need to communicate with a larger audience, then you should consider other
structures for sharing data between multiple tasks. You might have one task that broadcasts the same data out to all
members of a group or collective, or it scatters different pieces of the data out to each of the members to process.
Afterwards, that task can gather all of the individual results from the members of the group and combine them for a
final output.
When operations require this type of global communication, it's important to consider how it can grow and scale.
Simply establishing point-to-point pairs may not be sufficient. If one task is acting as a centralized manager to
coordinate operations with a group of distributed workers, as the number of workers grow, the communication workload
of a central manager grows too, and may turn it into a bottleneck.
This is where strategies like divide and conquer can be useful, restructuring the communication in a way that reduces the burden on any one task.
These are just a few high-level structures to serve as a starting point as you begin to plan the communications
for a parallel program.
A few other factors to consider include whether the communications will be synchronous or asynchronous. Synchronous
communications are sometimes called blocking communications, because all tasks involved have to wait until the
entire communication process is completed to continue doing other work. That can potentially result in tasks spending
a lot of time waiting on communications instead of doing useful work.
Asynchronous communications, on the other hand, are often referred to as non-blocking communications, because after
a task sends an asynchronous message, it can begin doing other work immediately, regardless of when the receiving
task actually gets that message. You should also consider the amount of processing overhead a communication strategy
involves, because the computer cycles spent sending and receiving data are cycles not being spent processing it.
Latency is another factor to consider, the time it takes for a message to travel from point A to B, expressed in
units of time, like microseconds. And bandwidth, which is the amount data that can be communicated per unit of time,
expressed in some unit of bytes per second. Now if you're just writing basic multi-threaded or multi-processed
programs to run on a desktop computer, some of these factors like latency and bandwidth, probably aren't major
concerns, because everything is running on the same physical system.
But as you develop larger programs that distribute their processing across multiple physical systems, those
inter-system communication factors can have a significant impact on the overall performance. can have a significant
impact on the overall performance.
"""
"""
Agglomeration
In the first two stages of our parallel design process, we partitioned a problem into a set of separate tasks and
established communication to provide those tasks with the data they needed. We looked at different ways to decompose
the problem and focused on defining as many small tasks as possible. That approach helped us consider a wide range of
opportunities for parallel execution.
However, the solution it created is not very efficient, especially if there are way more tasks than there are
processors on the target computer. - Now it's time to turn our thinking from abstract to something concrete and
modify that design to execute more efficiently on a specific computer.
In the third agglomeration stage, we'll revisit the decisions we made during the partitioning and communication
stages to consider changes to make our program more efficient, combining some of those tasks and possibly replicating
data or computations.
As a parallel program executes, periods of time spent performing usable computations are usually separated by periods
of communication and synchronization events. The concept of granularity gives us a qualitative measure of the time
spent performing computation over the time spent on communication.
Parallelism can be classified into two categories based on the amount of work performed by each task.
With fine-grained parallelism, a program is broken down into a large number of small tasks. The benefit is that
lots of small tasks can be more evenly distributed among processors to maximize their usage, a concept called
load balancing. The downside is that having lots of tasks increases the overhead for communication and
synchronization, so it has a lower computation-to-communication ratio.
On the other end of the spectrum, coarse-grained parallelism splits the program into a small number of large tasks.
The advantage is that it has a much lower communication overhead, so more time can be spent on computation.
However, the larger chunks of work may produce a load imbalance, where certain tasks process the bulk of data,
while others remain idle.
Those are two extremes and the most efficient solution will be dependent on the algorithm and the hardware on
which it runs. For most general purpose computers, that's usually in the middle with some form of medium-grained
parallelism.
A well-designed parallel program should adapt to changes in the number of processors, so keep flexibility in mind.
Try not to incorporate unnecessary, hard-coded limits on the number of tasks in the program. If possible, use
compile-time or run-time parameters to control the granularity.
"""
"""
Mapping
The fourth and final stage of our parallel design process is mapping. And this is where we specify where each
of the tasks we established will actually execute. Now this mapping stage does not apply if you're only using a
single process or system because there's only one place to execute the program or if you're using a system with
automated task scheduling.
So if I'm just writing programs to run on a desktop computer, like the examples we've shown you throughout this
course, mapping isn't even a consideration. The operating system handles scheduling threads to execute on specific
processor cores, so that's out of our hands.
Mapping really becomes a factor if you're using a distributed system or specialized hardware with lots of parallel
processors for large-scale problems, like in scientific computing applications. The usual goal of a mapping algorithm
is to minimize the total execution time of the program, and there are two main strategies to achieve that goal.
You can place tasks that are capable of executing concurrently on different processors to increase the overall
concurrency, or you can focus on placing tasks that communicate with each other frequently on the same processor
to increase locality by keeping them close together.
In some situations, it might be possible to leverage both of those approaches, but more often, they'll conflict
with each other, which means the design will have to make trade-offs. There's a variety of different load-balancing
algorithms that use domain decomposition and agglomeration techniques to map task execution to processors.
If the number of tasks or the amount of computation and communication per task changes as the program executes,
that makes the problem more complex, and it may require dynamic load-balancing techniques that periodically
determine a new mapping strategy.
Designing a good mapping algorithm is highly dependent on both the program structure and the hardware it's running
on, and that gets beyond the scope of this course.
So to summarize the four-step parallel design process, we start by taking a problem and partitioning or decomposing
it into a collection of tasks. Then we evaluate the communication necessary to synchronize and share data between
those tasks. After that, we agglomerate or combine those tasks into groups to increase the program's efficiency with
certain hardware in mind. And then finally, those tasks get mapped to specific processors to actually execute.
""" | [
"ron.juden@gmail.com"
] | ron.juden@gmail.com |
f7b43de393d6f9ff23a62a56c441c6076b7d7d8d | 6609a299691152af507a40c03a36341154447881 | /shopping/migrations/0005_home.py | 8a7dc986084ad59d375e73a1a5dac16ebd8d69e7 | [] | no_license | Princetripathi087/shopping | 6020835f97c325a6ba0a94b4f83e24faa38e7d36 | 158f7a072d20ce87f4628b48d2e0a3163252f7a0 | refs/heads/main | 2023-05-11T10:36:25.143412 | 2021-05-27T12:27:18 | 2021-05-27T12:27:18 | 371,336,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py | # Generated by Django 3.1.2 on 2021-04-21 17:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Home model (image, name, price)."""
    dependencies = [
        ('shopping', '0004_elec'),
    ]
    operations = [
        migrations.CreateModel(
            name='Home',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Himg', models.ImageField(upload_to='pics')),
                ('Hname', models.TextField()),
                ('Hprice', models.IntegerField()),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
ba07f8e90183b19fbf61a59a84b04c542f44f08b | d1b51796e6ccd043155046a7d4e6bb5b93a740f9 | /praticepython/Exercise11.py | c3619d62b8712ab3ae4c6b03894eaae3c3ab6a5a | [] | no_license | leocarlos92/python | ee017532107efb6726263c7cdc23657aac6f9020 | aa59bca3db1e90c1e61394d1bf4a35bd2f54086a | refs/heads/master | 2020-04-24T06:34:49.124655 | 2019-04-11T16:53:56 | 2019-04-11T16:53:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | """
Ask the user for a number and determine whether the number is prime or not.
(For those who have forgotten, a prime number is a number that has no divisors other than 1 and itself).
You can (and should!) use your answer to Exercise 4 to help you. Take this opportunity to practice using functions,
described below.
Discussion
Concepts for this week:
1. Functions
2. Reusable functions
3. Default arguments
"""
def get_number():
    """Prompt the user and return the reply as a string (Python 2: raw_input)."""
    return raw_input("Write a number: ")
def get_divisors_from2(number):
    """Return every divisor of *number* in [2, number], largest first.

    Returns [] for number <= 1 (no candidates in that range).
    """
    if number <= 0:
        return []
    return [candidate for candidate in range(number, 1, -1) if number % candidate == 0]
def is_prime(num_aux):
    """Print whether *num_aux* is prime (Python 2: statement-form print).

    get_divisors_from2 returns the divisors in [2, num_aux]; a prime has
    exactly one such divisor (itself), so a length-1 list means prime.
    """
    if len(get_divisors_from2(num_aux)) == 1:
        print '{} is a prime number'.format(num_aux)
    else:
        print '{} is not a prime number'.format(num_aux)
if __name__ == '__main__':
num = get_number()
is_prime(int(num))
| [
"leonarbc@motorola.com"
] | leonarbc@motorola.com |
84514a0a1649c409c1594afb68b66c5957994ab4 | fce86501f046bdf495e52952a026415229a3f512 | /manage.py | 32d39d9ac87d68c9b70ad26d6f7924935ea04c15 | [] | no_license | michaelliriano/tltrealestate | cdfcabcfb2e0c77a61d9c135deee4e1a3f3df4a3 | bcbd6dd16da37317737385b020d0c36ce0130dfa | refs/heads/master | 2022-12-19T09:51:57.498550 | 2020-02-13T19:45:43 | 2020-02-13T19:45:43 | 239,376,842 | 0 | 1 | null | 2022-11-22T05:18:49 | 2020-02-09T21:26:26 | Python | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django command-line entry point: set the settings module, then dispatch argv."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tlt.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; the original error is chained as the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"51761089+michaelliriano@users.noreply.github.com"
] | 51761089+michaelliriano@users.noreply.github.com |
9a90cd66c119e2e73b470b6be1c99591426412d7 | 9661a19548c9886beb4965f8b404fc61f0f6831e | /load_murcs/serverProperty_Load.py | 310081e0d192cd180461d576e96ffb96bc28cea9 | [] | no_license | dirkhpe/bv | d3ee2f43ac1cc0b14b38b40417adbd96335db818 | 7725ebc01b3b981897f018a5e81bfd8a62dea11d | refs/heads/master | 2022-01-31T00:30:46.056907 | 2019-06-05T08:30:05 | 2019-06-05T08:30:05 | 107,697,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | """
This script will load server Property information.
"""
import logging
from lib import localstore
from lib import my_env
from lib.murcs import *
from lib import murcsrest
# Bootstrap: configuration, MURCS REST client and local sqlite store
# come from the project's helper libraries.
cfg = my_env.init_env("bellavista", __file__)
r = murcsrest.MurcsRest(cfg)
lcl = localstore.sqliteUtils(cfg)
tablename = "serverproperty"
logging.info("Handling table: {t}".format(t=tablename))
records = lcl.get_table(tablename)
my_loop = my_env.LoopInfo("Server Properties", 20)
for trow in records:
    my_loop.info_loop()
    # Get excel row in dict format
    row = dict(trow)
    # serverId is popped so it is not repeated as an ordinary property below
    serverId = row.pop("serverId").lower()
    payload = dict(
        serverId=serverId
    )
    # Build the payload from the remaining non-empty, non-excluded columns:
    # fixed overrides first, then dict-valued mappings, then plain copies.
    for k in row:
        if row[k] and k not in excludedprops:
            if k in fixedprops:
                payload[k] = fixedprops[k]
            elif k in srv_prop2dict:
                payload[srv_prop2dict[k][0]] = {srv_prop2dict[k][1]: row[k]}
            else:
                payload[k] = row[k]
    r.add_server_property(serverId, payload)
cnt = my_loop.end_loop()
logging.info("End Application, {cnt} records processed.".format(cnt=cnt))
| [
"dirk.vermeylen@dxc.com"
] | dirk.vermeylen@dxc.com |
03383d50b28e350662087e4cbccf7252f9345a0a | 44eff1687bd3db0552ebbffc14d7178b0d946988 | /Crawling/Crawling/wsgi.py | e5e321b1486fbec68086438616042c03aeeb0997 | [] | no_license | Hwang-Sungmin/Crawling | 8a129087930798d027a66e30edce1a224114c381 | 04278e47e40165561436b925eef8b841f8009a46 | refs/heads/master | 2020-12-02T13:12:06.762340 | 2020-03-17T05:03:00 | 2020-03-17T05:03:00 | 231,016,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for Crawling project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Crawling.settings')
application = get_wsgi_application()
| [
"hsm511@ajou.ac.kr"
] | hsm511@ajou.ac.kr |
cf56319f264cb7dd6bbfac6b309392fa20ef6ce9 | 54fe1f253fb245deffe162f6cb97291b8fd2a6dd | /sticks.py | 6a5cb9a84db5b475dc5391e83d5e3fd8713065e0 | [] | no_license | qugit-hub/shiyanlou-code | 23186ebb3440a0b8680bf4625580f761403a1bda | bcb36663251b202404f6def70000367ec7b767c2 | refs/heads/master | 2023-02-26T06:59:15.960331 | 2021-02-03T03:00:25 | 2021-02-03T03:00:25 | 330,922,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/env python3
sticks=21
print("There are 21 sticks, you can take 1-4 number of sticks at a time.")
print("whoever will take the last stick will lose")
while True:
print("sticks left: ",sticks)
if sticks ==1:
print("You took the last stick, you lose")
break
sticks_taken=int(input("Take sticks(1-4):"))
if sticks_taken >=5 or sticks_taken <=0:
print("Wrong choice")
continue
print("Computer took: ",(5-sticks_taken),"\n")
sticks -=5
| [
"qym_94@163.com"
] | qym_94@163.com |
f748d138a6f55d5fa1f7c4e87defd034f0577a9a | 78bafc8f949612e5c4b984b405cf7955fb5118dd | /python/create_branch.py | f539bea6d0d3355d60c19a276b1d886babbb3357 | [] | no_license | arazmj/gdbnms | 1c1275589a8ebaf9abbfc06ec39e5b1abdcd8edf | 2d0cc4d14ab7ae3572623dacb7e2614def9a88ca | refs/heads/master | 2020-05-04T14:37:56.003499 | 2019-05-01T15:45:10 | 2019-05-01T15:45:10 | 179,205,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | import argparse
from neo4j import GraphDatabase
import sys, random
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
mobile = 0
client = 0
ip = 0
def rand_mac():
return "%02x:%02x:%02x:%02x:%02x:%02x" % (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
)
def clear_db(tx):
tx.run("MATCH (d) "
"DETACH DELETE d")
def add_friend(tx, name, friend_name):
tx.run("MERGE (a:Person {name: $name}) "
"MERGE (a)-[:KNOWS]->(friend:Person {name: 'a' + $friend_name})",
name=name, friend_name=friend_name)
def print_friends(tx, name):
for record in tx.run("MATCH (a:Person)-[:KNOWS]->(friend) WHERE a.name = $name "
"RETURN friend.name ORDER BY friend.name", name=name):
print(record["friend.name"])
def exec_file(tx, filename, branch):
f = open(filename)
cph = f.read()
cmds = cph.split(';')
for cmd in cmds:
if '$client' in cmd:
global client
client += 1
if '$mobile' in cmd:
global mobile
mobile += 1
if '$ip' in cmd:
global ip
ip += 1
if not cmd.isspace():
print(cmd)
tx.run(cmd, branch=branch, mobile=mobile, client=client, mac=rand_mac(), ip=ip)
parser = argparse.ArgumentParser()
parser.add_argument("number", help="number of branches")
parser.add_argument("common", help="global cypher file for networks")
parser.add_argument("branch", help="branch cypher file, will be replicated based on the number specified")
args = parser.parse_args()
with driver.session() as session:
session.write_transaction(clear_db)
# global file
session.write_transaction(exec_file, args.common, 1)
# branch file
for x in range(1, int(args.number) + 1):
session.write_transaction(exec_file, args.branch, x)
| [
"arazmj"
] | arazmj |
9effecb7ceecacaffbb19eacf43d2dfadefd80cb | 186065405b2d551c8b7c3783cc7d0e41fcbee7f2 | /part2/main-app/reader/main.py | bcceddd96602bc30d0cabb12a452b76030eef346 | [] | no_license | Deninc/kubernetes-course | daa0f41d617269403c2228c29828eb79866534c4 | aad816baa95e2158e465682b3165eb12adac45b9 | refs/heads/master | 2023-02-08T06:01:41.396120 | 2021-01-02T20:26:34 | 2021-01-02T20:26:34 | 309,653,962 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | import os
import hashlib
import urllib.request
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
app = FastAPI()
@app.get("/", response_class=HTMLResponse)
def read_root():
with open("/tmp/random_string.txt") as f:
s = f.readline().rstrip()
c = urllib.request.urlopen("http://pong-svc:1234/count").read()
h = hashlib.sha224(s.encode("utf-8")).hexdigest()
return f"""
<html>
<head>
<title>Main app</title>
</head>
<body>
<p>{os.environ["MESSAGE"]}</p>
<p>{s} {h}</p>
<p>Ping / Pongs: {c}</p>
</body>
</html>
"""
| [
"thang.nguyen@cloudasset.com"
] | thang.nguyen@cloudasset.com |
9878b2c84cacca4dd1e3d8247bee2f35e08bf723 | a16f09908c251f4fc162b28e8dbeb9d2017ab408 | /homepage/migrations/0005_alter_ticketmodel_status.py | c6a248ab9acf6eedc0feedb0c9be79adbf60b7c9 | [] | no_license | maglin07/BugTrackerApp | 95b5ec93d69777ed89d13a8665d50d701cb62694 | da6b257a3d00aa4abb344c76ec339349e226d23b | refs/heads/main | 2023-06-18T12:02:53.483915 | 2021-07-21T07:06:31 | 2021-07-21T07:06:31 | 388,016,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # Generated by Django 3.2.3 on 2021-05-25 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('homepage', '0004_alter_ticketmodel_status'),
]
operations = [
migrations.AlterField(
model_name='ticketmodel',
name='status',
field=models.CharField(choices=[('N', 'New'), ('P', 'In Progress'), ('D', 'Done'), ('I', 'Invalid')], default='N', max_length=2),
),
]
| [
"linmag138@outlook.com"
] | linmag138@outlook.com |
10a36d8ba182d5cfeb3359dcf0af11c05abaad56 | b4e2977a962ba91d32fe258b9de65b0d4182571f | /partytxt/controllers/voice.py | 30ae8f2cdc3ca5d3274a40c5ac7de6126d9b6a7c | [] | no_license | thedangler/smshub | 3266f7677c16324078a68df43f2854c0e353d58f | e808ebfc5725a9d080efe020289ddd17c4316580 | refs/heads/master | 2021-01-09T20:29:37.722295 | 2016-07-06T13:51:27 | 2016-07-06T13:51:27 | 61,894,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,318 | py | __author__ = 'mhebel'
from flask.ext.sqlalchemy import SQLAlchemy
from flask import Blueprint, render_template, flash, request, redirect, url_for,session
from flask import current_app
from twilio.rest import TwilioRestClient
from partytxt.models import User, People, SMSLog, VoiceMailBox,VoiceStats
import re, os, time, requests
import twilio.twiml
## Ask to record a message, listen to new messages, popular messages
## New messages are recordings not her by you
## Popular messages are new recordings that are up voted
## TODO add important voice messages that alert group memebers of the voicemail
db = SQLAlchemy()
voice = Blueprint('voice', __name__)
@voice.route('/voice_menu',methods=['GET','POST'])
def voice_menu():
resp = twilio.twiml.Response()
from_number = request.values.get('From',None)
call_sid = request.values.get("CallSid")
caller = People.query.filter(People.number == from_number).first()
if caller is None:
resp.reject()
return str(resp)
#initial voice mail - if hang up occurs we have a log
voice_mail = VoiceMailBox(caller.id,0,None,call_sid)
db.session.add(voice_mail)
log = SMSLog(from_=from_number,body="voice mail",name=caller.name,murls=None,type ='voice',sid = call_sid)
db.session.add(log)
db.session.commit()
resp.say("Hello " + caller.name)
with resp.gather(numDigits=1,action="/handle-key",methods="POST") as g:
g.say("Press 1 to record a 15 second message. Press 2 to listen to new messages. Press 3 to listen to top 3 messages.")
return str(resp)
@voice.route('/handle-key',methods=['GET','POST'])
def key_handler():
digit_pressed = request.values.get("Digits")
if digit_pressed == "1":
resp = twilio.twiml.Response()
resp.say("Record your message after the beep, press any key to end recording")
resp.record(maxLength="15",action='/handle-recording')
resp.gather().say("A message was not received,press any key to try again") # might need to change to press 1 or redirect
return str(resp)
elif digit_pressed == "2" or digit_pressed == "3":
resp = twilio.twiml.Response()
resp.redirect('/listen?message_type='+digit_pressed)
return str(resp)
else:
return redirect('/voice_menu')
@voice.route('/handle-recording',methods=['GET','POST'])
def record_message():
audio_url = request.values.get('RecordingUrl',None)
from_number = request.values.get('From',None)
call_sid = request.values.get("CallSid")
#could combine these queries
caller = People.query.filter(People.number == from_number).first()
vm = db.session.query(VoiceMailBox).filter(VoiceMailBox.call_sid == call_sid).first()
vm.audio_url = audio_url
vm.status = 1 # have a url
log = db.session.query(SMSLog).filter(SMSLog.sid == call_sid).first()
log.media_urls = audio_url
db.session.commit()
resp = twilio.twiml.Response()
resp.say("Message recorded")
resp.say("Bye")
resp.hangup()
#download mp3
mp3 = request.values.get("CallSid")+".mp3"
file_name = "%s/partytxt/static/voicemail/%s" %(os.getcwd(),mp3)
try:
f = open(file_name,'wb')
f.write(requests.get(audio_url+".mp3").content)
f.close()
## clean up recording
## only delete if download success use hooks?
recording_sid = audio_url.split("/Recordings/")[1]
client = TwilioRestClient(current_app.config['TWILIO_SID'],current_app.config['TWILIO_TOKEN'])
#client.recordings.delete(recording_sid) ## put this in a routine
except requests.exceptions.RequestException as e:
pass
return str(resp)
@voice.route('/listen',methods=['GET','POST'])
def listen():
current_message = request.values.get('current_msg',0)
from_number = request.values.get('From',None)
first = current_message == 0
digit = request.values.get('Digits',None)
#all the messages not listened by me
#make archive later
person = People.query.filter(People.number == from_number).first()
if person is None:
resp = twilio.twiml.Response()
resp.say("Unknown person")
resp.hangup()
return str(resp)
if request.values.get('message_type') == 3:
msgs = db.session.query(VoiceMailBox).join(VoiceStats).filter(VoiceStats.listened_by != person.name, VoiceMailBox.people_id != person.id).order_by(VoiceMailBox.vote_count.desc()).limit(3).all() # change query to get the top 3 popular.
else:
#don't listen to voice mails i made and ones I've already heard
msgs = db.session.query(VoiceMailBox).join(VoiceStats).filter(VoiceStats.listened_by != person.name, VoiceMailBox.people_id != person.id).all()
if current_message > len(msgs):
resp = twilio.twiml.Response()
resp.say("No more messages")
resp.say("Bye!")
resp.hangup()
return str(resp)
message = msgs[current_message]
## process digits
if digit is not None:
if digit == 1:
pass # skip
elif digit == 2:
#mark give up vote
## bug - can upvote multiple messages
message.vote_count = message.vote_count + 1
db.session.commit()
current_message = current_message + 1
resp = twilio.twiml.Response()
with resp.gather(numDigits = 1,action='/listen?current_msg='+str(current_message)+'&message_type='+request.values.get('message_type'),timeout=5) as g:
if first:
if request.values.get('message_type') == 2:
g.say("Press 1 to skip, 2 to up vote")
else:
g.say("Press 1 to skip")
else:
g.say("Next Message")
g.say(person.name+" left this beauty.")
if 'http' in message.audio_url or 'https' in message.audio_url:
g.play(message.audio_url)
else:
g.play(url_for('static',filename='voicemail/'+message.call_sid+'.mp3',_external=True))
#mark as played
#plays next message
Vs = VoiceStats(message.id,person.name)
db.session.add(Vs)
db.session.commit()
resp.redirect("/listen?current_message="+str(current_message)+"&Digits=1&message_type="+request.values.get('message_type'))
return str(resp)
| [
"matthew.hebel@schulich.uwo.ca"
] | matthew.hebel@schulich.uwo.ca |
407627485b2c556513409348892b4e55a9892ce0 | 1ecfacf515059041f6a90c486248d597934185b8 | /zadatak1.py | 139f1004092768ddfb1b46a3ec878e0e03fe0824 | [] | no_license | StefanRojovic/Programskoinzenjerstvo | c684e2bf0c7b708465bdc68fb05629811fa7c5df | e3bb2a9b5b45474729b088818eacf40d1b978b7e | refs/heads/master | 2021-04-24T05:20:23.963261 | 2020-06-03T20:46:19 | 2020-06-03T20:46:19 | 250,084,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | def main():
pass
if __name__ == '__main__':
main()
class Razlomak(object):
def __init__(self,brojnik,nazivnik):
self._brojnik = brojnik
self._nazivnik = nazivnik
@property
def brojnik(self):
return self._brojnik
@brojnik.setter
def brojnik(self,value):
self._brojnik = value
@property
def nazivnik(self):
return self._nazivnik
@nazivnik.setter
def nazivnik(self,value):
self.nazivnik = value
def skrati(self):
nd = 1
for i in range (1,self.brojnik+1):
if(self.brojnik % i ==0 and self.nazivnik % i ==0):
nd = i
i = i+1
return self.brojnik / nd and self.nazivnik / nd
def __repr__(self):
return "Razlomak("+repr(self._brojnik) + ',' + repr(self._nazivnik)+")"
def __str__(self):
return 'Razlomak' + str(self._brojnik)+ '|' +str(self._nazivnik)
def __eq__(self,other):
return self.brojnik/self.nazivnik == other.brojnik/other.nazivnik
def __lt__(self,other):
return self.brojnik/self.nazivnik < other.brojnik/other.nazivnik
def __le__(self,other):
return self.brojnik/self.nazivnik <= other.brojnik/other.nazivnik
def __mul__(self,other):
return Razlomak(self.brojnik*other.brojnik,self.nazivnik*other.nazivnik)
def __mod__(self,other):
return Razlomak(self.brojnik*other.nazivnik,self.nazivnik*other.brojnik)
def __add__(self,other):
a = self.nazivnik
b = other.nazivnik
nv = 1
if (a<b):
nv = a
if (b%a ==0):
b=b/a
else:
b = b
nv = a*b
else:
nv = b
if (a%b==0):
a = a/b
else:
a = a
nv = a*b
return Razlomak(nv/self.nazivnik*self.brojnik+nv/other.nazivnik*other.brojnik,nv)
def __sub__(self,other):
a=self.nazivnik
b = other.nazivnik
nv = 1
if (a<b):
nv = a
if (b%a==0):
b = b/a
else:
b=b
nv = a*b
else:
nv = b
if (a%b==0):
a = a/b
else:
a=a
nv = a*b
return Razlomak(nv/self.nazivnik*self.brojnik-nv/other.nazivnik*other.brojnik,nv)
| [
"noreply@github.com"
] | noreply@github.com |
5acdfe52eb25d3cd6de5b1bea421707d7b5ba3cd | 0630a7addb90600293f0ee0787dd6ab0ac77b09a | /LinkedList/FindDuplicatesInArray.py | 35abfd91e5fb529b564d57ba99c6e779fb8a5c5a | [
"MIT"
] | permissive | aritraaaa/Competitive_Programming | 48ecd3b6e28549889160c04cdbd19a5ad06fa49b | ee7eadf51939a360d0b004d787ebabda583e92f0 | refs/heads/master | 2023-06-12T07:04:07.698239 | 2021-07-01T11:11:24 | 2021-07-01T11:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | class Solution:
# @param A : tuple of integers
# @return an integer
def repeatedNumber(self, A):
'''
# This is the marker concept, by list modification
A = list(A)
if len(A) <= 1:
return -1
for i in range(len(A)):
num = abs(A[i])
if A[num]>=0:
A[num] = -1 * A[num] # marking negative means already visited A[i]
else:
return num
return -1
'''
# This is modified Floyd Warshall concept, cycle in linked list type
# https://medium.com/solvingalgo/solving-algorithmic-problems-find-a-duplicate-in-an-array-3d9edad5ad41
slow, fast = A[0], A[A[0]]
while slow!=fast:
slow = A[slow]
fast = A[A[fast]]
# the slow and fast pointers are at the same point now, i.e start point of cycle
slow = 0
while slow!=fast:
slow = A[slow]
fast = A[fast]
if slow == 0:
return -1
else:
return slow
| [
"amitrajitbose9@gmail.com"
] | amitrajitbose9@gmail.com |
4f82adf734cf7b91f9e7205d76ded8c1fe3b8db5 | 06c8afa94c045afb016e1dcdfe56ce6bc4c943d9 | /work_test/winpexpect_test.py | cefffa729d35c2e36462ef28e8bdd4dcff66e2ca | [] | no_license | Shadow-of-Diamond/my_python | 4512cc9a34fc59dc1cc45f696f7195b86d558590 | 5092100312cc299451309ec6c6b8faab1acee691 | refs/heads/master | 2020-07-07T06:14:42.042225 | 2019-08-26T07:53:32 | 2019-08-26T07:53:32 | 121,462,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | from winpexpect import winspawn
#ssh = winspawn('ssh',['-tty','xiezhenkun@192.168.1.10'])
ssh = winspawn('ssh -tt xiezhenkun@192.168.1.10')
ssh.logfile = sys.stdout
i = ssh.expect(['word:'], timeout = 5)
print (i)
ssh.sendline('xzK3.............') | [
"xzk1072503@gmail.com"
] | xzk1072503@gmail.com |
43406fad42fdb45d72fa3cbfab83dde3b7796090 | 4569d707a4942d3451f3bbcfebaa8011cc5a128d | /tracmilemixviewplugin/0.10/mmv/tests/test_rt.py | 6c8f860eb1a9198e639b85f4ace6ffe292f7992d | [] | no_license | woochica/trachacks | 28749b924c897747faa411876a3739edaed4cff4 | 4fcd4aeba81d734654f5d9ec524218b91d54a0e1 | refs/heads/master | 2021-05-30T02:27:50.209657 | 2013-05-24T17:31:23 | 2013-05-24T17:31:23 | 13,418,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | #from trac.core import *
#from trac.test import EnvironmentStub, Mock
#from trac.env import Environment
#from trac.core import ComponentManager, ComponentMeta
#
#import tickettemplate.ttadmin as ttadmin
#
#import os.path
#import tempfile
#import shutil
#
#import unittest
#
#class TicketTemplateTestCase(unittest.TestCase):
#
# def setUp(self):
## self.env = EnvironmentStub()
#
# env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
# self.env = Environment(env_path, create=True)
# self.db = self.env.get_db_cnx()
#
# self.compmgr = ComponentManager()
#
# # init TicketTemplateModule
# self.tt = ttadmin.TicketTemplateModule(self.compmgr)
# setattr(self.tt, "env", self.env)
#
# def tearDown(self):
# self.db.close()
# self.env.shutdown() # really closes the db connections
# shutil.rmtree(self.env.path)
#
# def test_get_active_navigation_item(self):
# req = Mock(path_info='/tickettemplate')
# self.assertEqual('tickettemplate', self.tt.get_active_navigation_item(req))
#
# req = Mock(path_info='/something')
# self.assertNotEqual('tickettemplate', self.tt.match_request(req))
#
# def test_get_navigation_items(self):
# req = Mock(href=Mock(tickettemplate=lambda:"/trac-tempenv/tickettemplate"))
# a, b, c= self.tt.get_navigation_items(req).next()
# self.assertEqual('mainnav', a)
# self.assertEqual('tickettemplate', b)
#
# def test_match_request(self):
# req = Mock(path_info='/tickettemplate')
# self.assertEqual(True, self.tt.match_request(req))
#
# req = Mock(path_info='/something')
# self.assertEqual(False, self.tt.match_request(req))
#
# def test_getTicketTypeNames(self):
# options = self.tt._getTicketTypeNames()
# self.assertEqual(["default", "defect", "enhancement", "task"], options)
#
# def test_loadSaveTemplateText(self):
# for tt_name, tt_text in [("default", "default text"),
# ("defect", "defect text"),
# ("enhancement", "enhancement text"),
# ("task", "task text"),
# ]:
# self.tt._saveTemplateText(tt_name, tt_text)
# self.assertEqual(tt_name + " text", self.tt._loadTemplateText(tt_name))
#
#def suite():
# suite = unittest.TestSuite()
# suite.addTest(unittest.makeSuite(TicketTemplateTestCase, 'test'))
# return suite
#
#if __name__ == '__main__':
# unittest.main()
| [
"richard@7322e99d-02ea-0310-aa39-e9a107903beb"
] | richard@7322e99d-02ea-0310-aa39-e9a107903beb |
eb37fe94306a9b07ea18d96085975147265fa95d | 1d26bf859705d3d6193fc3cc9da28d9a3af0de8e | /core_jobs/apps/jobs/search_indexes.py | 18fbeb56c7da36a46a1a6f8f1a405a65d2340baf | [] | no_license | tanay46/core_jobs | 3d8fe637127c6a4a4923b07ba4235858ee1b40ba | cbf0cf074332f0dd5abc4d9145d47eb989bcd577 | refs/heads/master | 2020-04-05T23:17:19.425885 | 2012-01-23T08:48:10 | 2012-01-23T08:48:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | import datetime
from haystack import indexes
from apps.jobs.models import *
class PostIndex(indexes.RealTimeSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr='user')
modified_on = indexes.DateTimeField(model_attr='modified_on')
def get_model(self):
return Post
def index_queryset(self):
"""Used when the entire index for model is updated."""
return self.get_model().objects.filter(modified_on__lte=datetime.datetime.now()) | [
"shindonghee@gmail.com"
] | shindonghee@gmail.com |
e31ee367fc6802635bca02f0078aae7a1c53faf9 | cc2d37a71eac8422b0722533766b3ee95b5b5d1a | /data_import/zmmutypes.py | 4480cded4f86f0e6a1b75637c6e882db37268004 | [] | no_license | JelleZijlstra/taxonomy | 88018a1a0ec114875c45cf87ffc825957fc3e870 | 56aac782e2cbbd084a14d2ad1b1572729ba387be | refs/heads/master | 2023-08-17T03:20:13.576928 | 2023-08-07T00:47:41 | 2023-08-07T00:47:41 | 10,962,492 | 0 | 0 | null | 2023-08-17T04:53:54 | 2013-06-26T08:21:35 | Python | UTF-8 | Python | false | false | 12,168 | py | import copy
import re
import unicodedata
from collections.abc import Iterable
from typing import Any
from taxonomy.db import constants, models
from . import lib
from .lib import DataT, PagesT
SOURCE = lib.Source("zmmutypes.txt", "ZMMU-types.pdf")
ORDER = "Отряд"
SECTION_INTRO = ("ЛЕКТОТИП", "ГОЛОТИП", "ПАРАТИП", "СИНТИП", "ПАРАЛЕКТОТИП", "НЕОТИП")
LABEL = (
"МЕСТО СБОРА",
"L OCALITY",
"LOCALITY",
"ДАТА",
"КОММЕНТАРИЙ",
"COMMENT",
"ПРИМЕЧАНИЕ",
"NOTICE",
"КОММЕНТАРИИ",
"ПРИМЕЧАНИЯ",
)
NAME_LINE = re.compile(
r"""
^(?P<original_name>[A-Z]\.?\[?[a-z]+\]?(\s\([A-Z][a-z]+\))?(\s[a-z\-\.\[\]]+)+)\s
(«[^»]+»\s)?
(?P<latin_authority>[A-Z].+?),\s(?P<year>\d{4})\s
\((?P<cyrillic_authority>[^:]+?),\s(?P<ref_year>\d{4}[а-яa-z]?):\s(?P<page_described>[^\)]+)\)\.?
$
""",
re.VERBOSE,
)
KEY_TO_KIND = {
"ЛЕКТОТИП": constants.SpeciesGroupType.lectotype,
"ГОЛОТИП": constants.SpeciesGroupType.holotype,
"НЕОТИП": constants.SpeciesGroupType.neotype,
}
def make_translation_table() -> dict[str, str]:
out = {}
with open("data_import/data/zmmu-transcribe.txt") as f:
for line in f:
line = unicodedata.normalize("NFC", line.strip())
if " " in line:
cyrillic, transcribed = line.split()
out[transcribed] = cyrillic
return out
def translate_chars(lines: Iterable[str]) -> Iterable[str]:
table = {ord(a): ord(b) for a, b in make_translation_table().items()}
for line in lines:
yield line.translate(table)
def extract_pages(lines: Iterable[str]) -> Iterable[list[str]]:
"""Split the text into pages."""
current_lines: list[str] = []
for line in lines:
if line.startswith("\x0c"):
yield current_lines
current_lines = []
line = line[1:]
current_lines.append(line)
yield current_lines
def label_pages(pages: Iterable[list[str]]) -> PagesT:
for i, lines in enumerate(pages):
if i < 164 or i > 240:
continue # Before mammal section
for i in range(1, len(lines) + 1):
if re.match(r"^\s+\d+\s+$", lines[-i]):
page_number = int(lines[-i].strip())
break
yield page_number, lines[:-i]
def align_columns(pages: PagesT) -> PagesT:
for page, lines in pages:
lines = lib.dedent_lines(lines)
try:
lines = lib.split_lines(lines, page, min_column=15, dedent_right=False)
except lib.NoSplitFound:
# Separately split the parts before and after the "Order ..." line.
lineno = 0
for i, line in enumerate(lines):
if line.lstrip().startswith(ORDER):
lineno = i
break
else:
assert False, f"could not find order in {page}"
before = lines[:lineno]
after = lines[lineno + 1 :]
lines = lib.split_lines(before, page) + lib.split_lines(after, page)
yield page, lines
def extract_names(pages: PagesT) -> DataT:
current_name: dict[str, Any] = {}
current_section: dict[str, Any] = {}
current_lines: list[str] = []
current_label = ""
def start_label(label: str, line: str) -> None:
nonlocal current_lines, current_label
assert current_name, f"cannot start {label} with {line!r} on an empty name"
if current_section:
container = current_section
else:
container = current_name
assert label not in container, f"duplicate label {label} for {container}"
current_lines = [line]
container[label] = current_lines
current_label = label
def start_section(label: str, line: str) -> None:
nonlocal current_section
if label in current_name:
section_label = line
else:
section_label = label
# This one is repeated in the source, apparently by mistake.
if section_label != "ПАРАТИП S-32814 Пол: ? Шкура в полной со-":
assert (
section_label not in current_name
), f"duplicate label {section_label} for {current_name}"
current_section = {"label": section_label}
current_name[section_label] = current_section
start_label(label, line)
for page, lines in pages:
for line in lines:
line = line.rstrip()
if not line:
continue
if current_section or not current_name:
if lib.initial_count(line, " ") > 3:
continue
if current_label == "ДАТА" and re.search(r"[a-z], \d{4}\)?$", line):
continue
if re.match(r"^†?[a-z]+$", line):
if current_name:
yield current_name
current_name = {"pages": [page], "root_name": line}
current_section = {}
current_label = ""
elif "name_line" not in current_name:
start_label("name_line", line)
elif line.startswith(SECTION_INTRO):
start_section(line.split()[0], line)
elif line.startswith(LABEL):
for label in LABEL:
if line.startswith(label):
start_label(label, line)
else:
current_lines.append(line)
if page == 228:
break # start of references
yield current_name
def extract_references(pages: PagesT) -> Iterable[list[str]]:
current_lines = []
for _, lines in pages:
for line in lines:
if line.strip() == "ЛИТЕРАТУРА" or not line.strip():
continue
if line.startswith(" "):
current_lines.append(line)
else:
if current_lines:
yield current_lines
current_lines = [line]
yield current_lines
def make_references_dict(refs: Iterable[list[str]]) -> dict[tuple[str, str], str]:
out = {}
for ref in refs:
text = lib.clean_line_list(ref)
if text == "The Times' Atlas of the World, 7th ed. London: Times Books, 1986.":
continue
match = re.match(r"^([^\d]+)(\d{4}(-\d+)?[^\.]?)\.", text)
assert match, text
year = match.group(2)
authors = match.group(1)
authority = ", ".join(a.split()[0] for a in authors.split(", ") if a)
out[(authority, year)] = text
return out
def handle_specimen(data: dict[str, Any]) -> dict[str, Any]:
detail = data[data["label"].split()[0]]
match = re.match(r"^(\(\?\) )?(S-\d+) Пол: (\??m\.|f\.|\?,?) (.*)$", detail)
if not match:
print(detail)
else:
data["type_specimen"] = f"ZMMU {match.group(2)}"
data["gender_value"] = {
"?m.": constants.SpecimenGender.male,
"m.": constants.SpecimenGender.male,
"f.": constants.SpecimenGender.female,
"?": constants.SpecimenGender.unknown,
"?,": constants.SpecimenGender.unknown,
}[match.group(3)]
rest = match.group(4)
if "ювенильный" in rest:
data["age"] = constants.SpecimenAge.juvenile
data["body_parts"] = rest
for label in ("LOCALITY", "L OCALITY"):
if label in data:
value = data[label]
data["loc"] = value
country = value.split()[-1].strip("«»[].")
country = lib.NAME_SYNONYMS.get(country, country)
try:
data["type_locality"] = models.Region.get(
models.Region.name == country
).get_location()
except models.Region.DoesNotExist:
pass
date_coll = data["ДАТА"]
try:
date, collector = date_coll.split(" КОЛЛ.: ", maxsplit=1)
except ValueError:
print(date_coll)
else:
if date != "?":
data["date"] = date.rstrip(".")
if collector != "?":
data["collector"] = collector
return data
def split_fields(names: DataT, refs_dict: dict[tuple[str, str], str]) -> DataT:
for name in names:
name["raw_text"] = copy.deepcopy(name)
match = NAME_LINE.match(name["name_line"].replace(" [sic!]", ""))
if not match:
assert False, f'failed to match {name["name_line"]}'
else:
name.update(match.groupdict())
name["authority"] = name["latin_authority"]
name["original_name"] = re.sub(
r"([a-zA-Z])\.\[([a-z]+)\] ", r"\1\2 ", name["original_name"]
)
refs_key = (name["cyrillic_authority"], name["ref_year"])
if refs_key in refs_dict:
name["verbatim_citation"] = refs_dict[refs_key]
paratypes = []
paralectotypes = []
syntypes = []
for key, value in list(name.items()):
if key != "raw_text" and isinstance(value, dict):
value = handle_specimen(value)
if key.startswith("ПАРАТИП"):
paratypes.append(value)
del name[key]
elif key.startswith("СИНТИП"):
syntypes.append(value)
del name[key]
elif key.startswith("ПАРАЛЕКТОТИП"):
paralectotypes.append(value)
del name[key]
elif key in KEY_TO_KIND:
name["species_type_kind"] = KEY_TO_KIND[key]
for subkey, subval in value.items():
if re.match(r"^[a-z_]+$", subkey):
name[subkey] = subval
if paratypes:
name["paratypes"] = paratypes
if paralectotypes:
name["paralectotypes"] = paralectotypes
if syntypes:
name["syntypes"] = syntypes
name["species_type_kind"] = constants.SpeciesGroupType.syntypes
yield name
def main() -> DataT:
lines = lib.get_text(SOURCE)
lines = translate_chars(lines)
unlabeled_pages = extract_pages(lines)
pages = label_pages(unlabeled_pages)
pages = lib.validate_pages(pages, verbose=False)
pages = align_columns(pages)
names: DataT = list(extract_names(pages))
refs = extract_references(pages)
refs_dict = make_references_dict(refs)
names = lib.clean_text(names)
names = split_fields(names, refs_dict)
names = lib.translate_to_db(names, "ZMMU", SOURCE, verbose=False)
conf = lib.NameConfig(
original_name_fixes={
"Neomys fodiens brachyotis": "Neomys fodiens brachyotus",
"Lepus mandshuricus sbph. melanotus": (
"Lepus mandschuricus subphasa melanonotus"
),
"Lepus timidus transbaikalensis": "Lepus timidus transbaicalicus",
"Citellus (Urocitellus) eversmanni incertedens": (
"Citellus (Urocitellus) eversmanni intercedens"
),
"Gulo gulo camtshaticus": "Gulo gulo kamtschaticus",
"A.[lticola] a.[rgentatus] tarasovi": "Alticola argentatus tarasovi",
"Microtus oeconomus": "Microtus oeconomus naumovi",
"Myotis emarginatus turcomanus": "Myotis emarginatus turcomanicus",
},
authority_fixes={
"Vorontsov & Boyeskorov et al.": "Vorontsov, Boyeskorov & Mezhzherin",
"Lavrenchenko, Likhnova, Baskevich & Bekele": (
"Lavrenchenko, Likhnova & Baskevich"
),
"Vorontsov, Boyeskorov & Lyapunova et al.": (
"Vorontsov, Boyeskorov, Lyapunova & Revin"
),
},
)
names = lib.associate_names(names, conf, max_distance=2)
names = lib.write_to_db(names, SOURCE, dry_run=False)
lib.print_field_counts(names)
return names
if __name__ == "__main__":
for p in main():
print(p)
| [
"jelle.zijlstra@gmail.com"
] | jelle.zijlstra@gmail.com |
d08c5b7f46e7c4c45e0f33e24570caf885631458 | b85bea9d8c4f64fecc7cce70c211e7c1f5fd5da2 | /2_frequent_item/preprocess.py | 97c2199fa4fe0b1ab481e9d629bd49e693a20454 | [] | no_license | alvinzhou66/data_mining_assignments | c27cc3433317baff3105cdd7d7737594160d99e3 | 2e175934e54b76c703e3c52bd438d2dc032d2273 | refs/heads/master | 2022-11-15T15:14:57.722238 | 2020-07-06T00:54:35 | 2020-07-06T00:54:35 | 277,409,292 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | #%%
import csv
import json
from pyspark import SparkContext
input_review_file_path = "data/review.json"
input_business_file_path = "data/business.json"
output_file_path = "data/user_business.csv"
sc = SparkContext.getOrCreate()
business = sc.textFile(input_business_file_path).map(lambda x: json.loads(x)).map(lambda x: (x['business_id'], x['state']))
business_id = business.filter(lambda x: x[1]=='NV').map(lambda x: x[0]).collect()
review = sc.textFile(input_review_file_path).map(lambda x: json.loads(x)).map(lambda x: (x['user_id'], x['business_id']))
user_id = review.filter(lambda x: x[1] in business_id).collect()
with open(output_file_path, 'w+', newline='') as output_file:
output_line = csv.writer(output_file)
output_line.writerow(['user_id', 'business_id'])
for line in user_id:
output_line.writerow(line)
output_file.close()
# %%
| [
"noreply@github.com"
] | noreply@github.com |
806b799a68a8f630d0d3c583863156a6fb5790ed | 98d07f95e07c3e76d356bd50e062602ce00dceb4 | /email_blast/urls.py | 05a2bd404b76b8eb4754eb59b8e091b9235ff2fc | [
"MIT"
] | permissive | todaatsushi/hhts_website | 23b7aee4e76878503459be334f6b6ef635dfd6f0 | 5ae44056d4cb577eab2e5a0525a330b3ac19e122 | refs/heads/master | 2022-04-29T04:55:25.569632 | 2019-08-12T16:52:27 | 2019-08-12T16:52:27 | 178,904,398 | 0 | 0 | null | 2022-04-22T21:03:59 | 2019-04-01T16:33:48 | Python | UTF-8 | Python | false | false | 560 | py | from django.urls import path
from .views import AllBlastsView, DraftMailView, DeleteMailView, UpdateMailView, ViewMailView, send_mail
urlpatterns =[
path('', AllBlastsView.as_view(), name='email-index'),
path('draft/', DraftMailView.as_view(), name='email-draft'),
path('<int:pk>/update/', UpdateMailView.as_view(), name='email-update'),
path('<int:pk>/delete/', DeleteMailView.as_view(), name='email-delete'),
path('<int:pk>/view/', ViewMailView.as_view(), name='email-view'),
path('<int:pk>/send/', send_mail, name='email-send'),
]
| [
"todaatsushi0@gmail.com"
] | todaatsushi0@gmail.com |
2dd88044536b0537240afba9e2ce20986046975e | 407d9b737babef2625182bd92ae56ae2efee8dfd | /DT_surface_floor_bottom.py | 008359f98f7b629520d29d44dc13de81474c11b1 | [] | no_license | Sujit-O/digitaltwin | 1ca4712ae2212de5b563ae9fee6b681b17b084f2 | 06950dc69b6f270742eeb8800a97ee5075a376ba | refs/heads/master | 2021-05-14T09:01:33.790322 | 2018-01-04T23:24:23 | 2018-01-04T23:24:23 | 116,317,258 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,084 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 12:16:00 2017
@author: AICPS
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 12:27:08 2017
@author: Sujit Rokka Chhetri
Project: Siemens Digital Twin Prject Summer 2017
"""
#!/usr/bin/python
#%% Import all the libraries
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.utils import shuffle
import os
import argparse
import matplotlib.pyplot as plt
#% Scikit modules
from sklearn import clone
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.metrics import mean_absolute_error
#from sklearn.metrics import mean_squared_log_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import Imputer
#%%Initialize Global Variables
# Root folder holding the pre-extracted DAQ feature CSVs (one subfolder per object).
featureParentPath='D:/GDrive/DT_Data/DAQ_Auto_Features/'
# Filename prefix for the per-flow-rate thickness KPI CSVs.
KPI_fileName='D:/GDrive/DT_Data/DAQ_Auto_Features/KPI_Object_'
# Root folder for the surface-directionality KPI files.
KPI_fileName_surf='D:/GDrive/DT_Data/DAQ_Auto/'
objectName = 'UM3_Corner_Wall_'  # printed-object folder-name prefix
segment_Numbers=[2,7,8,13]  # print segments used for training/testing
#features=['CWTFeatures.csv']
# Feature files combined for every channel/segment.
features=['timeFeatures.csv', 'frequencyFeatures.csv','STFTFeatures.csv','CWTFeatures.csv']
# Function to acquire Data for training the models
#%% This function combines the data in the feature level
def combineFeatures(channel, dataread, dataFeature):
    """Prefix *dataread*'s columns with a short channel label and append the
    frame column-wise to *dataFeature*.

    For channel folder names containing 'Channel' the label is the third
    '_'-separated part (joined with the fourth when there are exactly four
    parts); otherwise the raw channel string is used.

    Note: *dataread* is renamed in place (side effect preserved from the
    original implementation).
    """
    if 'Channel' in channel:
        parts = channel.split('_')
        # e.g. "A_Channel_Acc_Z" -> "Acc_Z", "A_Channel_Vib" -> "Vib"
        label = '_'.join(parts[2:4]) if len(parts) == 4 else parts[2]
    else:
        label = channel
    dataread.columns = label + '_' + dataread.columns
    return dataread if dataFeature.empty else pd.concat([dataFeature, dataread], axis=1)
#%% This function combines the data in the channel level
def combineChannels(features, channel, segNum, objectFolderName,
                    dataChannel, dataFeature, segmentName):
    """Read every feature CSV of one channel/segment, merge them into a single
    frame via combineFeatures, and append the result column-wise to
    *dataChannel*."""
    base = (featureParentPath + objectFolderName + '/' + channel +
            '/' + segmentName + '/segment_' + str(segNum) + '/')
    for featureName in features:
        frame = pd.read_csv(base + featureName)
        dataFeature = combineFeatures(channel, frame, dataFeature)
    return dataFeature if dataChannel.empty else pd.concat([dataChannel, dataFeature], axis=1)
#%% This function combines the data in the segment level
def combineSegNums(objectFolderName, segNum, KPI_values,
                   KPI_columnIndex, dataSeg, y_seg,
                   y_seg_surf1,
                   y_seg_surf2,
                   dataChannel, segmentName,
                   KPI_values_surf1,
                   KPI_values_surf2):
    """Accumulate feature rows and matching KPI targets for one segment.

    For every channel folder of the object (stray Windows 'desktop.ini'
    entries are skipped), per-channel features are gathered and stacked
    row-wise onto *dataSeg*, while the segment's thickness KPI and two
    surface-dispersion KPIs are replicated once per feature row and stacked
    onto the corresponding y_* frames.
    """
    thickness = KPI_values.values[segNum][KPI_columnIndex]
    surf1 = KPI_values_surf1.values[segNum][1]
    surf2 = KPI_values_surf2.values[segNum][1]
    for channel in os.listdir(featureParentPath + objectFolderName):
        if 'desktop' in channel:
            continue
        dataChannel = combineChannels(features, channel,
                                      segNum, objectFolderName,
                                      dataChannel, pd.DataFrame(), segmentName)
        dataSeg = dataChannel if dataSeg.empty else pd.concat([dataSeg, dataChannel], axis=0)
        n_rows = dataChannel.shape[0]
        y_t = pd.DataFrame({'Y_KPI_Thickness_in_mm': np.repeat(thickness, n_rows)})
        y_s1 = pd.DataFrame({'Y_KPI_Surface_Dispersion': np.repeat(surf1, n_rows)})
        y_s2 = pd.DataFrame({'Y_KPI_Surface_Dispersion': np.repeat(surf2, n_rows)})
        if y_seg.empty:
            y_seg, y_seg_surf1, y_seg_surf2 = y_t, y_s1, y_s2
        else:
            y_seg = pd.concat([y_seg, y_t], axis=0)
            y_seg_surf1 = pd.concat([y_seg_surf1, y_s1], axis=0)
            y_seg_surf2 = pd.concat([y_seg_surf2, y_s2], axis=0)
    return dataSeg, y_seg, y_seg_surf1, y_seg_surf2
#%% This function combines the data in flow rate level and returns the data
def getXData(KPI_fileName, KPI_fileName_surf, objectName, segment_Numbers,
             flowRates, segmentName, features):
    """Load feature rows and KPI targets for every flow rate in *flowRates*.

    Returns (data, y_thickness, y_flow, y_surf1, y_surf2): feature rows plus
    per-row thickness, flow-rate, and two surface-dispersion targets.

    NOTE(review): when *segmentName* contains neither 'Floor' nor 'Wall' the
    function returns None, so callers that tuple-unpack the result will fail
    with a TypeError -- confirm all callers pass a valid segment family.
    """
    data=pd.DataFrame()
    y_thickness=pd.DataFrame()
    y_flow=pd.DataFrame()
    y_surf1=pd.DataFrame()
    y_surf2=pd.DataFrame()
    for flow in flowRates:
        objectFolderName = objectName+ str(flow)+'p';
        fileNameKPI = KPI_fileName+str(flow)+'p.csv'
        # Surface KPI files 1/4 describe floor top/bottom; 3/2 wall back/front.
        if 'Floor' in segmentName:
            fileNameKPI_surf1 = KPI_fileName_surf+objectName+str(flow)+'p/KPI/1_directionality.csv'
            fileNameKPI_surf2 = KPI_fileName_surf+objectName+str(flow)+'p/KPI/4_directionality.csv'
        elif 'Wall' in segmentName:
            fileNameKPI_surf1 = KPI_fileName_surf+objectName+str(flow)+'p/KPI/3_directionality.csv'
            fileNameKPI_surf2 = KPI_fileName_surf+objectName+str(flow)+'p/KPI/2_directionality.csv'
        else:
            print('Segment Name does not match!')
            return
        KPI_values= pd.read_csv(fileNameKPI)
        KPI_values_surf1= pd.read_csv(fileNameKPI_surf1)
        KPI_values_surf2= pd.read_csv(fileNameKPI_surf2)
        # Thickness KPI lives in a different column depending on segment family.
        if 'Floor' in segmentName:
            KPI_columnIndex=1
        elif 'Wall' in segmentName:
            KPI_columnIndex=2
        else:
            pass
        dataSeg=pd.DataFrame()
        y_seg=pd.DataFrame()
        y_seg_surf1=pd.DataFrame()
        y_seg_surf2=pd.DataFrame()
        # Accumulate features/targets across the selected print segments.
        for segNum in segment_Numbers:
            dataChannel=pd.DataFrame()
            (dataSeg, y_seg,y_seg_surf1,
             y_seg_surf2) = combineSegNums(objectFolderName,
                                           segNum, KPI_values,
                                           KPI_columnIndex,
                                           dataSeg,
                                           y_seg,
                                           y_seg_surf1,
                                           y_seg_surf2,
                                           dataChannel,segmentName,
                                           KPI_values_surf1,
                                           KPI_values_surf2)
        if y_thickness.empty:
            y_thickness=y_seg
            y_surf1=y_seg_surf1
            y_surf2=y_seg_surf2
        else:
            y_thickness=pd.concat([y_thickness,y_seg], axis=0)
            y_surf1=pd.concat([y_surf1,y_seg_surf1], axis=0)
            y_surf2=pd.concat([y_surf2,y_seg_surf2], axis=0)
        # One flow-rate label per feature row of this group.
        KPI_flow=pd.DataFrame({'Y_KPI_Flow(%)':np.repeat(flow,
                               dataSeg.shape[0])})
        if y_flow.empty:
            y_flow=KPI_flow
        else:
            y_flow=pd.concat([y_flow,KPI_flow], axis=0)
        if data.empty:
            data=dataSeg
        else:
            data=pd.concat([data,dataSeg], axis=0)
    return data, y_thickness, y_flow , y_surf1, y_surf2
#%% Read the Data for Training
def parsingInit():
    """Parse the training hyper-parameters and run configuration from the
    command line, echo them, and return them as a tuple."""
    # (short flag, long flag, type, default, help text)
    specs = [
        ("-ne", "--n_estimators", int, 1000, "Enter the number of estimators"),
        ("-md", "--max_depth", int, 2, "Enter the max depth for the boosting"),
        ("-ms", "--min_samples_split", int, 2, "Determine the min sampling rate"),
        ("-lr", "--learning_rate", float, 0.01, "Determine the learning rate"),
        ("-loss", "--loss", str, 'ls', "Enter the type of loss"),
        ("-start", "--trainGroupStart", int, 80, "Train Group Starting Flowrate"),
        ("-stop", "--trainGroupStop", int, 120, "Train Group Stopping Flowrate"),
        ("-testGroup", "--testGroup", int, 130, "Test Group Emissions"),
        ("-surf", "--testSurface", str, 'segments_Floor', "Test Surface"),
    ]
    parser = argparse.ArgumentParser()
    for short_flag, long_flag, arg_type, default, help_text in specs:
        parser.add_argument(short_flag, long_flag, type=arg_type, nargs='?',
                            default=default, help=help_text)
    args = parser.parse_args()
    print ('Arguements:\n',
           '1-> n_estimators      : ', args.n_estimators ,'\n',
           '2-> max_depth         : ', args.max_depth ,'\n',
           '3-> min_samples_split : ', args.min_samples_split ,'\n',
           '4-> learning_rate     : ', args.learning_rate,'\n',
           '5-> loss              : ', args.loss,'\n',
           '6-> trainGroupStart   : ', args.trainGroupStart,'\n',
           '7-> trainGroupStop    : ', args.trainGroupStop,'\n',
           '8-> testGroup         : ', args.testGroup,'\n',
           '9-> testSurface       : ', args.testSurface,'\n')
    return (args.n_estimators, args.max_depth,
            args.min_samples_split, args.learning_rate,
            args.loss, args.trainGroupStart,
            args.trainGroupStop, args.testGroup, args.testSurface)
#%%
def heldout_score(clf, X_test, y_test, n_estimators):
    """Return the deviance of *clf* on held-out data at every boosting stage
    (one entry per estimator)."""
    stage_losses = np.zeros(n_estimators, dtype=np.float64)
    for stage, prediction in enumerate(clf.staged_decision_function(X_test)):
        stage_losses[stage] = clf.loss_(y_test, prediction)
    return stage_losses
#%%
def crossValidation(cv_clf_T, n_splits, n_estimators, X_train_T, y_train_T):
    """Average per-stage held-out deviance of *cv_clf_T* over an
    *n_splits*-fold cross-validation of the training data."""
    folds = KFold(n_splits=n_splits)
    total = np.zeros(n_estimators, dtype=np.float64)
    for train_idx, test_idx in folds.split(X_train_T, y_train_T):
        cv_clf_T.fit(X_train_T[train_idx], y_train_T[train_idx])
        total += heldout_score(cv_clf_T, X_train_T[test_idx],
                               y_train_T[test_idx], n_estimators)
    return total / n_splits
#%%
def maxDepthCheck(paramsGBR, X_train_T, y_train_T, X_test_T, y_test_T):
    """Plot train/test deviance of a GradientBoostingRegressor as ``max_depth``
    sweeps from 1 to ``paramsGBR['max_depth']``.

    Bug fix: the original aliased *paramsGBR* (``params = paramsGBR``) and then
    mutated it in the loop, clobbering the caller's dict; we now sweep on a
    shallow copy.  A dead ``clone(model)`` assignment (immediately overwritten
    by ``model.fit``) was also removed.
    """
    params = dict(paramsGBR)  # copy: do not mutate the caller's parameters
    n_depths = paramsGBR['max_depth']
    test_score = np.zeros((n_depths,), dtype=np.float64)
    train_score = np.zeros((n_depths,), dtype=np.float64)
    for i, depth in enumerate(range(1, n_depths + 1)):
        params['max_depth'] = depth
        model = ensemble.GradientBoostingRegressor(**params)
        clf_T = model.fit(X_train_T, y_train_T)
        y_pred = clf_T.predict(X_test_T)
        test_score[i] = clf_T.loss_(y_test_T, y_pred)
        y_pred_Train = clf_T.predict(X_train_T)
        train_score[i] = clf_T.loss_(y_train_T, y_pred_Train)
    plt.figure()
    plt.plot(train_score ,'b-', label='Training Set Deviance')
    plt.plot(test_score, 'r-', label='Test Set Deviance')
    plt.xlabel('Max Depths')
    plt.ylabel('Deviance')
    plt.show()
#%%
def minSplitCheck(paramsGBR, X_train_T, y_train_T, X_test_T, y_test_T):
    """Plot train/test deviance of a GradientBoostingRegressor as
    ``min_samples_split`` sweeps from 2 to ``paramsGBR['min_samples_split']+1``.

    Bug fix: the original aliased *paramsGBR* (``params = paramsGBR``) and then
    mutated it in the loop, clobbering the caller's dict; we now sweep on a
    shallow copy.  A dead ``clone(model)`` assignment (immediately overwritten
    by ``model.fit``) was also removed.
    """
    params = dict(paramsGBR)  # copy: do not mutate the caller's parameters
    n_splits = paramsGBR['min_samples_split']
    test_score = np.zeros((n_splits,), dtype=np.float64)
    train_score = np.zeros((n_splits,), dtype=np.float64)
    for i, split in enumerate(range(2, n_splits + 2)):
        params['min_samples_split'] = split
        model = ensemble.GradientBoostingRegressor(**params)
        clf_T = model.fit(X_train_T, y_train_T)
        y_pred = clf_T.predict(X_test_T)
        test_score[i] = clf_T.loss_(y_test_T, y_pred)
        y_pred_Train = clf_T.predict(X_train_T)
        train_score[i] = clf_T.loss_(y_train_T, y_pred_Train)
    plt.figure()
    plt.plot(train_score ,'b-', label='Training Set Deviance')
    plt.plot(test_score, 'r-', label='Test Set Deviance')
    plt.xlabel('min samples of split')
    plt.ylabel('Deviance')
    plt.show()
#%%
def preProcess(Xtr, y_thic):
    """Convert feature/target DataFrames to ndarrays, replacing every
    NaN / +inf / -inf entry with 0, and flatten the target to 1-D.

    The arrays are (typically) views on the DataFrames' buffers, so the
    cleaning also sanitises the original frames in place, as the original
    implementation did.  The large body of commented-out debug/validation
    code was removed; ``~np.isfinite`` replaces the three separate
    isinf/isneginf/isnan masks (np.isinf already covers -inf, so the old
    isneginf pass was redundant anyway).
    """
    X = Xtr.values
    X[~np.isfinite(X)] = 0  # NaN, +inf and -inf all become 0
    y_T = np.ravel(y_thic.values)
    y_T[~np.isfinite(y_T)] = 0
    return X, y_T
#%%
def normalizeData(X, y):
    """Min-max scale the feature matrix to [0, 1]; targets pass through
    unchanged."""
    print ('Normalizing the Data... \n')
    scaler = preprocessing.MinMaxScaler()
    return scaler.fit_transform(X), y
#%%
def splitData(X_T, y_T):
    """Deterministic 75/25 head/tail split into (X_train, y_train, X_test,
    y_test); no shuffling is performed here."""
    print ('Splitting the Data... \n')
    cut = int(X_T.shape[0] * 0.75)
    return X_T[:cut], y_T[:cut], X_T[cut:], y_T[cut:]
#%%
def featureImportance(clf, feature_names, fileName):
    """Save a horizontal bar chart of the 25 most important features of *clf*
    (importances rescaled to a 0-100 range) to *fileName*."""
    raw = clf.feature_importances_
    importance = 100.0 * (raw / raw.max())
    top_idx = np.argsort(importance)[::-1][:25]  # 25 largest, descending
    plt.figure()
    pos = np.arange(top_idx.shape[0]) + .5
    plt.barh(pos, importance[top_idx], align='center')
    plt.yticks(pos, feature_names[top_idx])
    plt.xlabel('Relative Feature Importance')
    plt.title('Feature Importance')
    plt.savefig(fileName, bbox_inches='tight', dpi=600)
#%%
def DT_surface_floor_bottom(start, stop, testGroup, segmentName,agingTest):
    """Train and evaluate a GradientBoostingRegressor that predicts the
    bottom-surface dispersion KPI from acoustic-emission features.

    Pipeline: load features for the training flow-rate group, preprocess
    (NaN/inf cleanup, mean imputation, min-max + standard scaling), fit the
    model, evaluate on the held-out ``testGroup`` flow rate, save metrics and
    feature-importance plots, then retrain with the test group included and
    save the updated results.

    Parameters
    ----------
    start, stop : int
        First/last flow rate (percent, in steps of 10) of the training group.
    testGroup : int
        Flow rate (percent) of the held-out evaluation group.
    segmentName : str
        Segment family; must contain 'Floor' or 'Wall'.
    agingTest : bool
        Only changes the name of the results folder.
    """
    #start=80
    #stop=90
    #testGroup=90
    #segmentName='segments_Floor'
    #agingTest=True
    print ('\n----------Start-----------')
    #(n_estimators,
    # max_depth,
    # min_samples_split,
    # learning_rate,
    # loss,
    # start,
    # stop,
    # testGroup,
    # segmentName) = parsingInit()
    # Fixed hyper-parameters (the CLI-based parsingInit() call above is disabled).
    n_estimators =1000
    max_depth = 2
    min_samples_split =2
    learning_rate=0.01
    loss ='ls'
    if agingTest:
        nameStore='_Aging_Test_allFeatures_surface_floor_bottom'
    else:
        nameStore='_Normal_Test_allFeatures_surface_floor_bottom'
    if 'Floor' in segmentName:
        #name1='Surface_Floor1_Top'
        name2='Surface_Floor4_Bottom'
    elif 'Wall' in segmentName:
        #name1='Surface_Wall3_Back'
        name2='Surface_Wall2_Front'
    else:
        # NOTE(review): execution continues after this message and `name2`
        # stays unbound below -- confirm callers always pass Floor/Wall.
        print('Invalid Segment Names')
    destinationFolder='D:/GDrive/DT_Data/DAQ_Auto_Features/Results_Surface'+nameStore
    if not os.path.exists(destinationFolder):
        os.makedirs(destinationFolder)
    # Output artefacts: feature-importance PDFs and a metrics CSV per run.
    #filename1=destinationFolder+'/Original_'+name1+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.pdf'
    filename2=destinationFolder+'/Original_'+name2+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.pdf'
    #fileNamecsv1=destinationFolder+'/'+name1+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.csv'
    fileNamecsv2=destinationFolder+'/'+name2+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.csv'
    #filename1_reTr=destinationFolder+'/Final_'+name1+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.pdf'
    filename2_reTr=destinationFolder+'/Final_'+name2+'_Start_'+str(start)+'_Stop_'+str(stop)+'_TestG_'+str(testGroup)+'.pdf'
    flowRates_Train=np.array([i for i in range(start,stop+10,10)])
    flowRates_Test=np.array([i for i in range(testGroup,testGroup+10,10)])
    flowRates_reTrain= np.append(flowRates_Train, flowRates_Test)
    #The 160 flow rate data is corrupted!!
    #TODO: recollect the data
    flowRates_Train=np.delete(flowRates_Train,np.where(flowRates_Train==160))
    flowRates_Test=np.delete(flowRates_Test,np.where(flowRates_Test==160))
    flowRates_reTrain=np.delete(flowRates_reTrain,np.where(flowRates_reTrain==160))
    print('Train: ',flowRates_Train)
    print('Test: ',flowRates_Test)
    print('reTrain: ',flowRates_reTrain)
    #%%
    print ('1. Extracting Data... \n')
    #Train Data
    (X_Train,y_thic_Train,
     y_flow_Train,y_surf1_Train,
     y_surf2_Train) = getXData(KPI_fileName,KPI_fileName_surf,objectName,
                               segment_Numbers, flowRates_Train,
                               segmentName,features)
    featureNames=X_Train.columns
    #Test Data
    (X_Test,y_thic_Test,y_flow_Test,
     y_surf1_Test,
     y_surf2_Test) = getXData(KPI_fileName,KPI_fileName_surf,objectName,
                              segment_Numbers, flowRates_Test,
                              segmentName,features)
    #ReTrain Data
    (X_reTrain,y_thic_reTrain,
     y_flow_reTrain,y_surf1_reTrain,
     y_surf2_reTrain) = getXData(KPI_fileName,KPI_fileName_surf,objectName,
                                 segment_Numbers, flowRates_reTrain,
                                 segmentName,features)
    #%%
    paramsGBR = {'n_estimators': n_estimators, 'max_depth': max_depth,
                 'min_samples_split': min_samples_split,
                 'learning_rate': learning_rate, 'loss': loss}
    model = ensemble.GradientBoostingRegressor(**paramsGBR)
    #clf_Tr1 = clone(model)
    #clf_Tr2 = clone(model)
    #%%
    # The "*1" variants (top surface) are disabled throughout; only the "*2"
    # (bottom surface) pipeline is active in this module.
    print ('2. Preprocessing Data...')
    imp1 = Imputer(missing_values='NaN', strategy='mean', axis=0)
    #X_Train, y_thic_Train = preProcess(X_Train,y_thic_Train)
    #X_Train1, y_surf1_Train = preProcess(X_Train,y_surf1_Train)
    X_Train2, y_surf2_Train = preProcess(X_Train,y_surf2_Train)
    #X_Train1, y_surf1_Train=shuffle(X_Train1, y_surf1_Train)
    X_Train2, y_surf2_Train=shuffle(X_Train2, y_surf2_Train)
    #X_Train1=imp1.fit_transform(X_Train1)
    X_Train2=imp1.fit_transform(X_Train2)
    #X_Test1,y_surf1_Test= preProcess(X_Test,y_surf1_Test)
    X_Test2,y_surf2_Test= preProcess(X_Test,y_surf2_Test)
    #X_Test1,y_surf1_Test=shuffle(X_Test1,y_surf1_Test)
    X_Test2,y_surf2_Test=shuffle(X_Test2,y_surf2_Test)
    #X_Test1=imp1.fit_transform(X_Test1)
    X_Test2=imp1.fit_transform(X_Test2)
    #min_max_scaler_Train_X1 = preprocessing.MinMaxScaler().fit(X_Train1);
    #scaler_Train_X1 = preprocessing.StandardScaler().fit(X_Train1)
    #X_Tr1=min_max_scaler_Train_X1.transform(X_Train1)
    #X_Tr1=scaler_Train_X1.transform(X_Tr1)
    # Scalers are fitted on the training data only and then applied to test data.
    min_max_scaler_Train_X2 = preprocessing.MinMaxScaler().fit(X_Train2);
    scaler_Train_X2 = preprocessing.StandardScaler().fit(X_Train2)
    X_Tr2=min_max_scaler_Train_X2.transform(X_Train2)
    X_Tr2=scaler_Train_X2.transform(X_Tr2)
    #X_Te1=min_max_scaler_Train_X1.transform(X_Test1)
    #X_Te1=scaler_Train_X1.transform(X_Te1)
    X_Te2=min_max_scaler_Train_X2.transform(X_Test2)
    X_Te2=scaler_Train_X2.transform(X_Te2)
    #X_reTrain1, y_surf1_reTrain = preProcess(X_reTrain,y_surf1_reTrain)
    X_reTrain2, y_surf2_reTrain = preProcess(X_reTrain,y_surf2_reTrain)
    #X_reTrain1, y_surf1_reTrain=shuffle(X_reTrain1, y_surf1_reTrain)
    X_reTrain2, y_surf2_reTrain=shuffle(X_reTrain2, y_surf2_reTrain)
    #X_reTrain1=imp1.fit_transform(X_reTrain1)
    X_reTrain2=imp1.fit_transform(X_reTrain2)
    #%%
    print ('3. Building Model with all the Samples...')
    #X_Tr1, y_surf1_Train=shuffle(X_Tr1, y_surf1_Train)
    X_Tr2, y_surf2_Train = shuffle(X_Tr2, y_surf2_Train)
    #clf_Tr1 = model.fit(X_Tr1, y_surf1_Train)
    clf_Tr2 = model.fit(X_Tr2, y_surf2_Train)
    print ('4. Saving results of Training...')
    #featureImportance(clf_Tr1, featureNames, filename1)
    featureImportance(clf_Tr2, featureNames, filename2)
    #%%
    print ('5. Predicting for Group: ',flowRates_Test,' ...')
    #y_pred_Te1=clf_Tr1.predict(X_Te1)
    y_pred_Te2=clf_Tr2.predict(X_Te2)
    #mse_Test1 = mean_squared_error(y_surf1_Test, y_pred_Te1)
    #mae_Test1=mean_absolute_error(y_surf1_Test, y_pred_Te1)
    #medae_Test1=median_absolute_error(y_surf1_Test, y_pred_Te1)
    #r2_Test1=r2_score(y_surf1_Test, y_pred_Te1)
    #exvs_Test1=explained_variance_score(y_surf1_Test, y_pred_Te1)
    mse_Test2 = mean_squared_error(y_surf2_Test, y_pred_Te2)
    mae_Test2=mean_absolute_error(y_surf2_Test, y_pred_Te2)
    medae_Test2=median_absolute_error(y_surf2_Test, y_pred_Te2)
    r2_Test2=r2_score(y_surf2_Test, y_pred_Te2)
    exvs_Test2=explained_variance_score(y_surf2_Test, y_pred_Te2)
    print ('6. Results for testing Group:',flowRates_Test,':')
    print ('\t Mean Squared Errors :', mse_Test2 )
    print ('\t Mean Absolute Error :', mae_Test2 )
    print ('\t Median Absolute Error :', medae_Test2)
    print ('\t R2 Score :', r2_Test2 )
    print ('\t Explained Variance Score:', exvs_Test2 )
    print ('7. Saving Results for testing Group:',flowRates_Test,':')
    #np.savetxt(fileNamecsv1, [[mse_Test1,
    #                           mae_Test1,
    #                           medae_Test1,
    #                           r2_Test1,
    #                           exvs_Test1]],
    #           delimiter=',',header='Mean Squared Error, Mean Absolute Error, Median Absolute Error,R2 Score, Explained Variance Score',comments='')
    np.savetxt(fileNamecsv2, [[mse_Test2,
                               mae_Test2,
                               medae_Test2,
                               r2_Test2,
                               exvs_Test2]],
               delimiter=',',header='Mean Squared Error, Mean Absolute Error, Median Absolute Error,R2 Score, Explained Variance Score',comments='')
    #%%
    print ('8. Retraining the Model with new emission Signal...')
    #min_max_scaler_Train_X1 = preprocessing.MinMaxScaler().fit(X_reTrain1);
    #scaler_Train_X1 = preprocessing.StandardScaler().fit(X_reTrain1)
    #X_reTr1=min_max_scaler_Train_X1.transform(X_reTrain1)
    #X_reTr1=scaler_Train_X1.transform(X_reTr1)
    # Re-fit the scalers on the extended (train + test group) data set.
    min_max_scaler_Train_X2 = preprocessing.MinMaxScaler().fit(X_reTrain2);
    scaler_Train_X2 = preprocessing.StandardScaler().fit(X_reTrain2)
    X_reTr2=min_max_scaler_Train_X2.transform(X_reTrain2)
    X_reTr2=scaler_Train_X2.transform(X_reTr2)
    #X_Te1=min_max_scaler_Train_X1.transform(X_Test1)
    #X_Te1=scaler_Train_X1.transform(X_Te1)
    X_Te2=min_max_scaler_Train_X2.transform(X_Test2)
    X_Te2=scaler_Train_X2.transform(X_Te2)
    #X_reTr1, y_surf1_reTrain=shuffle(X_reTr1, y_surf1_reTrain)
    X_reTr2, y_surf2_reTrain=shuffle(X_reTr2, y_surf2_reTrain)
    #clf_reTr1 = model.fit(X_reTr1, y_surf1_reTrain)
    clf_reTr2 = model.fit(X_reTr2, y_surf2_reTrain)
    print ('8. new Results after training with recent emissions:')
    #y_pred_Te1=clf_reTr1.predict(X_Te1)
    #mse_Test1 = mean_squared_error(y_surf1_Test, y_pred_Te1)
    #mae_Test1=mean_absolute_error(y_surf1_Test, y_pred_Te1)
    #medae_Test1=median_absolute_error(y_surf1_Test, y_pred_Te1)
    #r2_Test1=r2_score(y_surf1_Test, y_pred_Te1)
    #exvs_Test1=explained_variance_score(y_surf1_Test, y_pred_Te1)
    y_pred_Te2=clf_reTr2.predict(X_Te2)
    mse_Test2 = mean_squared_error(y_surf2_Test, y_pred_Te2)
    mae_Test2=mean_absolute_error(y_surf2_Test, y_pred_Te2)
    medae_Test2=median_absolute_error(y_surf2_Test, y_pred_Te2)
    r2_Test2=r2_score(y_surf2_Test, y_pred_Te2)
    exvs_Test2=explained_variance_score(y_surf2_Test, y_pred_Te2)
    print ('\t Mean Squared Error :', mse_Test2 )
    print ('\t Mean Absolute Error :', mae_Test2 )
    print ('\t Median Absolute Error :', medae_Test2 )
    print ('\t R2 Score :', r2_Test2 )
    print ('\t Explained Variance Score:', exvs_Test2)
    print ('9. Saving the new Results after training with recent emissions...')
    #f =open(fileNamecsv1,'a');
    #df = pd.DataFrame([[mse_Test1, mae_Test1,medae_Test1,r2_Test1, exvs_Test1]])
    #df.to_csv(f,index = False,header= False);
    #f.close();
    #featureImportance(clf_reTr1, featureNames,filename1_reTr)
    # Append the retrained metrics as a second row to the same CSV.
    f =open(fileNamecsv2,'a');
    df = pd.DataFrame([[mse_Test2, mae_Test2,medae_Test2,r2_Test2, exvs_Test2]])
    df.to_csv(f,index = False,header= False);
    f.close();
    featureImportance(clf_reTr2, featureNames,filename2_reTr)
    print ('-----------:Finished!:--------------- \n')
"noreply@github.com"
] | noreply@github.com |
78db5d3a48d034fb1a52c5fe15957372c718061f | d5ed3ed782fd4cc6e0682c04de96196bf7163f3d | /workspace/python1/settings.py | 27e1df36112abeb43e24ccec47625b38c115a028 | [] | no_license | superbicode/scrum | af2b6238f69ec68ca6bac0b68ec89cfbaffce0b7 | 388439498b6d5eee7c9526de9aa71feea82320a3 | refs/heads/master | 2016-09-09T23:13:33.967536 | 2015-09-10T22:40:00 | 2015-09-10T22:40:00 | 42,274,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | """
Django settings for python1 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fm!dy0v$4)dez5(361(3re1k_l-pn!5f_nvkf3p8k$%(senz2e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# NOTE(review): trusting X-Forwarded-Proto is only safe when the app sits
# behind a proxy that always sets/strips this header -- confirm deployment.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'scrum',  # project app
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'python1.urls'
WSGI_APPLICATION = 'python1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| [
"superbicode@gmail.com"
] | superbicode@gmail.com |
daabd04ef66e95085d98f1fa3e703af811ec01c0 | 2344d7aacbcec14b054beafe04225fb7ea0f7c0e | /mainapp/migrations/0018_auto_20190803_0033.py | b7c20cc2c9fe72124be0943fbaf6914eab402c32 | [] | no_license | RomanJordan/dota-website-public | 65873aba6f9bf2bd9080eab143ae70e4dd4c631e | 4fbb9d6b1de013310ea8d018a94a09d7dde894cd | refs/heads/master | 2022-12-07T18:47:29.804789 | 2019-09-10T04:24:05 | 2019-09-10T04:24:05 | 207,461,787 | 0 | 0 | null | 2022-11-22T04:14:16 | 2019-09-10T03:59:24 | Python | UTF-8 | Python | false | false | 1,871 | py | # Generated by Django 2.2.3 on 2019-08-03 04:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds eight hero-talent description
    columns (left/right branch at levels 10/15/20/25), each a
    CharField(max_length=100) with a placeholder default.  Do not hand-edit
    generated migrations beyond documentation."""
    dependencies = [
        ('mainapp', '0017_hero_hero_advantages'),
    ]
    operations = [
        migrations.AddField(
            model_name='hero',
            name='hero_talent_left_lvl10',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_left_lvl15',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_left_lvl20',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_left_lvl25',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_right_lvl10',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_right_lvl15',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_right_lvl20',
            field=models.CharField(default='default talent description', max_length=100),
        ),
        migrations.AddField(
            model_name='hero',
            name='hero_talent_right_lvl25',
            field=models.CharField(default='default talent description', max_length=100),
        ),
    ]
| [
"32529400+RomanJordan@users.noreply.github.com"
] | 32529400+RomanJordan@users.noreply.github.com |
0dab538450e0b1a1b063150ff917b8ea62aa6416 | d36b4b87a933a35f2c04cf3f0e2832326689a070 | /app/routes/activity_router.py | ac54df99a25d59927f2523aa65f293af2f4a9745 | [] | no_license | kirill-kundik/SportUnity-Backend | a2c8f45ece5b30e74ff373b34939968ae7152cad | 0ac9508799c9b31781ed2d5291175fd39b419d21 | refs/heads/master | 2023-03-09T01:12:54.360803 | 2020-02-23T07:09:37 | 2020-02-23T07:09:37 | 242,365,176 | 0 | 0 | null | 2021-02-26T02:45:23 | 2020-02-22T15:32:58 | Python | UTF-8 | Python | false | false | 9,697 | py | import datetime
import aiohttp.web
import app.postgres.queries as db
async def track(request):
    """POST /track: append GPS points to the caller's active activity.

    Expects a JSON array of {"userId", "lon", "lat"} items; only the first
    item's userId is read, so all items are assumed to belong to one user.
    Responds 400 when the payload is malformed or no activity is active.

    Fix: the bare ``except:`` (which in an async handler also swallows
    ``asyncio.CancelledError``) is narrowed to the payload errors that can
    actually occur; dead commented-out Couchbase code was removed.
    """
    try:
        body = await request.json()
        user_id = int(body[0]["userId"])
    except (KeyError, IndexError, TypeError, ValueError):
        # malformed JSON, empty array, or missing/non-numeric userId
        raise aiohttp.web.HTTPBadRequest()

    async with request.app['db'].acquire() as conn:
        activity = await db.get_user_active(conn, user_id)
        if not activity:
            raise aiohttp.web.HTTPBadRequest()
        for item in body:
            await db.add_point(conn, item["lon"], item["lat"], activity.id)
    return aiohttp.web.HTTPOk()
async def start_by_activity(request):
    """POST /startTrackActivity: mark a pre-planned activity as ACTIVE now.

    Responds 400 on a malformed payload or unknown activity.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the payload errors that can actually occur.
    """
    try:
        body = await request.json()
        activity_id = body["activityId"]
        _user_id = body["userId"]  # required by the API contract, unused below
    except (KeyError, TypeError, ValueError):
        raise aiohttp.web.HTTPBadRequest()

    async with request.app['db'].acquire() as conn:
        activity = await db.get_activity(conn, activity_id)
        if not activity:
            raise aiohttp.web.HTTPBadRequest()
        await db.update_activity(
            conn,
            activity_id,
            activity.name,
            db.ActivityStatus.ACTIVE,
            activity.expected_start,
            datetime.datetime.now(),  # actual start time
            None,                     # end_time: not finished yet
            activity.description,
            activity.type_fk
        )
    return aiohttp.web.HTTPOk()
async def start_by_type(request):
    """POST /startTrackType: create and immediately start an activity of the
    given type for the given user.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the payload errors that can actually occur.
    """
    try:
        body = await request.json()
        type_id = body["typeId"]
        user_id = body["userId"]
    except (KeyError, TypeError, ValueError):
        raise aiohttp.web.HTTPBadRequest()

    async with request.app["db"].acquire() as conn:
        type_ = await db.get_type(conn, type_id)
        await db.add_activity(
            conn, f"Activity - {type_.name}", db.ActivityStatus.ACTIVE, None, datetime.datetime.now(), None,
            "Created automatically when you started your activity", user_id, type_id
        )
    return aiohttp.web.HTTPOk()
async def add_activity(request):
    """POST /activity: schedule a future activity (status NOT_STARTED).

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed; ``fromisoformat`` raises ValueError on a malformed timestamp,
    which correctly maps to a 400 response.
    """
    try:
        body = await request.json()
        user_id = body["userId"]
        expected_start = datetime.datetime.fromisoformat(body["expectedStart"])
        type_id = body["typeId"]
    except (KeyError, TypeError, ValueError):
        raise aiohttp.web.HTTPBadRequest()

    async with request.app["db"].acquire() as conn:
        await db.add_activity(
            conn, None, db.ActivityStatus.NOT_STARTED, expected_start, None, None, None, user_id, type_id
        )
    return aiohttp.web.HTTPOk()
async def copy_activity(request):
    """POST /copyActivity/{id}: duplicate an existing activity for *userId*.

    All fields of the source activity (including its status and timestamps)
    are copied verbatim; only the owner changes.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the payload errors that can actually occur.
    """
    try:
        body = await request.json()
        user_id = body["userId"]
        activity_id = request.match_info["id"]
    except (KeyError, TypeError, ValueError):
        raise aiohttp.web.HTTPBadRequest()

    async with request.app["db"].acquire() as conn:
        activity = await db.get_activity(conn, activity_id)
        if not activity:
            raise aiohttp.web.HTTPBadRequest()
        await db.add_activity(
            conn,
            activity.name,
            activity.status,
            activity.expected_start,
            activity.start_time,
            activity.end_time,
            activity.description,
            user_id,
            activity.type_fk
        )
    return aiohttp.web.HTTPOk()
async def get_activity(request):
    """GET /activity/{id}: return one activity and its type as JSON.

    Timestamps are ISO-8601 strings or null.  Responds 400 for an unknown id.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the only error the lookup can raise.
    """
    try:
        activity_id = request.match_info["id"]
    except KeyError:
        raise aiohttp.web.HTTPBadRequest()

    async with request.app["db"].acquire() as conn:
        activity = await db.get_activity(conn, activity_id)
        if not activity:
            raise aiohttp.web.HTTPBadRequest()
        type_ = await db.get_type(conn, activity.type_fk)
    return aiohttp.web.json_response(
        {
            "id": activity.id,
            "name": activity.name,
            "status": activity.status.name,
            "expected_start": datetime.datetime.isoformat(activity.expected_start) if activity.expected_start else None,
            "start_time": datetime.datetime.isoformat(activity.start_time) if activity.start_time else None,
            "end_time": datetime.datetime.isoformat(activity.end_time) if activity.end_time else None,
            "description": activity.description,
            "user_id": activity.user_fk,
            "type": {
                "id": type_.id,
                "name": type_.name,
                "recent_loc_count": type_.recent_loc_count,
                "image_url": type_.image_url,
                "color": type_.color,
            }
        }
    )
async def get_nearby(request):
    """GET /getNearby: the most recent track points of every currently active
    activity, together with the owner and the type's display attributes."""
    payload = []
    async with request.app["db"].acquire() as conn:
        for act in await db.get_all_active(conn):
            act_type = await db.get_type(conn, act.type_fk)
            points = await db.get_points(conn, act.id, act_type.recent_loc_count)
            payload.append({
                "user_id": act.user_fk,
                "color": act_type.color,
                "image_url": act_type.image_url,
                "locations": [{"lat": p.lat, "lon": p.long} for p in points],
            })
    return aiohttp.web.json_response(payload)
async def end_activity(request):
    """POST /stopTrack: finish the caller's currently active activity.

    Responds 400 on a malformed payload or when no activity is active.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the payload errors that can actually occur.
    """
    try:
        body = await request.json()
        user_id = body["userId"]
    except (KeyError, TypeError, ValueError):
        raise aiohttp.web.HTTPBadRequest()

    async with request.app['db'].acquire() as conn:
        activity = await db.get_user_active(conn, user_id)
        if not activity:
            raise aiohttp.web.HTTPBadRequest()
        await db.update_activity(
            conn,
            activity.id,
            activity.name,
            db.ActivityStatus.FINISHED,
            activity.expected_start,
            activity.start_time,
            datetime.datetime.now(),  # end_time: now
            activity.description,
            activity.type_fk
        )
    return aiohttp.web.HTTPOk()
async def check_activity(request):
    """GET /check/{id}: report whether the user has an activity in progress.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed, and the duplicated true/false response branches are collapsed
    into a single truthiness test.
    """
    try:
        user_id = request.match_info["id"]
    except KeyError:
        raise aiohttp.web.HTTPBadRequest()

    async with request.app['db'].acquire() as conn:
        activity = await db.get_user_active(conn, user_id)
    # "da": True iff an active activity exists for this user.
    return aiohttp.web.json_response({
        "da": bool(activity),
    })
async def user_activities(request):
    """GET /activities/{id}: all activities of one user, each with its type.

    Timestamps are ISO-8601 strings or null.

    Fix: the bare ``except:`` (which also swallows ``asyncio.CancelledError``)
    is narrowed to the only error the lookup can raise.
    """
    try:
        user_id = request.match_info["id"]
    except KeyError:
        raise aiohttp.web.HTTPBadRequest()

    response = []
    async with request.app['db'].acquire() as conn:
        activities = await db.get_user_activities(conn, user_id)
        for active in activities:
            type_ = await db.get_type(conn, active.type_fk)
            response.append({
                "id": active.id,
                "name": active.name,
                "status": active.status.name,
                "expected_start": datetime.datetime.isoformat(
                    active.expected_start) if active.expected_start else None,
                "start_time": datetime.datetime.isoformat(active.start_time) if active.start_time else None,
                "end_time": datetime.datetime.isoformat(active.end_time) if active.end_time else None,
                "description": active.description,
                "type": {
                    "id": type_.id,
                    "name": type_.name,
                    "recent_loc_count": type_.recent_loc_count,
                    "image_url": type_.image_url,
                    "color": type_.color,
                }
            })
    return aiohttp.web.json_response(response)
async def get_all_activities(request):
    """Return every activity in the system, each with its full type record."""
    def _iso(ts):
        # ISO-8601 string, or None when the timestamp is unset.
        return ts.isoformat() if ts else None

    payload = []
    async with request.app["db"].acquire() as conn:
        for item in await db.get_all_activities(conn):
            activity_type = await db.get_type(conn, item.type_fk)
            payload.append({
                "id": item.id,
                "name": item.name,
                "status": item.status.name,
                "expected_start": _iso(item.expected_start),
                "start_time": _iso(item.start_time),
                "end_time": _iso(item.end_time),
                "description": item.description,
                "type": {
                    "id": activity_type.id,
                    "name": activity_type.name,
                    "recent_loc_count": activity_type.recent_loc_count,
                    "image_url": activity_type.image_url,
                    "color": activity_type.color,
                },
            })
    return aiohttp.web.json_response(payload)
def configure(app):
    """Register every HTTP route of the tracking service on *app*'s router."""
    route_table = [
        ('POST', '/track', track, 'track'),
        ('POST', '/activity', add_activity, 'activity'),
        ('POST', '/startTrackType', start_by_type, 'start_by_type'),
        ('POST', '/startTrackActivity', start_by_activity, 'start_by_activity'),
        ('POST', '/copyActivity/{id}', copy_activity, 'copy_activity'),
        ('GET', '/activity/{id}', get_activity, 'get_activity'),
        ('GET', '/getNearby', get_nearby, 'get_nearby'),
        ('POST', '/stopTrack', end_activity, 'end_activity'),
        ('GET', '/check/{id}', check_activity, 'check_activity'),
        ('GET', '/activities/{id}', user_activities, 'user_activities'),
        ('GET', '/allActivities', get_all_activities, 'all_activities'),
    ]
    for method, path, handler, name in route_table:
        app.router.add_route(method, path, handler, name=name)
| [
"goliathusua@gmail.com"
] | goliathusua@gmail.com |
5dd6e9370327b7405c71203257389455825b2b55 | 9d12c69327ee35fb2b478405a7a06e962bedc433 | /claims/models.py | e5768190bdf6d1c9a01e15d9710b7f1f219eb767 | [] | no_license | denizen-ru/partnerKredit | 3429eaad75942dcbc40d873a8a91a739f4458036 | 5e59afbc29080e135bef23c78be199987344a5e4 | refs/heads/master | 2021-01-01T03:49:55.460491 | 2016-05-25T09:25:40 | 2016-05-25T09:25:40 | 59,650,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,056 | py | from django.db import models
from accounts.models import SuperUser, Partner, CreditOrganization
# Kinds of credit products an Offer can represent (db value, human label).
OFFER_TYPE_CHOICES = (
    ('CC', 'Consumer credit'),
    ('MG', 'Mortgage'),
    ('CL', 'Car loan'),
    ('SME', 'SME credit')
)
# Lifecycle of a Claim: created locally ('NEW'), then sent out ('SENT').
CLAIM_STATUS_CHOICES = (('NEW', 'New claim'), ('SENT', 'Sended claim'))
class Offer(models.Model):
    """A credit product published by a CreditOrganization.

    Matched against questionnaires whose scoring lies between the
    min/max bounds; rotated (shown) during the configured window.
    """
    # Audit timestamps maintained automatically by Django.
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name='Creation date/time')
    changing_date = models.DateTimeField(auto_now=True,
                                         verbose_name='Changing date/time')
    # Window during which the offer is rotated to clients.
    rotation_beginning_date = models.DateTimeField(
        verbose_name='Rotation beginning date/time')
    rotation_ending_date = models.DateTimeField(
        verbose_name='Rotation ending date/time')
    title = models.CharField(max_length=255, verbose_name="Offer's title")
    offer_type = models.CharField(max_length=3, choices=OFFER_TYPE_CHOICES)
    # Scoring bounds a questionnaire must fall within to match this offer.
    min_scoring_points = models.IntegerField()
    max_scoring_points = models.IntegerField()
    credit_organization = models.ForeignKey(CreditOrganization,
                                            related_name='offers')
    class Meta:
        verbose_name = "Offer"
        verbose_name_plural = "Offers"
    def __str__(self):
        return self.title
class Questionnaire(models.Model):
    """A client's application form collected by a Partner, with its score."""
    # Audit timestamps maintained automatically by Django.
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name='Creation date/time')
    changing_date = models.DateTimeField(auto_now=True,
                                         verbose_name='Changing date/time')
    client_name = models.CharField(max_length=255,
                                   verbose_name="Client's full name")
    birthday = models.DateField(verbose_name="Client's birthday")
    phone_number = models.CharField(max_length=50,
                                    verbose_name="Client's phone")
    passport = models.CharField(max_length=100,
                                verbose_name="Client's passport")
    # Score used to match this questionnaire against Offer scoring bounds.
    scoring_points = models.IntegerField()
    partner = models.ForeignKey(Partner, related_name='questionnaires')
    class Meta:
        verbose_name = "Questionnaire"
        verbose_name_plural = "Questionnaires"
    def __str__(self):
        return self.client_name
class Claim(models.Model):
    """Links a Questionnaire to an Offer it was (or will be) submitted to."""
    creation_date = models.DateTimeField(auto_now_add=True,
                                         verbose_name='Creation date/time')
    # When the claim was transmitted to the credit organization.
    sending_date = models.DateTimeField(verbose_name='Sending date/time')
    questionnaire = models.ForeignKey(Questionnaire, related_name='claims',
                                      on_delete=models.CASCADE)
    offer = models.ForeignKey(Offer, related_name='claims',
                              on_delete=models.CASCADE)
    # 'NEW' until sent, then 'SENT' (see CLAIM_STATUS_CHOICES).
    status = models.CharField(max_length=4, choices=CLAIM_STATUS_CHOICES)
    class Meta:
        verbose_name = "Claim"
        verbose_name_plural = "Claims"
    def __str__(self):
        return '{} - {}'.format(self.questionnaire, self.offer)
| [
"denis.m.knyazev@gmail.com"
] | denis.m.knyazev@gmail.com |
90b5d147893c7fb4e128a6b6787bdd1219b8a793 | 57b62a2c0a64926eaa2f7f7fbef197276004e7d1 | /BUsBot/news.py | 711011a80ed8f937f4dd34a3a03d9db3d31731ad | [] | no_license | MasayoshiI/bu_bus_cb | 680520e1c75b34069cfefee55c05420622bafdab | b1aada6da2743ac6472912b070a6e28c8755491e | refs/heads/master | 2020-04-06T12:08:17.653883 | 2019-04-25T00:58:29 | 2019-04-25T00:58:29 | 157,444,367 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # should I separate news and twitter authorization?
class News:
    """Placeholder news source; no fetching logic is implemented yet."""

    def __init__(self):
        # Starts unauthorized; nothing in this class flips the flag yet.
        self.authorized = False

    def get_news(self, authorized):
        """Stub: always returns None until news retrieval is implemented."""
        return None
| [
"miwasa20@bu.edu"
] | miwasa20@bu.edu |
8d6d450c53634324abe9438facd8aa95c0534131 | 2492705863a873333c9df9d61732d1d5c0969b56 | /VIZARD - Python/Vizard1ShaderExample.py | 2d9de7605f1641eb61b096696a21e0a98ffabdaf | [] | no_license | jarsal558/Coding-Portfolio | 3a4f514321288f6f4abbbb82fda74bc0361f6fe1 | c08588ed62e6e9f3b8f6cc09196b0a2ab611692c | refs/heads/master | 2020-04-15T18:54:52.481281 | 2019-01-09T20:24:14 | 2019-01-09T20:24:14 | 164,930,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | vert_shader = '''
void main (void)
{
gl_TexCoord[0] = gl_TextureMatrix[0]*gl_MultiTexCoord0;
gl_FrontColor = gl_Color;
gl_Position = gl_Vertex;
}
'''
import viz
# Start the Vizard renderer with a white background.
viz.go()
viz.clearcolor(1,1,1)
# Vertex-only shader built from the GLSL source string above.
my_shader = viz.addShader(vert = vert_shader)
# Hand-build one triangle with per-vertex colors and texture coordinates.
viz.startLayer(viz.TRIANGLES)
viz.vertexColor(1,0,0); viz.texCoord(0,0)
viz.vertex(-0.5,-0.5,0)
viz.vertexColor(0,1,0); viz.texCoord(1,0)
viz.vertex(0.5,-0.5,0)
viz.vertexColor(0,0,1); viz.texCoord(0.5,1.0)
viz.vertex(0.0,0.5,0)
my_triangle = viz.endLayer()
my_triangle.apply(my_shader)
# Render both faces of the triangle (it may be viewed from behind).
my_triangle.disable(viz.CULLING)
my_texture = viz.addTexture('lake3.jpg')
my_triangle.texture(my_texture)
| [
"noreply@github.com"
] | noreply@github.com |
be503a2325322b3afaffbe0751bad9e35b57d03f | 1f15008a52ab5d60513af1f0cf7949bd69ebc7a0 | /src/domain/modules/vehicle_module/model_factory.py | 9acdbb0e69e136a87af541f7dd912b128a5b2c86 | [] | no_license | chris-coelho/saoma-v2 | 88dad03b7f75b71462a05210533289fadb7e1d24 | e5184b683c9a94c916cc38ede74d62d479eb1fdd | refs/heads/master | 2021-05-01T16:41:45.155695 | 2018-02-13T02:11:29 | 2018-02-13T02:11:29 | 121,051,212 | 0 | 0 | null | 2018-02-14T18:31:01 | 2018-02-10T20:24:28 | Python | UTF-8 | Python | false | false | 1,006 | py | from src.domain.modules.factory_base import FactoryBase
from src.domain.modules.vehicle_module.brand import Brand
from src.domain.modules.vehicle_module.model import Model
from src.domain.modules.vehicle_module.model_exceptions import ModelExceptions
class ModelFactory(FactoryBase):
    """Builds and validates vehicle Model entities."""

    @staticmethod
    def create(name, brand, _id=None):
        """Build a Model; return it when valid (validate raises otherwise)."""
        candidate = Model(name=name, brand=brand, _id=_id)
        return candidate if ModelFactory.validate(candidate) else None

    @staticmethod
    def create_from_db(entity_as_dict):
        """Rebuild a Model from its persisted dictionary representation."""
        return ModelFactory.create(
            entity_as_dict['name'],
            entity_as_dict['brand'],
            entity_as_dict['_id'],
        )

    @staticmethod
    def validate(entity):
        """Return True when *entity* is valid; raise ModelExceptions otherwise."""
        messages = []
        if not entity.name:
            messages.append("Nome obrigatório!")
        if not entity.brand or not isinstance(entity.brand, Brand):
            messages.append("Marca obrigatória!")
        if messages:
            raise ModelExceptions(messages)
        return True
| [
"cristovao3g@gmail.com"
] | cristovao3g@gmail.com |
bb090a14d03d9ae34916626a733163fb80a13d07 | 6fd5d30cf21716893388442eb0f9c16e13b91315 | /ABC/146/b.py | c2dd6c1a2676c0ffb2fe3790a90434aca68c06bd | [] | no_license | mgmk2/atcoder-python | 23d45f3195977f1f5839f6a6315e19cac80da2be | beec5857a8df2957ff7b688f717d4253b4196e10 | refs/heads/master | 2021-06-09T20:00:22.500222 | 2021-05-04T15:36:39 | 2021-05-04T15:36:39 | 179,711,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | n = int(input())
s = input()
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Caesar-shift each character of s forward by n (n was read above).
print(''.join(alphabet[(alphabet.find(ch) + n) % 26] for ch in s))
| [
"mgmk2.dev@gmail.com"
] | mgmk2.dev@gmail.com |
ad03b895afc6d180aa2358f68de8fcb600e871dd | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /W3Hptw6ieTtrWNw4H_17.py | 8740c16f02a86224026dad019c28269cb2d8f877 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,753 | py | """
The basic **Polybius Square** is a 5x5 square grid with the letters A-Z
written into the grid. "I" and "J" typically share a slot (as there are 26
letters and only 25 slots).
| 1| 2| 3| 4| 5
---|---|---|---|---|---
**1**| A| B| C| D| E
**2**| F| G| H| I/J| K
**3**| L| M| N| O| P
**4**| Q| R| S| T| U
**5**| V| W| X| Y| Z
The **Bifid** cipher uses the Polybius square but adds a layer of complexity.
Start with a secret message. Remove spaces and punctuation.
plaintext = "ikilledmufasa"
Encipher the message using the basic Polybius cipher (see my [previous
challenge](https://edabit.com/challenge/2C3gtb4treAFyWJMg) — right click and
select "open in new tab"), but write the numbers in two rows under the
message, like so:
i| k| i| l| l| e| d| m| u| f| a| s| a
---|---|---|---|---|---|---|---|---|---|---|---|---
2| 2| 2| 3| 3| 1| 1| 3| 4| 2| 1| 4| 1
4| 5| 4| 1| 1| 5| 4| 2| 5| 1| 1| 3| 1
Read off the numbers horizontally, in pairs:
22 23 31 13 42 14 14 54 11 54 25 11 31
Generate the ciphertext by converting these new pairs of numbers into new
letters using the Polybius square.
ciphertext = "ghlcrddyaykal"
Create a function that takes a plaintext or ciphertext, and returns the
corresponding ciphertext or plaintext.
### Examples
bifid("I killed Mufasa!") ➞ "ghlcrddyaykal"
bifid("ghlcrddyaykal") ➞ "ikilledmufasa"
bifid("hi") ➞ "go"
### Notes
N/A
"""
def bifid(text):
    """Bifid-cipher encode or decode *text*.

    Mode detection matches the original: any space in the input means
    "encode" (treat as plaintext), otherwise "decode" (ciphertext).
    Non-letters are stripped and letters uppercased. 'J' is absent from
    the square and raises ValueError, as the original lookup did.

    Improvements over the original: the dead ``ok = 2`` state and the
    per-character 25-cell table scans are gone; the square is a literal
    instead of being built with fragile counter arithmetic.
    """
    # 5x5 Polybius square with 'J' omitted (I/J classically share a cell).
    square = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    letters = [c for c in text.upper() if 'A' <= c <= 'Z']
    encode = ' ' in text

    # Row/column digits (1-based) for every letter, as two digit strings.
    rows = ''.join(str(square.index(c) // 5 + 1) for c in letters)
    cols = ''.join(str(square.index(c) % 5 + 1) for c in letters)

    if encode:
        # Fractionation: all row digits, then all column digits; read pairs.
        stream = rows + cols
        result = ''.join(
            square[(int(stream[i]) - 1) * 5 + int(stream[i + 1]) - 1]
            for i in range(0, len(stream), 2)
        )
    else:
        # Inverse: interleave row/col digits, split in half, pair up again.
        stream = ''.join(r + c for r, c in zip(rows, cols))
        half = len(letters)
        result = ''.join(
            square[(int(r) - 1) * 5 + int(c) - 1]
            for r, c in zip(stream[:half], stream[half:])
        )
    return result.lower()
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0633d7ede215b645bc834d754358a81a2a6958b3 | df21ff970371d278473a19e851f4ab34ff15f97a | /bin/cftp | 2aad09f522365becd158b70a8772d373df34fcfd | [
"MIT"
] | permissive | harshavardhan98/news-detection | 032546bf1a13bdaa0a9bbc0c4dd8c3fc4c75d361 | 9f2039354346f71d284f36e9d4b654feac071f7e | refs/heads/master | 2022-12-17T04:27:21.677387 | 2018-08-10T17:34:12 | 2018-08-10T17:34:12 | 144,314,511 | 1 | 0 | MIT | 2022-12-08T01:00:31 | 2018-08-10T17:25:58 | Python | UTF-8 | Python | false | false | 419 | #!/home/atrophy98/Desktop/sih/Application/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==17.9.0','console_scripts','cftp'
__requires__ = 'Twisted==17.9.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools entry-point shim: strips the script suffix from
# argv[0], then dispatches to Twisted's 'cftp' console-script callable.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('Twisted==17.9.0', 'console_scripts', 'cftp')()
    )
| [
"noreply@github.com"
] | noreply@github.com | |
b9bfcb28d66d01892330615f38b9420e3f501ff2 | 1e47d2fd3df5bb7958e6286fd85dd9bfc0341ac6 | /app2n9k.py | eadc9fcb73b5d91c485d4caa87e2b10783be2db6 | [
"BSD-3-Clause"
] | permissive | leeahnduk/Apps2N9k | cf463e7b6a4ecfab8e58666b827fee5646ea8e08 | 2992a4cc14ed4056e7e7fcf6565521988f0f9eea | refs/heads/master | 2023-01-22T19:29:04.753626 | 2020-11-13T00:53:35 | 2020-11-13T00:53:35 | 294,311,265 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 18,482 | py | import tetpyclient
import json
import requests.packages.urllib3
import sys
import os
import xlsxwriter
import argparse
import time
import csv
from argparse import ArgumentParser
from collections import defaultdict
from datetime import datetime
from builtins import input
from columnar import columnar
from tetpyclient import RestClient
from tqdm import tqdm as progress
from terminaltables import AsciiTable
import urllib3
# ANSI escape sequences used to color terminal output.
CEND = "\33[0m" #End
CGREEN = "\33[32m" #Information
CYELLOW = "\33[33m" #Request Input
CRED = "\33[31m" #Error
URED = "\33[4;31m" #Underlined error
Cyan = "\33[0;36m" #Return
# =================================================================================
# See reason below -- why verify=False param is used
# python3 app2n9k.py --url https://tet-cluster/ --credential api_credentials.json
# feedback: Le Anh Duc - anhdle@cisco.com
# =================================================================================
# Clusters commonly use self-signed certs; silence InsecureRequestWarning.
requests.packages.urllib3.disable_warnings()
# CLI: --url <cluster URL>  --credential <path to api_credentials.json>
parser = argparse.ArgumentParser(description='Tetration Create Policy under Apps')
parser.add_argument('--url', help='Tetration URL', required=True)
parser.add_argument('--credential', help='Path to Tetration json credential file', required=True)
args = parser.parse_args()
def CreateRestClient():
    """Build the Tetration REST client from the --url/--credential options.

    TLS verification is disabled because clusters typically present
    self-signed certificates (warnings are suppressed at module level).
    """
    return RestClient(args.url, credentials_file=args.credential, verify=False)
def GetApps(rc):
    """Fetch every application workspace; print errors and return None on failure."""
    response = rc.get('/applications')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve Apps list" + CEND)
    print(response.status_code)
    print(response.text)
def GetAppsId(Apps, name):
    """Return the id of the first workspace named *name*, or None."""
    try:
        for app in Apps:
            if name == app["name"]:
                return app["id"]
    except (TypeError, KeyError):  # was a bare except
        print(URED + "Failed to retrieve App ID " + CEND)
def ShowApps(Apps):
    # Render the workspace list as a numbered table (requires `columnar`).
    AppsList = []
    headers = ['Number', 'App Name', 'Author', 'App ID', 'Primary?']
    for i,app in enumerate(Apps): AppsList.append([i+1,app["name"] , app['author'], app["id"], app['primary']])
    table = columnar(AppsList, headers, no_borders=False)
    print(table)
def GetApplicationScopes(rc):
    """Fetch every application scope; print errors and return None on failure."""
    response = rc.get('/app_scopes')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve app scopes")
    print(response.status_code)
    print(response.text)
def GetAppScopeId(scopes, name):
    """Return the id of the scope named *name*; print a message when absent."""
    try:
        return [scope["id"] for scope in scopes if scope["name"] == name][0]
    except (IndexError, TypeError, KeyError):  # was a bare except
        print(URED + "App Scope {name} not found".format(name=name))
def ShowScopes(scopes):
    # Render the scope list as a numbered table (requires `columnar`).
    ScopesList = []
    headers = ['Number', 'Scope Name', 'Scope ID', 'VRF ID']
    for i,scope in enumerate(scopes): ScopesList.append([i+1,scope["name"] , scope["id"], scope['vrf_id']])
    table = columnar(ScopesList, headers, no_borders=False)
    print(table)
def GetPolicies(rc, app_id):
    """Fetch the policies of a workspace; print errors and return None on failure."""
    response = rc.get('/applications/' + app_id + '/policies')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve Policies list")
    print(response.status_code)
    print(response.text)
def GetClusters(rc, appid):
    """Fetch a workspace's clusters; print errors and return None on failure."""
    response = rc.get('/applications/' + appid + '/clusters')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve Clusters list" + CEND)
    print(response.status_code)
    print(response.text)
def GetClustersName(clusters):
    """Return a header row plus [id, name, approved] rows for each cluster."""
    Clusters_Detail = [["ID","NAME","APPROVED"]]
    try:
        for value in clusters:
            Clusters_Detail.append([value["id"],value["name"],value["approved"]])
        return Clusters_Detail
    except (TypeError, KeyError):  # was a bare except
        print(CRED + "Clusters detail not found" +CEND)
def getDefaultDetail(rc, id):
    """Fetch a workspace's default (discovered) policies; None on failure."""
    response = rc.get('/applications/' + id + '/default_policies')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve Default Policies from your Apps" + CEND)
    print(response.status_code)
    print(response.text)
def getAbsoluteDetail(rc, id):
    """Fetch a workspace's absolute policies; None on failure."""
    response = rc.get('/applications/' + id + '/absolute_policies')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve Absolute Policies from your Apps" + CEND)
    print(response.status_code)
    print(response.text)
def getCatchAllDetail(rc, id):
    """Fetch a workspace's catch-all policy; None on failure."""
    response = rc.get('/applications/' + id + '/catch_all')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve catch_all Policy from your Apps" + CEND)
    print(response.status_code)
    print(response.text)
def selectTetApps(apps):
    """Prompt the user to pick workspaces and return the chosen app ids.

    Accepts comma-separated 1-based numbers and dash ranges (e.g. "1,3-5").
    """
    print (Cyan + "\nHere are all Application workspaces in your cluster: " + CEND)
    ShowApps(apps)
    choice = input('\nSelect which Tetration Apps (Number, Number) above you want to download polices: ')
    appIDs = []
    for token in choice.split(','):
        if '-' in token:
            start, end = token.split('-')
            # Bug fix: this branch referenced an undefined `resp` and
            # re-subtracted 1 from an already 0-based range index.
            for idx in range(int(start) - 1, int(end)):
                appIDs.append(apps[idx]['id'])
        else:
            appIDs.append(apps[int(token) - 1]['id'])
    return appIDs
def downloadPolicies(rc,appIDs):
    # Download Policies JSON files from Apps workspace
    apps = []
    for appID in appIDs:
        print('Downloading app details for '+appID + "into json file")
        apps.append(rc.get('/openapi/v1/applications/%s/details'%appID).json())
    #json_object = json.load(apps)
    for app in apps:
        with open('./'+app['name'].replace('/','-')+'.json', "w") as config_file:
            # NOTE(review): dumps the FULL `apps` list into each per-app file;
            # `json.dump(app, ...)` looks intended — confirm before changing.
            json.dump(apps, config_file, indent=4)
        print(app['name'].replace('/','-')+".json created")
    # Returns the list of app-detail dicts for further processing.
    return apps
def GetAppVersions(rc, appid):
    """Fetch the version history of a workspace; None on failure."""
    response = rc.get('/applications/' + appid + '/versions')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve list of versions for your app" + CEND)
    print(response.status_code)
    print(response.text)
def GetLatestVersion(app_versions):
    """Return the first version string containing 'v' (the latest), or None."""
    try:
        for vers in app_versions:
            if "v" in vers["version"]:
                return vers["version"]
    except (TypeError, KeyError):  # was a bare except
        print(URED + "Failed to retrieve latest app version" + CEND)
def getAppDetail(rc, id):
    """Fetch a single application workspace record; None on failure."""
    response = rc.get('/applications/' + id)
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve App detail" + CEND)
    print(response.status_code)
    print(response.text)
def GetInvFromApps(apps):
    """Return the 'inventory_filters' list from the first downloaded app detail.

    *apps* is a list whose first element is the list of app-detail dicts
    produced by downloadPolicies(); returns None when no app carries filters.
    """
    for app in apps[0]:
        if 'inventory_filters' in app.keys():
            return app['inventory_filters']
        else:
            # Bug fix: the color constant was inside the string literal
            # ("CRED + There's no ..."), so no color was ever applied.
            print(CRED + "There's no inventory filters in the apps" + CEND)
def GetInventoriesId(inventories, name):
    """Print and return the id of the inventory filter named *name*."""
    try:
        for inv in inventories:
            if name == inv["name"]:
                # Bug fix: the original ended this print with the undefined
                # name `Cend` (should be CEND), raising NameError on success.
                print (Cyan + "\nHere is your Inventory ID: " + inv["id"] + CEND)
                return inv["id"]
            else:
                continue
    except (TypeError, KeyError):  # was a bare except
        print(URED + "Inventory {name} not found".format(name=name))
def GetInventoriesNamewithID(inventories):
    """Return [name, id] pairs for every inventory filter."""
    inventoriesList = []
    try:
        for inv in inventories:
            inventoriesList.append([inv["name"], inv["id"]])
        return inventoriesList
    except (TypeError, KeyError):  # was a bare except
        print(URED + "Failed to retrieve inventories name with ID list" + CEND)
def GetInventories(rc):
    """Fetch every inventory filter; print errors and return None on failure."""
    response = rc.get('/filters/inventories')
    if response.status_code == 200:
        return response.json()
    print(URED + "Failed to retrieve inventories list" + CEND)
    print(response.status_code)
    print(response.text)
def filterToString(invfilter):
    """Render a Tetration inventory-filter tree as a human-readable query."""
    if 'filters' not in invfilter.keys():
        # Leaf at the top level: plain "field op value" (no user_ rewrite here,
        # matching the original's asymmetry).
        return invfilter['field'] + ' ' + invfilter['type'] + ' ' + str(invfilter['value'])
    parts = []
    for child in invfilter['filters']:
        if 'filters' in child.keys():
            parts.append(filterToString(child))
        elif 'filter' in child.keys():
            parts.append(child['type'] + filterToString(child['filter']))
        else:
            # user-annotation fields are shown with a '*' prefix.
            parts.append(child['field'].replace('user_', '*') + ' ' + child['type'] + ' ' + str(child['value']))
    joiner = ' ' + invfilter['type'] + ' '
    return '(' + joiner.join(parts) + ')'
def resolveFilter(rc, filters):
    """Return the list of IP addresses currently matching *filters*' query."""
    matched_ips = []
    payload = json.dumps({'filter': filters['query']})
    resp = rc.post('/inventory/search', json_body=payload)
    if resp:
        for record in resp.json()['results']:
            matched_ips.append(record['ip'])
    return matched_ips
def GetAppScopeName(scopes, id):
    """Return the name of the scope whose id matches; print a message if absent."""
    try:
        return [scope["name"] for scope in scopes if scope["id"] == id][0]
    except (IndexError, TypeError, KeyError):
        # Bug fix: the original formatted with keyword `name` (undefined here)
        # against a "{id}" placeholder, crashing inside the handler.
        print("App Scope {id} not found".format(id=id))
def ShowApplicationScopes(scopes):
    """
    List all the Scopes in Tetration Appliance
    Scope ID | Name | Policy Priority | Query | VRF ID | Parent Scope ID | Root Scope ID | Created At | Updated At
    """
    headers = ['Scope ID', 'Name', 'Policy Priority', 'Query', 'VRF ID', 'Parent Scope ID', 'Root Scope ID', 'Created At', 'Updated At']
    data_list = []
    # Epoch timestamps are rendered as local "YYYY-MM-DD HH:MM:SS".
    for x in scopes: data_list. append([x['id'],
                                       x['name'],
                                       x['policy_priority'],
                                       x['short_query'],
                                       x['vrf_id'],
                                       x['parent_app_scope_id'],
                                       x['root_app_scope_id'],
                                       time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x['created_at'])),
                                       time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x['updated_at']))])
    table = columnar(data_list, headers, no_borders=False)
    print(table)
def GetVRFs(rc):
    """Fetch all VRFs in the cluster; print errors and return None on failure."""
    resp = rc.get('/vrfs')
    if resp.status_code != 200:
        # Bug fix: the failure message said "app scopes" (copy-paste error).
        print("Failed to retrieve VRFs")
        print(resp.status_code)
        print(resp.text)
    else:
        return resp.json()
def ShowVRFs(vrfs):
    """
    List all the Apps in Tetration Appliance
    VRF ID | Created At | Updated At | Name | Tenant name | Root Scope ID
    """
    data_list = []
    headers = ['VRF ID', 'Created At', 'Updated At', 'Name', 'Tenant Name', 'Root Scope ID']
    # Epoch timestamps are rendered as local "YYYY-MM-DD HH:MM:SS".
    for x in vrfs:
        data_list.append([x['id'], time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(x['created_at'])), time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(x['updated_at'])), x['name'], x['tenant_name'], x['root_app_scope_id']])
    table = columnar(data_list, headers, no_borders=False)
    print(table)
def GetRootScope(vrfs):
    #return list of Root Scopes and its' names
    # NOTE: despite the name, this only prints a table and returns None.
    rootScopes = []
    headers = ['Root Scope Name', 'VRF ID']
    for vrf in vrfs:
        rootScopes.append([vrf["name"] , vrf["vrf_id"]])
    table = columnar(rootScopes, headers, no_borders=False)
    print(table)
def GetAllSubScopeNames(scopes, name):
    """Return every scope name containing *name* as a substring."""
    subScopeNames = []
    try:
        for scope in scopes:
            if name in scope["name"]:
                subScopeNames.append(scope["name"])
        return subScopeNames
    except (TypeError, KeyError):  # was a bare except
        print(URED + "App Scope {name} not found".format(name=name))
def convApps2n9k(rc):
    # Interactively convert a workspace's discovered (default) policies into a
    # Nexus 9000 IP access-list, printed to stdout and written to ACL_config.txt.
    AllApps = GetApps(rc)
    scopes = GetApplicationScopes(rc)
    apps = []
    appIDs = selectTetApps(AllApps)
    apps.append(downloadPolicies(rc, appIDs))
    # Only the FIRST selected workspace's policies are converted below.
    def_policies = getDefaultDetail(rc,str(appIDs[0]))
    #print ("Default Policies: \n" +json.dumps(def_policies, indent=4))
    abs_policies = getAbsoluteDetail(rc,str(appIDs[0]))
    #print ("Absolute Policies: \n" + json.dumps(abs_policies, indent=4))
    # Load in the IANA Protocols
    # Maps protocol number (as string) -> row; 'Keyword' gives the ACL name.
    protocols = {}
    try:
        with open('protocol-numbers-1.csv') as protocol_file:
            reader = csv.DictReader(protocol_file)
            for row in reader:
                protocols[row['Decimal']]=row
    except IOError:
        print('%% Could not load protocols file')
        return
    except ValueError:
        print('Could not load improperly formatted protocols file')
        return
    # Load in N9k known ports
    # NOTE(review): `ports` is never used afterwards — presumably a leftover.
    ports = {}
    print('\nN9k ACL Config\n---------------------------------------\n\n')
    #Process nodes and output information to N9k Objects
    file1 = open("ACL_config.txt","w")
    print ('ip access-list tet-acl')
    file1.write('ip access-list tet-acl \n')
    #Process policies and output information as N9k ACL Lines
    for policy in def_policies:
        #print ("Policy: \n" + json.dumps(policy, indent=4))
        for param in policy['l4_params']:
            #print ("L4 Param: \n" + json.dumps(param, indent=4))
            # Normalize each L4 param to {port_min, port_max, proto};
            # ICMP (proto 1) has no ports.
            l4params = []
            if param['proto'] == 1: l4params.append({'port_min': 'NA' ,'port_max': 'NA','proto':param['proto']})
            else: l4params.append({'port_min':param['port'][0],'port_max':param['port'][1],'proto':param['proto']})
            #if policy['consumer_filter']['name'] == 'Default' and policy['provider_filter']['name'] != 'Default':
            #print ("L4 Params: \n" + json.dumps(l4params, indent=4))
            for rule in l4params:
                # Skip intra-filter (same consumer and provider) policies.
                if policy['consumer_filter_id'] != policy['provider_filter_id']:
                    if rule['proto'] == 1:
                        # Resolve consumer/provider endpoints (clusters or
                        # inventory filters) to concrete IP lists.
                        # NOTE(review): ConsipSet/ProvipSet stay unbound when
                        # no cluster/filter name matches — confirm inputs.
                        for app in apps[0]:
                            if 'clusters' in app.keys():
                                clusters = GetClusters(rc,str(appIDs[0]))
                                for cluster in clusters:
                                    if policy['provider_filter']['name'] == cluster['name']:
                                        ProvipSet = resolveFilter(rc, cluster)
                                    if policy['consumer_filter']['name'] == cluster['name']:
                                        ConsipSet = resolveFilter(rc, cluster)
                            if 'inventory_filters' in app.keys():
                                filters = GetInvFromApps(apps)
                                for invfilter in filters:
                                    if invfilter['name'] != 'Default':
                                        if policy['provider_filter']['name'] == invfilter['name']:
                                            ProvipSet = resolveFilter(rc, invfilter)
                                        if policy['consumer_filter']['name'] == invfilter['name']:
                                            ConsipSet = resolveFilter(rc, invfilter)
                        # ICMP: emit host-to-host permits with no port clause.
                        # NOTE(review): both 'Default' checks below test the
                        # provider filter; the first looks like it should test
                        # the consumer filter — confirm before changing.
                        for a in ConsipSet:
                            for b in ProvipSet:
                                if a != b:
                                    print ("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['provider_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any"))
                                    file1.write("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['provider_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any\n"))
                    elif (rule['proto'] == 6) or (rule['proto'] == 17):
                        # TCP (6) / UDP (17): same endpoint resolution, then
                        # emit "eq" for a single port or "range" for a span.
                        for app in apps[0]:
                            if 'clusters' in app.keys():
                                clusters = GetClusters(rc,str(appIDs[0]))
                                for cluster in clusters:
                                    if policy['provider_filter']['name'] == cluster['name']:
                                        ProvipSet = resolveFilter(rc, cluster)
                                    if policy['consumer_filter']['name'] == cluster['name']:
                                        ConsipSet = resolveFilter(rc, cluster)
                            if 'inventory_filters' in app.keys():
                                filters = GetInvFromApps(apps)
                                for invfilter in filters:
                                    if invfilter['name'] != 'Default':
                                        if policy['provider_filter']['name'] == invfilter['name']:
                                            ProvipSet = resolveFilter(rc, invfilter)
                                        if policy['consumer_filter']['name'] == invfilter['name']:
                                            ConsipSet = resolveFilter(rc, invfilter)
                        for a in ConsipSet:
                            for b in ProvipSet:
                                if a != b:
                                    if rule['port_min'] == rule['port_max']:
                                        port = rule['port_min']
                                        print ("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['consumer_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any") + " eq " + str(port))
                                        file1.write("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['consumer_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any") + " eq " + str(port) + "\n")
                                    else:
                                        print ("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['consumer_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any") + " range " + str(rule['port_min']) + "-" + str(rule['port_max']))
                                        file1.write("\t permit " + protocols[str(rule['proto'])]['Keyword'] + " host " + (a if policy['consumer_filter']['name'] != 'Default' else " any") + " host " + (b if policy['provider_filter']['name'] != 'Default' else " any") + " range " + str(rule['port_min']) + "-" + str(rule['port_max']) + "\n")
    # Close the ACL with an explicit deny and finish the output file.
    print ("\t deny ip any any\n!\n\n")
    file1.write("\t deny ip any any\n!\n\n")
    file1.close()
    print (CYELLOW + "ACL Config File: ACL_config.txt created" + CEND)
def main():
    """Entry point: build the REST client and convert policies to N9K ACLs."""
    rest_client = CreateRestClient()
    convApps2n9k(rest_client)
if __name__ == "__main__":
main() | [
"anhdle@cisco.com"
] | anhdle@cisco.com |
031f01341b3f6a3f5d55120b736d178b2b4babe0 | 7302f7cfbbc992cdbb64548c121c9c23e326b47b | /web/migrations/0016_studyrecord.py | f5a01f20a4c6911328dc4fbd13e1d907f587ba6a | [] | no_license | kingLDY/luffy_crm | 7268e1c0164ba8a6a72eb83dc47e678d8fd7dbbc | d117b5ac6a57390300e5d6a73d7d3c01061a1b5e | refs/heads/develop | 2021-05-20T19:16:43.036840 | 2020-04-02T08:37:02 | 2020-04-02T08:37:02 | 252,387,116 | 1 | 0 | null | 2020-04-02T08:26:30 | 2020-04-02T07:37:08 | Python | UTF-8 | Python | false | false | 1,077 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-12-29 07:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0015_auto_20181227_0751'),
]
operations = [
migrations.CreateModel(
name='StudyRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('record', models.CharField(choices=[('checked', '已签到'), ('vacate', '请假'), ('late', '迟到'), ('noshow', '缺勤'), ('leave_early', '早退')], default='checked', max_length=64, verbose_name='上课纪录')),
('course_record', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.CourseRecord', verbose_name='第几天课程')),
('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web.Student', verbose_name='学员')),
],
),
]
| [
"1824243518@qq.com"
] | 1824243518@qq.com |
279fd19ae32254d0fd602d477672d39a3c43f3e1 | 2083b21877700004a65f35c20abe8f5885244207 | /regularization/orthogonal_weight.py | 9296f5a3b9b8708d811e077e627ac080fab9c703 | [
"Apache-2.0"
] | permissive | ETRI-EdgeAnalytics/AIGrandChallenge | f0f2e8b5aa3b4ae31f34f4bae01961d6fb747dca | ecd6c2c9911d195155d3cbd14017d1692593926a | refs/heads/main | 2023-03-28T19:54:28.319583 | 2021-04-01T06:42:31 | 2021-04-01T06:42:31 | 353,196,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,671 | py | import torch
from torch.optim import lr_scheduler
import copy
from torch import cuda, nn, optim
from tqdm import tqdm, trange
import numpy
from torch.nn.functional import normalize
from torch.autograd import Variable
def gram_schmidt(vectors,device):
    #to make pointwise matrix independent matrix
    # Classical Gram-Schmidt over the rows of a 2-D weight matrix, returned
    # in half precision. Works on the shorter dimension: tall matrices are
    # transposed first and transposed back at the end.
    if vectors.shape[0]>vectors.shape[1]:
        vectors = vectors.transpose(0,1)
    basis = torch.zeros_like(vectors).to(device)
    for num in range(vectors.shape[0]):
        # Subtract the projection of this row onto every basis row so far
        # (zero rows in `basis` contribute nothing).
        temp = torch.zeros_like(vectors[num])
        for b in basis:
            temp += torch.matmul(vectors[num],b) * b
        w = vectors[num] - temp
        # Only keep the residual if it is numerically non-degenerate;
        # otherwise the row stays zero.
        if (w > 1e-10).any():
            basis[num] = w/torch.norm(w)
    # Cast to fp16 — presumably to match the (half-precision) model weights;
    # confirm the caller's dtype expectations.
    basis = basis.half()
    if vectors.shape[0]>vectors.shape[1]:
        return basis.transpose(0,1)
    else:
        return basis
def gr_sch_pr(mdl, device):
    # Re-orthogonalize (in place) the 1x1 conv weights of every bottleneck
    # block in modules whose name contains 'layer' (ResNet-style layout:
    # conv1/conv3 are the pointwise convolutions).
    for name, module in mdl.named_children():
        if 'layer' in name:
            for m in module:
                m.conv1.weight = torch.nn.Parameter(gram_schmidt(m.conv1.weight, device))
                m.conv3.weight = torch.nn.Parameter(gram_schmidt(m.conv3.weight, device))
def l2_reg_ortho(mdl, device):
    """
    SRIP function from 'Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?,'
    https://arxiv.org/abs/1810.09102.

    Accumulates, over every bottleneck block's conv1 weight, the squared
    spectral norm of (W^T W - I), estimated with one power-iteration step.
    Returns None when no module name contains 'layer'.
    """
    l2_reg = None
    for name, module in mdl.named_children():
        if 'layer' in name:
            for m in module:
                W = m.conv1.weight
                cols = W[0].numel()
                w1 = W.view(-1,cols)
                wt = torch.transpose(w1,0,1)
                # NOTE(review): this rebinds the loop variable `m` (the block)
                # to the Gram matrix — harmless here but easy to misread.
                m = torch.matmul(wt,w1)
                # `Variable` is deprecated (no-op since PyTorch 0.4); fp16
                # identity matches the half-precision weights used elsewhere.
                ident = Variable(torch.eye(cols,cols)).type(torch.HalfTensor).to(device)
                w_tmp = (m - ident)
                height = w_tmp.size(0)
                # One step of power iteration to approximate the top
                # singular value of w_tmp.
                u = normalize(w_tmp.new_empty(height).normal_(0,1), dim=0, eps=1e-12)
                v = normalize(torch.matmul(w_tmp.t(), u), dim=0, eps=1e-12)
                u = normalize(torch.matmul(w_tmp, v), dim=0, eps=1e-12)
                sigma = torch.dot(u, torch.matmul(w_tmp, v))
                if l2_reg is None:
                    l2_reg = (sigma)**2
                else:
                    l2_reg = l2_reg + (sigma)**2
    return l2_reg
# NOTE(review): dead code -- an earlier float32 variant of the SRIP
# regularizer kept as a module-level string literal (a no-op at runtime).
# The live implementation is l2_reg_ortho_32bit below; consider deleting.
'''
def l2_reg_ortho_32bit(mdl, device):
    """
    SRIP function from 'Can We Gain More from Orthogonality Regularizations in Training Deep CNNs?,'
    https://arxiv.org/abs/1810.09102.
    """
    l2_reg = None
    for name, module in mdl.named_children():
        if 'layer' in name:
            for m in module:
                W = m.conv1.weight
                cols = W[0].numel()
                w1 = W.view(-1,cols)
                wt = torch.transpose(w1,0,1)
                m = torch.matmul(wt,w1)
                ident = Variable(torch.eye(cols,cols)).to(device)
                w_tmp = (m - ident)
                height = w_tmp.size(0)
                u = normalize(w_tmp.new_empty(height).normal_(0,1), dim=0, eps=1e-12)
                v = normalize(torch.matmul(w_tmp.t(), u), dim=0, eps=1e-12)
                u = normalize(torch.matmul(w_tmp, v), dim=0, eps=1e-12)
                sigma = torch.dot(u, torch.matmul(w_tmp, v))
                if l2_reg is None:
                    l2_reg = (sigma)**2
                else:
                    l2_reg = l2_reg + (sigma)**2
    return l2_reg
'''
def l2_reg_ortho_32bit(conf, mdl, device):
    """Float32 SRIP penalty averaged over all pointwise (1x1) conv weights.

    For every 4-D parameter whose last dimension is 1, accumulates the
    size-normalized squared top singular value (one power-iteration step) of
    W^T W - I, and returns the mean over all such weights.

    Args:
        conf: unused; kept only for call-site compatibility.
        mdl (torch.nn.Module): model whose parameters are scanned.
        device: device on which the identity matrix is allocated.

    Returns:
        Scalar tensor with the mean penalty, or None when the model has no
        pointwise conv weight.  (The original raised NameError in that case
        because ``num`` was only bound inside the loop.)
    """
    penalty = None
    count = 0
    for weight in mdl.parameters():
        # Only 4-D conv weights with a 1-wide kernel (pointwise convs).
        if weight.ndimension() != 4 or weight.shape[3] != 1:
            continue
        cols = weight[0].numel()
        flat = weight.view(-1, cols)
        gram = torch.matmul(torch.transpose(flat, 0, 1), flat)
        residual = gram - Variable(torch.eye(cols, cols)).to(device)
        # One power-iteration round approximates the top singular value.
        u = normalize(residual.new_empty(residual.size(0)).normal_(0, 1), dim=0, eps=1e-12)
        v = normalize(torch.matmul(residual.t(), u), dim=0, eps=1e-12)
        u = normalize(torch.matmul(residual, v), dim=0, eps=1e-12)
        sigma = torch.dot(u, torch.matmul(residual, v))
        # Normalize by the matrix size so wide and narrow layers weigh alike.
        term = (torch.norm(sigma, 2)) ** 2 / numpy.prod(residual.size())
        penalty = term if penalty is None else penalty + term
        count += 1
    if count == 0:
        return None
    return penalty / count
def conv3_l2_reg_ortho(mdl, device):
    """SRIP orthogonality penalty over the conv3 weights of residual blocks.

    From 'Can We Gain More from Orthogonality Regularizations in Training
    Deep CNNs?' (https://arxiv.org/abs/1810.09102).  Same scheme as
    l2_reg_ortho but applied to each block's ``conv3`` weight; the identity
    is built in half precision.

    Returns:
        Scalar tensor with the summed penalty, or None if no ``layer*``
        child exists.
    """
    penalty = None
    for child_name, layer in mdl.named_children():
        if 'layer' not in child_name:
            continue
        for block in layer:
            weight = block.conv3.weight
            cols = weight[0].numel()
            flat = weight.view(-1, cols)
            gram = torch.matmul(torch.transpose(flat, 0, 1), flat)
            residual = gram - Variable(torch.eye(cols, cols)).type(torch.HalfTensor).to(device)
            # One power-iteration round approximates the top singular value.
            u = normalize(residual.new_empty(residual.size(0)).normal_(0, 1), dim=0, eps=1e-12)
            v = normalize(torch.matmul(residual.t(), u), dim=0, eps=1e-12)
            u = normalize(torch.matmul(residual, v), dim=0, eps=1e-12)
            sigma = torch.dot(u, torch.matmul(residual, v))
            penalty = sigma ** 2 if penalty is None else penalty + sigma ** 2
    return penalty
def conv3_l2_reg_ortho_32bit(mdl, device):
    """Float32 SRIP penalty over the conv3 weights of residual blocks.

    From 'Can We Gain More from Orthogonality Regularizations in Training
    Deep CNNs?' (https://arxiv.org/abs/1810.09102).  Identical to
    conv3_l2_reg_ortho except the identity matrix stays in float32.

    Returns:
        Scalar tensor with the summed penalty, or None if no ``layer*``
        child exists.
    """
    penalty = None
    for child_name, layer in mdl.named_children():
        if 'layer' not in child_name:
            continue
        for block in layer:
            weight = block.conv3.weight
            cols = weight[0].numel()
            flat = weight.view(-1, cols)
            gram = torch.matmul(torch.transpose(flat, 0, 1), flat)
            residual = gram - Variable(torch.eye(cols, cols)).to(device)
            # One power-iteration round approximates the top singular value.
            u = normalize(residual.new_empty(residual.size(0)).normal_(0, 1), dim=0, eps=1e-12)
            v = normalize(torch.matmul(residual.t(), u), dim=0, eps=1e-12)
            u = normalize(torch.matmul(residual, v), dim=0, eps=1e-12)
            sigma = torch.dot(u, torch.matmul(residual, v))
            penalty = sigma ** 2 if penalty is None else penalty + sigma ** 2
    return penalty
def fc_l2_reg_ortho(mdl, device):
    """SRIP orthogonality penalty for the final ('last*') linear layer(s).

    From 'Can We Gain More from Orthogonality Regularizations in Training
    Deep CNNs?' (https://arxiv.org/abs/1810.09102).  For every direct child
    whose name contains 'last', accumulates the squared top singular value
    (one power-iteration step) of W^T W - I for its weight; the identity is
    built in half precision.

    Returns:
        Scalar tensor with the summed penalty, or None if no matching
        child exists.
    """
    penalty = None
    for child_name, child in mdl.named_children():
        if 'last' not in child_name:
            continue
        weight = child.weight
        cols = weight[0].numel()
        flat = weight.view(-1, cols)
        gram = torch.matmul(torch.transpose(flat, 0, 1), flat)
        residual = gram - Variable(torch.eye(cols, cols)).type(torch.HalfTensor).to(device)
        # One power-iteration round approximates the top singular value.
        u = normalize(residual.new_empty(residual.size(0)).normal_(0, 1), dim=0, eps=1e-12)
        v = normalize(torch.matmul(residual.t(), u), dim=0, eps=1e-12)
        u = normalize(torch.matmul(residual, v), dim=0, eps=1e-12)
        sigma = torch.dot(u, torch.matmul(residual, v))
        penalty = sigma ** 2 if penalty is None else penalty + sigma ** 2
    return penalty
def conv1_l2_reg_orthogonal(mdl, device):
    """Orthogonality-only penalty (not orthonormality) for conv1 weights.

    Penalizes only the off-diagonal entries of the Gram matrix of each
    block's conv1 weight, so vector norms are left unconstrained -- intended
    to isolate the effect of orthogonality from that of orthonormal vectors.
    A single power-iteration step estimates the largest eigenvalue of the
    off-diagonal part.

    Bug fixes vs. the original: it read ``mdl.device``, an attribute plain
    nn.Modules do not have (AttributeError), and hard-coded the start vector
    to HalfTensor, which dtype-mismatched float32 weights.  The start vector
    now matches the Gram matrix's dtype and is placed on ``device``.

    Returns:
        Scalar tensor with the summed penalty, or None if no ``layer*``
        child exists.
    """
    penalty = None
    for child_name, layer in mdl.named_children():
        if 'layer' not in child_name:
            continue
        for block in layer:
            weight = block.conv1.weight
            cols = weight[0].numel()
            rows = weight.shape[0]
            flat = weight.view(-1, cols)
            flat_t = torch.transpose(flat, 0, 1)
            # Use the smaller of the two possible Gram matrices.
            if rows > cols:
                gram = torch.matmul(flat_t, flat)
            else:
                gram = torch.matmul(flat, flat_t)
            # Zero the diagonal: only cross-correlations are penalized.
            off_diag = gram - torch.diagflat(torch.diagonal(gram))
            # Random start vector for one power-iteration step.
            b_k = Variable(torch.rand(off_diag.shape[1], 1, dtype=off_diag.dtype)).to(device)
            v1 = torch.matmul(off_diag, b_k)
            v2 = torch.div(v1, torch.norm(v1, 2))
            v3 = torch.matmul(off_diag, v2)
            term = (torch.norm(v3, 2)) ** 2
            penalty = term if penalty is None else penalty + term
    return penalty
def conv3_l2_reg_orthogonal(mdl, device):
    """Orthogonality-only penalty (not orthonormality) for conv3 weights.

    Mirror of conv1_l2_reg_orthogonal applied to each block's ``conv3``
    weight: penalizes only the off-diagonal Gram entries, using one
    power-iteration step to estimate the largest eigenvalue.

    Bug fixes vs. the original: it read the nonexistent ``mdl.device``
    attribute and hard-coded a HalfTensor start vector that dtype-mismatched
    float32 weights; the start vector now matches the Gram matrix's dtype
    and is placed on ``device``.

    Returns:
        Scalar tensor with the summed penalty, or None if no ``layer*``
        child exists.
    """
    penalty = None
    for child_name, layer in mdl.named_children():
        if 'layer' not in child_name:
            continue
        for block in layer:
            weight = block.conv3.weight
            cols = weight[0].numel()
            rows = weight.shape[0]
            flat = weight.view(-1, cols)
            flat_t = torch.transpose(flat, 0, 1)
            # Use the smaller of the two possible Gram matrices.
            if rows > cols:
                gram = torch.matmul(flat_t, flat)
            else:
                gram = torch.matmul(flat, flat_t)
            # Zero the diagonal: only cross-correlations are penalized.
            off_diag = gram - torch.diagflat(torch.diagonal(gram))
            # Random start vector for one power-iteration step.
            b_k = Variable(torch.rand(off_diag.shape[1], 1, dtype=off_diag.dtype)).to(device)
            v1 = torch.matmul(off_diag, b_k)
            v2 = torch.div(v1, torch.norm(v1, 2))
            v3 = torch.matmul(off_diag, v2)
            term = (torch.norm(v3, 2)) ** 2
            penalty = term if penalty is None else penalty + term
    return penalty
| [
"noreply@github.com"
] | noreply@github.com |
0113a3f2a583175b56b510febee6361c7eef79c7 | a941df10f19dc6d9a07575e7fbaf8577aa35de19 | /graphic.py | 68c274291e394647053ae420cbfdee7edc0a4951 | [] | no_license | GoDDoS/DevopsTasks | a22d65918c8d936f264dc9d2f40ef2276703cb36 | b0420c681d2316d3fdb45e353b088bd040bdef50 | refs/heads/main | 2023-04-28T04:39:51.217128 | 2021-05-10T17:19:07 | 2021-05-10T17:19:07 | 365,012,384 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | from db import *
from tkinter import *
# Main application window.
root = Tk()
root.title("Personal anime list")
# Module-level state shared by the nested callbacks inside add():
ii = 0  # index into the six anime-info questions
gg = 0  # index into the three personal-list questions
ttl = ""  # title currently being added
def add():
    """Interactive wizard that adds an anime to the user's personal list.

    Drives a small state machine over three shared widgets (text, but,
    question).  If the title is unknown to the DB, six descriptive
    questions are asked first (questions/read and add_anime); then three
    personal questions are asked (user_add/read_info) and stored with
    add_personal_list().  Progress lives in the module globals ``ii``
    (DB questions), ``gg`` (personal questions) and ``ttl`` (title).

    NOTE(review): several spots look like latent bugs worth confirming
    against a live run: ``str`` shadows the builtin; text.get(1.0, END)
    keeps a trailing newline, so the ``== '-'`` comparison in read() can
    never match; and read() calls ``user_add()`` without the required
    ``event`` argument when the last answer is collected.
    """
    str = ('type', 'released_date', 'end_date', 'producer_name', 'rating', 'studio')
    quest = ('When did you see it?', 'Rate it!', 'Type the best character')
    array = {}  # answers to the six DB questions, keyed by index
    info_array = {}  # answers to the three personal questions
    # Show the next personal-list question (driven by global gg).
    def user_add(event):
        global gg
        question.delete(1.0, END)
        question.insert(END, quest[gg])
        question.grid(row=0, column=0)
        but.config(text="OK")
        text.delete(1.0, END)
        text.grid(row=0, column=1)
        but.grid(row=0, column=2)
        but.bind("<Button-1>", read_info)
    # Store the current personal answer; on the third, persist the record.
    def read_info(event):
        global gg
        global ttl
        info_array[gg] = text.get(1.0, END)
        gg += 1
        if gg == 3:
            add_personal_list(ttl, info_array[1], info_array[0], info_array[2])
            return
        question.delete(1.0, END)
        string = quest[gg]
        question.insert(END, string)
        question.grid(row=0, column=0)
        text.delete(1.0, END)
        text.grid(row=0, column=1)
        but.grid(row=0, column=2)
    # Entry point after "Find it!": branch on whether the title is known.
    def output(event):
        global ttl
        ttl = text.get("1.0", END)
        if check_anime_in_DB(ttl) == 0:
            string = "Sorry, we can't find the anime in the DB.\n"
            string += "Please, give us info about it"
            question.insert(END, string)
            question.grid(row=0, column=0)
            but.config(text="OK")
            but.grid(row=0, column=1)
            but.bind("<Button-1>", questions)
        else:
            user_add(event)
    # Show the next DB-info question (driven by global ii).
    def questions(event):
        global ii
        question.delete(1.0, END)
        string = 'Type '
        string += str[ii]
        string += " of the anime (type '-' if you don't know)"
        question.insert(END, string)
        question.grid(row=0, column=0)
        text.delete(1.0, END)
        text.grid(row=0, column=1)
        but.grid(row=0, column=2)
        but.bind("<Button-1>", read)
    # Store the current DB answer; on the sixth, create the anime row.
    def read(event):
        global ii
        global ttl
        array[ii] = text.get(1.0, END)
        if array[ii] == '-':
            array[ii] = 'DEFAULT'
        ii += 1
        # print(ii)
        if ii == 6:
            add_anime(ttl, array[0], array[1], array[2], array[3], array[4], array[5])
            user_add()
            return
        question.delete(1.0, END)
        string = 'Type '
        string += str[ii]
        string += " of the anime (type '-' if you don't know)"
        question.insert(END, string)
        question.grid(row=0, column=0)
        text.delete(1.0, END)
        text.grid(row=0, column=1)
        but.grid(row=0, column=2)
    # fra.config(bg="Red")
    # Initial layout: title entry, find button, question display.
    text = Text(fra, width=45, height=10, wrap=WORD)
    but = Button(fra, width=45, height=10, text="Find it!")
    question = Text(fra, width=45, height=10, wrap=WORD)
    text.grid(row=0, column=0)
    but.grid(row=0, column=1)
    but.bind("<Button-1>", output)
def print_table(anm):
    """Render a 2-D result set as a grid of Text cells inside ``fra``."""
    cells = []
    for row_idx, row in enumerate(anm):
        cells.append([None] * len(row))
        for col_idx, value in enumerate(row):
            cell = Text(fra, wrap=WORD, width=10, height=2.5)
            cell.insert(END, value)
            cell.grid(row=row_idx, column=col_idx)
            cells[row_idx][col_idx] = cell
def find_by_title():
    """Prompt for an anime title and display matching rows from the DB."""
    def on_search(event):
        search_btn.grid_remove()
        query = entry.get(1.0, END)
        entry.grid_remove()
        print_table(find_anime_by_title(query))
    entry = Text(fra, width=45, height=10, wrap=WORD)
    search_btn = Button(fra, width=45, height=10, text="Search!")
    entry.grid(row=0, column=0)
    search_btn.grid(row=0, column=1)
    search_btn.bind("<Button-1>", on_search)
def find_by_tag():
    """Prompt for a tag and display the anime carrying that tag."""
    def on_search(event):
        search_btn.grid_remove()
        query = entry.get(1.0, END)
        entry.grid_remove()
        print_table(find_anime_by_tag(query))
    entry = Text(fra, width=45, height=10, wrap=WORD)
    search_btn = Button(fra, width=45, height=10, text="Search!")
    entry.grid(row=0, column=0)
    search_btn.grid(row=0, column=1)
    search_btn.bind("<Button-1>", on_search)
def top():
    """Show the top-rated anime table when the button is pressed."""
    def on_show(event):
        show_btn.grid_remove()
        print_table(top_anime())
    show_btn = Button(fra, width=45, height=10, text="Show!")
    show_btn.grid(row=0, column=1)
    show_btn.bind("<Button-1>", on_show)
def find_by_name():
    """Prompt for a character name and display matching characters."""
    def on_search(event):
        search_btn.grid_remove()
        query = entry.get(1.0, END)
        entry.grid_remove()
        print_table(find_ch_by_name(query))
    entry = Text(fra, width=45, height=10, wrap=WORD)
    search_btn = Button(fra, width=45, height=10, text="Search!")
    entry.grid(row=0, column=0)
    search_btn.grid(row=0, column=1)
    search_btn.bind("<Button-1>", on_search)
def inf_seiyuu():
    """Prompt for a seiyuu (voice actor) and display their information."""
    def on_search(event):
        search_btn.grid_remove()
        query = entry.get(1.0, END)
        entry.grid_remove()
        print_table(inf_by_seiyuu(query))
    entry = Text(fra, width=45, height=10, wrap=WORD)
    search_btn = Button(fra, width=45, height=10, text="Search!")
    entry.grid(row=0, column=0)
    search_btn.grid(row=0, column=1)
    search_btn.bind("<Button-1>", on_search)
def personal_list():
    """Show the user's personal anime list when the button is pressed."""
    def on_show(event):
        show_btn.grid_remove()
        print_table(my_list())
    show_btn = Button(fra, width=45, height=10, text="Show!")
    show_btn.grid(row=0, column=1)
    show_btn.bind("<Button-1>", on_show)
# Main frame hosting every dynamically created widget.
fra = Frame(root, width=700, height=500, bg="Black")
fra.pack()
# Menu-bar wiring: each feature is reached through cascading menus.
m = Menu(root)
root.config(menu=m)
cm = Menu(m)
dm = Menu(cm)
em = Menu(dm)
m.add_cascade(label="Choose option", menu=cm)
cm.add_command(label="Add anime to your list", command=add)
cm.add_cascade(label="Find...", menu=dm)
dm.add_cascade(label="anime by...", menu=em)
em.add_command(label="title", command=find_by_title)
em.add_command(label="tag", command=find_by_tag)
dm.add_command(label="character", command=find_by_name)
dm.add_command(label="seiyuu", command=inf_seiyuu)
#dm.add_command(label="producer", command=find_producer)
cm.add_command(label="Show top", command=top)
cm.add_command(label="Show my list", command=personal_list)
cm.add_command(label="Clear my list", command=clear_list)
#cm.add_command(label="Show tags", command=tags)
root.mainloop()
| [
"kuleshov.iv@phystech.edu"
] | kuleshov.iv@phystech.edu |
695cdb94db56ccb5b2689dc2825959a63ea15a94 | 0c94547925ed2f81965be5ec62711e1556ec3e33 | /main/pagecontexts/writview.py | c054786c2e1e085a1156bed0117a42e2310c4ac6 | [] | no_license | eremzeit/beatwrit | 53bc73319d32213d5e90669ad7b69268e81a36cd | 0f9bb9f5df9b15b756c140ad7b36a5c58b3faf45 | refs/heads/master | 2020-05-16T20:25:57.226715 | 2011-12-30T22:55:05 | 2011-12-30T22:55:05 | 3,077,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,947 | py | from django.utils import simplejson as json
from datetime import datetime, timedelta
import re
import pdb
import math
from django.http import HttpRequest
from main.models import *
from utils import logpath
from main.fetching import *
class WritViewPageContext():
def __init__(self, request, writ):
self.writ = writ
self.bwu = None
if request.user.is_authenticated():
self.bwu = request.user.beatwrituser
self.request = request
self._user_order = None
self._participant_set = None
self._participant_users = None
def get_user_order(self):
if self._user_order:
return self._user_order
self._user_order = self.writ.get_user_order()
return self._user_order
def get_participant_set(self):
if self._participant_set:
return self._participant_set
self._participant_set = self.writ.participant_set.all()
for part in self._participant_set:
part.can_add_to_circle_ = self.bwu.can_add_to_circle(part).issuccess()
return self._participant_set
def get_participant_users(self):
if self._participant_users:
return self._participant_users
self._participant_users = self.writ.participants.all()
return self._participant_users
def get_additions(self):
adds = self.writ.addition_set.all()
for add in adds:
add.can_nod_ = 1 if add.can_nod(self.bwu) else 0
return adds
def get_new_additions(self):
if not self.request.user.is_anonymous():
return []
#get the additions of the user in reverse order
my_adds = self.writ.addition_set.filter(author__exact=self.bwu).order_by('-position')
if len(my_adds) is 0:
if not self.bwu in self.writ.participants.all():
return []
return self.writ.addition_set.all()
most_recent_addition = my_adds[0]
most_recent_position = most_recent_addition.position
return self.writ.addition_set.filter(position__gt=most_recent_position)
def get_participant_infos(self):
# [{'name':'', 'nodcount':''},...]
infos = []
users = self.get_user_order()
for bwuser in users:
p_nods = Nod.objects.filter(addition__writ__id__exact=self.writ.id,receiver__pk__exact=bwuser.id)
info = {'name':bwuser.get_penname(), 'nodcount':len(p_nods), 'id':bwuser.pk, 'can_add':self.bwu.can_add_to_circle(bwuser).issuccess()}
infos.append(info)
return infos
def get_user_json_object(self):
user = {}
user['nodsRemaining'] = self.get_user_nods_remaining()
user['id'] = self.bwu.id
user['inWrit'] = self.bwu in self.writ.participants.all()
print user
return json.dumps(user)
def get_writ_json_object(self):
writ = {}
writ['id'] = self.writ.pk
writ['endingType'] = self.writ.settings.endingtype
writ['numContribWords'] = self.writ.settings.max_words_per_contribution
writ['wordsRemaining'] = self.writ.get_words_remaining()
adds = {}
for addition in self.writ.addition_set.all():
addition_info = addition.pack()
adds[str(addition_info['id'])] = addition_info
writ['additions'] = adds
return json.dumps(writ)
def is_users_turn(self):
user_order = self.get_user_order()
if user_order[0].pk == self.bwu.pk:
return True
else:
return False
def is_user_participant(self):
if self.bwu in self.get_participant_users():
return True
else:
return False
def get_user_nods_remaining(self):
if not self.is_user_participant():
return 0
return self.bwu.nods_remaining
def user_has_nods(self):
if self.get_user_nods_remaining() > 0:
return True
return False
def make_addition_html(self, addition):
template = Template("""<span id='add_{{ addition.pk }}' add_id='{{addition.pk}}'
{% if addition in wvc.get_new_additions %}
class='add_span new_addition'
{% else %}
class='add_span'
{% endif %}
>{{addition.content}}</span>
""")
"""
{% for addition in writ.addition_set.all %}
<span id='add_{{ addition.pk }}' add_id='{{addition.pk}}'
{% if addition in wvc.get_new_additions %}
class='add_span new_addition'
{% else %}
class='add_span'
{% endif %}
>{{addition.content}}</span>
{% endfor %}
"""
def can_user_join(self):
if self.writ.can_join(self.bwu).issuccess():
return True
else:
return False
| [
"erem.zeit@gmail.com"
] | erem.zeit@gmail.com |
9de4abdf643e0a2a6772a2eb914b3fc2796f7194 | 3e15b40f4e28675af82d7d19aca1e5769131b3a4 | /exonic_snps.py | 55c500fd1980d7102f2be0938655079c402957f3 | [
"MIT"
] | permissive | jzfarmer/learning_python | 69fd50457e2365d39bd23f6953d3d1456ff6147d | 279fc19d4405625b49f853575252bf1dee3cbb99 | refs/heads/master | 2022-08-04T01:42:35.597663 | 2020-06-01T23:35:15 | 2020-06-01T23:35:15 | 252,600,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,950 | py | #!/usr/bin/env python3
# Write a program that intersects SNPs and coding exons
# snps.txt.gz has all the 23andme snps on chrom 21
# exons.txt.gz has all the coding exons on chrom 21
# Report:
#     number of SNPs
#     number of genes
#     number of exons
#     names of genes with SNPs in them
# Given that there are about 100x more genes in the human genome
# And 100x more SNPs on the full chip
# Estimate how long your program would take to run using all available data

import gzip
import time
import bisect

# Collect all SNP positions.
t0 = time.perf_counter()
snps = []
with gzip.open('snps.txt.gz', 'rt') as fp:
    for line in fp.readlines():
        (id, chrom, pos, gtype) = line.split()
        snps.append(int(pos))
# Sorted positions let bisect find the first SNP >= beg in O(log n)
# instead of scanning every SNP for every exon.
snps.sort()

# Read exons and cross-reference against the SNP positions.
genes = []
exons = 0
snp_genes = []
with gzip.open('exons.txt.gz', 'rt') as fp:
    for line in fp.readlines():
        (gene, beg, end) = line.split()
        exons += 1
        # Bug fix: the original did genes.append(genes), appending the
        # list to itself, so the gene count was wrong.
        if gene not in genes:
            genes.append(gene)
        beg = int(beg)
        end = int(end)
        # The first SNP at or beyond the exon start overlaps the exon iff
        # it also lies at or before the exon end.
        i = bisect.bisect_left(snps, beg)
        if i < len(snps) and snps[i] <= end:
            if gene not in snp_genes:
                snp_genes.append(gene)
snp_genes.sort()
t1 = time.perf_counter()
# Scale the measured time by 100x genes and 100x SNPs, in hours.
et = (t1 - t0) * 100 * 100 / 3600

#report section
print(f'SNPs:{len(snps)}')
print(f'Genes:{len(genes)}')
print(f'Exons:{exons}')
print(f'Genes w/ SNPs: {len(snp_genes)}')
print(f'Gene List: {snp_genes}')
print(f'Estimated Full Time: {et} hours')

"""
SNPs: 8607
Genes: 234
Exons: 2433
Genes w/ SNPs: 54
Gene List: ABCG1, AGPAT3, AIRE, AP000311.1, BACE2, BACH1, BRWD1, C21orf58, C2CD2, CBS, CHAF1B, CLDN17, COL6A1, COL6A2, DNMT3L, DOP1B, DSCAM, FAM243A, GART, IFNAR1, IFNGR2, ITGB2, KRTAP10-5, KRTAP10-7, KRTAP19-3, KRTAP25-1, MCM3AP, MORC3, PAXBP1, PCNT, PDE9A, PDXK, PIGP, PRDM15, PTTG1IP, PWP2, RRP1, RWDD2B, SCAF4, SIK1, SIM2, SLC37A1, SLC5A3, SOD1, SON, SYNJ1, TMPRSS15, TMPRSS2, TMPRSS3, TRAPPC10, TTC3, U2AF1, UMODL1, USP25
Estimated Full Time: 4.76 hours
"""
"jessicafarmer@Jessicas-MBP.attlocal.net"
] | jessicafarmer@Jessicas-MBP.attlocal.net |
e34f54532e8403ba6405c2e1be24e8b4eb190ba3 | bcc3359817a74c97b8804d415b5b578d03ca4fc9 | /test/assets/classes/message.py | ae9a9cd7c2fcfd518fe6587ade21a5477f78edb7 | [] | no_license | pydget/pyspare | 21c7677e66987ef4625dc7a71f041beb025b0350 | 46ef0e3c4eca1ceb52a86cae3d790483d25b2906 | refs/heads/master | 2023-02-19T19:18:13.743639 | 2021-01-16T01:27:20 | 2021-01-16T01:27:20 | 288,975,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | from dataclasses import dataclass
@dataclass
class Message:
    """A message routed from ``origin`` to ``target`` with payload ``body``.

    The original combined ``@dataclass`` (with no field annotations) with a
    handwritten ``__init__``, so the generated ``__eq__``/``__repr__`` saw
    zero fields and every two Messages compared equal.  Declaring the fields
    lets the dataclass machinery generate a correct ``__init__`` (same
    signature), ``__repr__`` and field-wise ``__eq__`` while keeping the
    memory-saving ``__slots__``.
    """
    __slots__ = 'origin', 'target', 'body'
    origin: object  # sender identifier
    target: object  # recipient identifier
    body: object    # message payload
"hoyeungw@outlook.com"
] | hoyeungw@outlook.com |
73cf1bf0cb2673991a13c5ad0255cdb583da53a6 | fd9e45ff4e0e8ea93c1204aeac6020eb82395bbe | /sel_elements/find_elements_by.py | 5ca6e054e53261a361ff556b80c2d3eaffe943b4 | [] | no_license | VishwajeetSaxena/selenium_python | e42a4c7bc971ff5380b73d617890af7de0b2a09d | 02fe6a1e3e92f1b2ccd42ae7537b898b1aa68b26 | refs/heads/master | 2022-12-06T18:42:00.444136 | 2020-08-16T15:28:17 | 2020-08-16T15:28:17 | 277,317,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,889 | py | from selenium import webdriver
class Find_By_Name():
    """Demo of Selenium's ``find_elements_by_*`` locator strategies on the
    letskodeit practice page.

    The original repeated the same find/print stanza eight times (once per
    locator strategy); the lookups are now data-driven, preserving every
    printed string byte-for-byte.
    """

    def getChromeDriver(self):
        """Create and return a Chrome WebDriver using the bundled chromedriver."""
        driver = webdriver.Chrome(executable_path='C:\\Users\\91897\\workspace_python\\sel_drivers\\chromedriver.exe')
        return driver

    def _report(self, elements, message):
        """Print the success message and element count for one lookup.

        ``find_elements_by_*`` returns a list (possibly empty), never None,
        so the message is printed even for zero matches -- matching the
        original behavior.
        """
        if elements is not None:
            print(message)
            print('number of elements =', len(elements))

    def test(self):
        """Open the practice page and exercise every locator strategy."""
        baseUrl = 'https://learn.letskodeit.com/p/practice'
        driver = self.getChromeDriver()
        driver.maximize_window()
        driver.get(baseUrl)
        # (finder, locator value, message) -- messages kept verbatim,
        # including the original 'partal' typo.
        lookups = [
            (driver.find_elements_by_id, 'name', 'We found the element using id'),
            (driver.find_elements_by_name, 'show-hide', 'We found the element using name'),
            (driver.find_elements_by_xpath, './/*[@id="name"]', 'We found the element using xpath'),
            (driver.find_elements_by_css_selector, '#displayed-text', 'We have found the element using css'),
            (driver.find_elements_by_link_text, 'Login', 'We have found the element using linktext'),
            (driver.find_elements_by_partial_link_text, 'Pract', 'We have found the element using partal link text'),
            (driver.find_elements_by_class_name, 'displayed-class', 'We have found the element using class'),
            (driver.find_elements_by_tag_name, 'a', 'We have found the element using tag'),
        ]
        for finder, locator, message in lookups:
            self._report(finder(locator), message)

myobj = Find_By_Name()
myobj.test()
| [
"asivish@gmail.com"
] | asivish@gmail.com |
64152ef3286b3b4aaf75d45d5ddad35339af9571 | 3cb89a2d4474608c5f47c4463eb8a3191774ab89 | /gensa.py | 3172fb7e7b17b44e207f4ad61280869721955943 | [] | no_license | haohaosteven/AutoRclone | 941fa08262679f9dd571197cc79b5ebcdaf119fb | ba5bafc28ed553a67d23516e965f667abdda83c1 | refs/heads/master | 2022-12-01T10:17:07.974194 | 2020-08-05T23:43:11 | 2020-08-05T23:43:11 | 284,528,513 | 0 | 0 | null | 2020-08-02T19:36:06 | 2020-08-02T19:36:05 | null | UTF-8 | Python | false | false | 16,466 | py | from __future__ import print_function
# mod by @avjun
# 所有项目邮箱都从1开始
import json,re
import sys
import errno
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from base64 import b64decode
from random import choice
from json import loads
from time import sleep
from glob import glob
import os, pickle
# OAuth scopes needed to manage Drive, cloud projects and IAM accounts.
SCOPES = ['https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/cloud-platform','https://www.googleapis.com/auth/iam']
# Operation names of in-flight project creations (filled by _pc_resp).
project_create_ops = []
# Accumulator for downloaded key material; None signals a failed batch.
current_key_dump = []
# Base delay (divided by 100 before use) for rate-limit backoff.
sleep_time = 30
class G:
    # Shared mutable counters/settings for project and SA naming.
    n = 1  # current project sequence number
    x = 1  # current service-account (email) sequence number
    pp = None  # project-id prefix
    sp = None  # service-account email prefix
    sq = 10  # number of service accounts per project
    _x = 1  # backup of the email counter (reset point for each new project)
    lp = None  # last project an account id was generated for
def _create_accounts(service,project,count):
batch = service.new_batch_http_request(callback=_def_batch_resp)
for i in range(count):
aid = _x_id(project)
batch.add(service.projects().serviceAccounts().create(name='projects/' + project, body={ 'accountId': aid, 'serviceAccount': { 'displayName': aid }}))
sleep(sleep_time/100)
batch.execute()
# Top the given project up to G.sq service accounts.
def _create_remaining_accounts(iam, project):
    print('在%s中创建帐户' % project)
    existing = len(_list_sas(iam, project))
    _create_accounts(iam, project, G.sq - existing)
# Advance the sequential project counter and return prefix + number.
def _generate_id():
    G.n += 1
    return G.pp + str(G.n)
# Sequential service-account id generator.
def _x_id(project=None):
    """Return G.sp + next counter value; the counter restarts from the
    G._x checkpoint whenever the target project changes."""
    if project != G.lp:
        G.x = G._x
    G.x += 1
    G.lp = project
    return G.sp + str(G.x)
# 随机序号
def _rand_id(prefix='saf-'):
chars = '-abcdefghijklmnopqrstuvwxyz1234567890'
return prefix + ''.join(choice(chars) for _ in range(25)) + choice(chars[1:])
# 列出使用服务的项目
def _get_projects(service):
return [i['projectId'] for i in service.projects().list().execute()['projects']]
# Default batch callback: back off on HTTP 429, log any other error.
def _def_batch_resp(id, resp, exception):
    if exception is None:
        return
    if str(exception).startswith('<HttpError 429'):
        sleep(sleep_time / 100)
    else:
        print(str(exception))
# Project-creation batch callback: collect operation names for polling.
def _pc_resp(id, resp, exception):
    global project_create_ops
    if exception is not None:
        print(str(exception))
    else:
        project_create_ops.extend(resp.values())
# Create `count` projects and block until every creation op completes.
def _create_projects(cloud, count):
    global project_create_ops
    batch = cloud.new_batch_http_request(callback=_pc_resp)
    created = []
    for _ in range(count):
        # Sequential ids when a prefix is configured, random ids otherwise.
        project_id = _rand_id() if G.pp is None else _generate_id()
        created.append(project_id)
        batch.add(cloud.projects().create(body={'project_id': project_id}))
    batch.execute()
    # Poll each long-running operation until it reports done.
    for op_name in project_create_ops:
        while True:
            status = cloud.operations().get(name=op_name).execute()
            if status.get('done'):
                break
            sleep(3)
    return created
# Enable every service in `ste` on every listed project, in one batch.
def _enable_services(service, projects, ste):
    batch = service.new_batch_http_request(callback=_def_batch_resp)
    for project in projects:
        for svc in ste:
            batch.add(service.services().enable(name='projects/%s/services/%s' % (project, svc)))
    batch.execute()
# Return the service accounts of `project` (up to G.sq of them), or [].
def _list_sas(iam, project):
    resp = iam.projects().serviceAccounts().list(name='projects/' + project, pageSize=G.sq).execute()
    return resp.get('accounts', [])
# Key-creation batch callback; any error voids the whole key dump so the
# caller retries the full batch.
def _batch_keys_resp(id, resp, exception):
    global current_key_dump
    if exception is not None:
        current_key_dump = None
        sleep(sleep_time / 100)
    elif current_key_dump is None:
        sleep(sleep_time / 100)
    else:
        name = resp['name']
        current_key_dump.append((name[name.rfind('/'):],
                                 b64decode(resp['privateKeyData']).decode('utf-8')))
# Create and download JSON keys for every service account of each project.
def _create_sa_keys(iam,projects,path,naming_rules):
    """Batch-create keys for all SAs of each project and write them to
    ``path``; retries the whole project's batch whenever _batch_keys_resp
    voided the dump (set it to None) or fewer than G.sq keys arrived."""
    global current_key_dump
    for i in projects:
        current_key_dump = []
        print('从 %s 下载密钥' % i)
        while current_key_dump is None or len(current_key_dump) < G.sq:
            batch = iam.new_batch_http_request(callback=_batch_keys_resp)
            total_sas = _list_sas(iam,i)
            for j in total_sas:
                batch.add(iam.projects().serviceAccounts().keys().create(
                    name='projects/%s/serviceAccounts/%s' % (i,j['uniqueId']),
                    body={
                        'privateKeyType':'TYPE_GOOGLE_CREDENTIALS_FILE',
                        'keyAlgorithm':'KEY_ALG_RSA_2048'
                    }
                ))
            batch.execute()
            if current_key_dump is None:
                # A batch item failed; reset and re-download this project.
                print('从 %s 重新下载密钥' % i)
                current_key_dump = []
            else:
                # Persist each key, named either by the full client email
                # or (naming_rules truthy) by its local part before '@'.
                for j in current_key_dump:
                    if naming_rules == False:
                        email_prefix=json.loads(list(j)[1])['client_email']
                    else:
                        email_prefix=''.join(re.findall(re.compile(r'(.*)[@]'),json.loads(list(j)[1])['client_email']))
                    with open('%s/%s.json' % (path,email_prefix),'w+') as f:
                        f.write(j[1])
# Delete every service account of `project` via one batched request.
def _delete_sas(iam, project):
    batch = iam.new_batch_http_request(callback=_def_batch_resp)
    for account in _list_sas(iam, project):
        batch.add(iam.projects().serviceAccounts().delete(name=account['name']))
    batch.execute()
# Service-account factory: the orchestrator behind every CLI action.
def serviceaccountfactory(
        credentials='credentials.json',
        token='token.pickle',
        path=None,
        list_projects=False,
        list_sas=None,
        create_projects=None,
        max_projects=200,
        enable_services=None,
        services=['iam','drive'],
        create_sas=None,
        delete_sas=None,
        download_keys=None,
        naming_rules=None
        ):
    """Authorize against Google APIs, then perform whichever of the
    list/create/enable/download/delete actions were requested.

    The project-selector arguments (enable_services, create_sas,
    download_keys, delete_sas) accept a single project id, '~' for the
    projects created in this run, or '*' for every visible project.
    """
    selected_projects = []
    proj_id = loads(open(credentials,'r').read())['installed']['project_id']
    creds = None
    # Reuse a cached OAuth token when present; otherwise run the console
    # flow and cache the result.
    if os.path.exists(token):
        with open(token, 'rb') as t:
            creds = pickle.load(t)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(credentials, SCOPES)
            # creds = flow.run_local_server(port=0)
            creds = flow.run_console()
        with open(token, 'wb') as t:
            pickle.dump(creds, t)
    cloud = build('cloudresourcemanager', 'v1', credentials=creds)
    iam = build('iam', 'v1', credentials=creds)
    serviceusage = build('serviceusage','v1',credentials=creds)
    projs = None
    # Keep retrying until the project listing works, enabling the
    # cloudresourcemanager API on permission errors.
    while projs == None:
        try:
            projs = _get_projects(cloud)
        except HttpError as e:
            if loads(e.content.decode('utf-8'))['error']['status'] == 'PERMISSION_DENIED':
                try:
                    serviceusage.services().enable(name='projects/%s/services/cloudresourcemanager.googleapis.com' % proj_id).execute()
                except HttpError as e:
                    print(e._get_reason())
                    input('按Enter重试。')
    if list_projects:
        return _get_projects(cloud)
    if list_sas:
        return _list_sas(iam,list_sas)
    if create_projects:
        print("创建项目: {}".format(create_projects))
        if create_projects > 0:
            current_count = len(_get_projects(cloud))
            if current_count + create_projects <= max_projects:
                print('创建 %d 个项目' % (create_projects))
                nprjs = _create_projects(cloud, create_projects)
                selected_projects = nprjs
            else:
                sys.exit('No, 您无法创建 %d 个新项目.\n'
                    '请减少 --quick-setup 的值。\n'
                    '请记住,您可以完全创建 %d 个项目(已经有 %d 个)。\n'
                    '除非您知道自己在做什么,否则请不要删除现有项目' % (create_projects, max_projects, current_count))
        else:
            print('将覆盖现有项目中的所有服务帐户.\n'
                  '因此,请确保您已经有一些项目。')
            input("按Enter继续...")
    if enable_services:
        ste = []
        ste.append(enable_services)
        if enable_services == '~':
            ste = selected_projects
        elif enable_services == '*':
            ste = _get_projects(cloud)
        services = [i + '.googleapis.com' for i in services]
        print('启用服务')
        _enable_services(serviceusage,ste,services)
    if create_sas:
        stc = []
        stc.append(create_sas)
        if create_sas == '~':
            stc = selected_projects
        elif create_sas == '*':
            stc = _get_projects(cloud)
        for i in stc:
            _create_remaining_accounts(iam,i)
    if download_keys:
        try:
            os.mkdir(path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                pass
            else:
                raise
        std = []
        std.append(download_keys)
        if download_keys == '~':
            std = selected_projects
        elif download_keys == '*':
            std = _get_projects(cloud)
        _create_sa_keys(iam,std,path,naming_rules)
    if delete_sas:
        std = []
        std.append(delete_sas)
        if delete_sas == '~':
            std = selected_projects
        elif delete_sas == '*':
            std = _get_projects(cloud)
        for i in std:
            print('删除 %s 中的服务帐户' % i)
            _delete_sas(iam,i)
if __name__ == '__main__':
    # CLI entry point: parse arguments, locate credentials, expand
    # --quick-setup into the individual actions, then run the factory.
    parse = ArgumentParser(description='创建Google服务帐户的工具。',formatter_class=RawTextHelpFormatter)
    parse.add_argument('--create-projects', default=None, type=int, help='\n最多创建N个项目。\n\n')
    parse.add_argument('--create-sas', default=None, help='\n在项目中创建服务帐户。\n\n')
    parse.add_argument('--credentials', default='credentials.json', help='\n指定凭证文件路径。\n\n')
    parse.add_argument('--delete-sas', default=None, help='\n删除项目中的服务帐户。')
    parse.add_argument('--download-keys', default=None, help='\n下载项目中所有服务帐户的密钥。\n\n')
    parse.add_argument('--enable-services', default=None, help='\n在项目上启用服务。 默认:IAM 和 Drive\n\n')
    parse.add_argument('--list-projects', default=False,action='store_true',help='\n列出用户可见的项目。\n\n')
    parse.add_argument('--list-sas', default=False, help='\n列出项目中的服务帐户。\n\n')
    parse.add_argument('--max-projects', default=200, type=int, help='\n允许的最大项目量。默认值:200\n\n')
    parse.add_argument('--new-only', default=False,action='store_true',help='\n不使用现有项目。\n\n')
    parse.add_argument('--path','-p', default='accounts', help='\n指定备用目录,以输出所述证书的文件。\n\n')
    parse.add_argument('--project-prefix', default=None, type=str, help='\n批量建项目时的项目前缀,如:\nsa-xiangmu1,1前的为前缀。\n项目 ID 可以包含小写字母、数字或连字符,必须以小写字母开头并以字母或数字结尾。\n6-30字符之间\n使用前缀前请确保前缀能用,不是别人使用过的\n如不确定,请自行去API创建一个项目点修改项目ID,确定\n\n')
    parse.add_argument('--quick-setup', default=None, type=int, help='\n创建项目,启用服务,创建服务帐户和下载密钥。\n\n')
    parse.add_argument('--sa-quantity', default=100, type=int, help='\n每个项目中要创建的sa个数。同时可用于指定下载数量。默认:100\n\n')
    parse.add_argument('--sa-prefix', default='sauser', type=str, help='\nsa邮箱前缀\n服务帐号ID的长度必须介于6和30个字符之间。\n服务帐号 ID 必须以小写字母开头,后跟一个或多个小写字母数字字符(可使用连字符分隔\n默认:sauser\n\n')
    parse.add_argument('--services', default=['iam','drive'],nargs='+',help='\n指定要启用的另一组服务。 覆盖默认值。\n\n')
    parse.add_argument('--token', default='token.pickle', help='\n指定pickle令牌文件路径。\n\n')
    parse.add_argument('-n', default=1, type=int, help='\n项目序号。默认:1\n\n')
    parse.add_argument('-x', default=1, type=int, help='\n邮箱序号。默认:1\n\n')
    parse.add_argument('--email-name', default=False,action='store_true',help='\n下载sa时按照邮箱前缀命名。默认:按邮箱命名\n\n')
    args = parse.parse_args()
    # If the credentials file is missing, search the cwd for one.
    # Counters start at value-1 because the generators pre-increment.
    G.x = args.x - 1
    G.n = args.n - 1
    G.pp = args.project_prefix
    G.sp = args.sa_prefix
    G.sq = args.sa_quantity
    G._x = args.x - 1
    if not os.path.exists(args.credentials):
        options = glob('*.json')
        print('找不到凭证 %s.请启用云端硬盘API:\n'
              'https://developers.google.com/drive/api/v3/quickstart/python\n'
              '并将json文件另存为certificate.json' % args.credentials)
        if len(options) < 1:
            exit(-1)
        else:
            i = 0
            print('在下面选择一个凭证文件。')
            inp_options = [str(i) for i in list(range(1,len(options) + 1))] + options
            while i < len(options):
                print(' %d) %s' % (i + 1,options[i]))
                i += 1
            inp = None
            while True:
                inp = input('> ')
                if inp in inp_options:
                    break
            if inp in options:
                args.credentials = inp
            else:
                args.credentials = options[int(inp) - 1]
            print('下次使用 --credentials %s 来使用此凭据文件。' % args.credentials)
    # --quick-setup expands into create+enable+create-sas+download over
    # either all projects ('*') or only the newly created ones ('~').
    if args.quick_setup:
        if args.project_prefix is not None:
            opt = '*'
            if args.new_only:
                opt = '~'
            args.services = ['iam','drive']
            args.create_projects = args.quick_setup
            args.enable_services = opt
            args.create_sas = opt
            args.download_keys = opt
        else:
            print('没有 --project-prefix 参数,将启用随机项目名模式')
            input('按Enter继续...')
            opt = '*'
            if args.new_only:
                opt = '~'
            args.services = ['iam','drive']
            args.create_projects = args.quick_setup
            args.enable_services = opt
            args.create_sas = opt
            args.download_keys = opt
    resp = serviceaccountfactory(
        path=args.path,
        token=args.token,
        credentials=args.credentials,
        list_projects=args.list_projects,
        list_sas=args.list_sas,
        create_projects=args.create_projects,
        max_projects=args.max_projects,
        create_sas=args.create_sas,
        delete_sas=args.delete_sas,
        enable_services=args.enable_services,
        services=args.services,
        download_keys=args.download_keys,
        naming_rules=args.email_name
    )
    # Pretty-print the listing actions' results.
    if resp is not None:
        if args.list_projects:
            if resp:
                print('项目 (%d):' % len(resp))
                for i in resp:
                    print(' ' + i)
            else:
                print('没有项目.')
        elif args.list_sas:
            if resp:
                print('服务帐户在 %s (%d):' % (args.list_sas,len(resp)))
                for i in resp:
                    print(' %s (%s)' % (i['email'],i['uniqueId']))
            else:
                print('没有服务帐户.')
| [
"noreply@github.com"
] | noreply@github.com |
a3522fca20b8464003183ee290a2778619feb8d8 | cb4db25a0b13f058f1a31b38d80d76a118d1e2dc | /venv/lib/python3.6/site-packages/google/cloud/pubsub_v1/subscriber/policy/thread.py | 39f161a3b93e9a362f65cc6dcd60b839cd2cad34 | [
"MIT"
] | permissive | Hackaton-Dragons/Never-Boils | 73df2b65f54a77d961ce53dea350b7d2a4261154 | 2d43e6e07fb18409d5a964f44f481d28d2352531 | refs/heads/master | 2020-03-09T20:27:54.554616 | 2018-10-08T05:52:33 | 2018-10-08T05:52:33 | 128,985,616 | 1 | 0 | MIT | 2018-04-15T13:32:45 | 2018-04-10T19:35:32 | Python | UTF-8 | Python | false | false | 12,056 | py | # Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from concurrent import futures
import logging
import sys
import threading
import grpc
from six.moves import queue as queue_mod
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import _helper_threads
from google.cloud.pubsub_v1.subscriber.futures import Future
from google.cloud.pubsub_v1.subscriber.policy import base
from google.cloud.pubsub_v1.subscriber.message import Message
_LOGGER = logging.getLogger(__name__)
# Name given to the helper thread that drains queued callback requests
# (see ``Policy._start_dispatch``).
_CALLBACK_WORKER_NAME = 'Thread-Consumer-CallbackRequestsWorker'
def _callback_completed(future):
    """Log the outcome of a finished message-processing future.

    Attached as a done-callback to the futures returned when a subscriber
    callback is scheduled on the executor.

    Args:
        future (concurrent.futures.Future): A completed future whose result
            (the subscriber callback's return value) is logged at DEBUG.
    """
    outcome = future.result()
    _LOGGER.debug('Result: %s', outcome)
def _do_nothing_callback(message):
"""Default callback for messages received by subscriber.
Does nothing with the message and returns :data:`None`.
Args:
message (~google.cloud.pubsub_v1.subscriber.message.Message): A
protobuf message returned by the backend and parsed into
our high level message type.
Returns:
NoneType: Always.
"""
return None
class Policy(base.BasePolicy):
    """A consumer class based on :class:`threading.Thread`.
    This consumer handles the connection to the Pub/Sub service and all of
    the concurrency needs.
    Args:
        client (~.pubsub_v1.subscriber.client): The subscriber client used
            to create this instance.
        subscription (str): The name of the subscription. The canonical
            format for this is
            ``projects/{project}/subscriptions/{subscription}``.
        flow_control (~google.cloud.pubsub_v1.types.FlowControl): The flow
            control settings.
        executor (~concurrent.futures.ThreadPoolExecutor): (Optional.) A
            ThreadPoolExecutor instance, or anything duck-type compatible
            with it.
        queue (~queue.Queue): (Optional.) A Queue instance, appropriate
            for crossing the concurrency boundary implemented by
            ``executor``.
    """
    def __init__(self, client, subscription, flow_control=types.FlowControl(),
                 executor=None, queue=None):
        super(Policy, self).__init__(
            client=client,
            flow_control=flow_control,
            subscription=subscription,
        )
        # Default the callback to a no-op; the **actual** callback is
        # provided by ``.open()``.
        self._callback = _do_nothing_callback
        # Create a queue for keeping track of shared state.
        self._request_queue = self._get_queue(queue)
        # Also maintain an executor.
        self._executor = self._get_executor(executor)
        # The threads created in ``.open()``.
        self._dispatch_thread = None
        self._leases_thread = None
    @staticmethod
    def _get_queue(queue):
        """Gets a queue for the constructor.
        Args:
            queue (Optional[~queue.Queue]): A Queue instance, appropriate
                for crossing the concurrency boundary implemented by
                ``executor``.
        Returns:
            ~queue.Queue: Either ``queue`` if not :data:`None` or a default
            queue.
        """
        if queue is None:
            return queue_mod.Queue()
        else:
            return queue
    @staticmethod
    def _get_executor(executor):
        """Gets an executor for the constructor.
        Args:
            executor (Optional[~concurrent.futures.ThreadPoolExecutor]): A
                ThreadPoolExecutor instance, or anything duck-type compatible
                with it.
        Returns:
            ~concurrent.futures.ThreadPoolExecutor: Either ``executor`` if not
            :data:`None` or a default thread pool executor with 10 workers
            and a prefix (if supported).
        """
        if executor is None:
            executor_kwargs = {}
            # ``thread_name_prefix`` only exists on CPython 2.7 and 3.6+.
            if sys.version_info[:2] == (2, 7) or sys.version_info >= (3, 6):
                executor_kwargs['thread_name_prefix'] = (
                    'ThreadPoolExecutor-SubscriberPolicy')
            return futures.ThreadPoolExecutor(
                max_workers=10,
                **executor_kwargs
            )
        else:
            return executor
    def close(self):
        """Close the existing connection.
        .. warning::
            This method is not thread-safe. For example, if this method is
            called while another thread is executing :meth:`open`, then the
            policy could end up in an undefined state. The **same** policy
            instance is not intended to be used by multiple workers (though
            each policy instance **does** have a thread-safe private queue).
        Returns:
            ~google.api_core.future.Future: The future that **was** attached
            to the subscription.
        Raises:
            ValueError: If the policy has not been opened yet.
        """
        if self._future is None:
            raise ValueError('This policy has not been opened yet.')
        # Stop consuming messages.
        self._request_queue.put(_helper_threads.STOP)
        self._dispatch_thread.join()  # Wait until stopped.
        self._dispatch_thread = None
        self._consumer.stop_consuming()
        self._leases_thread.join()
        self._leases_thread = None
        # ``shutdown()`` blocks (wait=True by default) until the in-flight
        # callbacks scheduled on the executor have finished.
        self._executor.shutdown()
        # The subscription is closing cleanly; resolve the future if it is not
        # resolved already.
        if not self._future.done():
            self._future.set_result(None)
        future = self._future
        self._future = None
        return future
    def _start_dispatch(self):
        """Start a thread to dispatch requests queued up by callbacks.
        .. note::
            This assumes, but does not check, that ``_dispatch_thread``
            is :data:`None`.
        Spawns a thread to run :meth:`dispatch_callback` and sets the
        "dispatch thread" member on the current policy.
        """
        _LOGGER.debug('Starting callback requests worker.')
        dispatch_worker = _helper_threads.QueueCallbackWorker(
            self._request_queue,
            self.dispatch_callback,
        )
        # Create and start the helper thread.
        thread = threading.Thread(
            name=_CALLBACK_WORKER_NAME,
            target=dispatch_worker,
        )
        # Daemonize so this helper can never block interpreter exit.
        thread.daemon = True
        thread.start()
        _LOGGER.debug('Started helper thread %s', thread.name)
        self._dispatch_thread = thread
    def _start_lease_worker(self):
        """Spawn a helper thread that maintains all of leases for this policy.
        .. note::
            This assumes, but does not check, that ``_leases_thread`` is
            :data:`None`.
        Spawns a thread to run :meth:`maintain_leases` and sets the
        "leases thread" member on the current policy.
        """
        _LOGGER.debug('Starting lease maintenance worker.')
        thread = threading.Thread(
            name='Thread-LeaseMaintenance',
            target=self.maintain_leases,
        )
        # Daemonize so this helper can never block interpreter exit.
        thread.daemon = True
        thread.start()
        self._leases_thread = thread
    def open(self, callback):
        """Open a streaming pull connection and begin receiving messages.
        .. warning::
            This method is not thread-safe. For example, if this method is
            called while another thread is executing :meth:`close`, then the
            policy could end up in an undefined state. The **same** policy
            instance is not intended to be used by multiple workers (though
            each policy instance **does** have a thread-safe private queue).
        For each message received, the ``callback`` function is fired with
        a :class:`~.pubsub_v1.subscriber.message.Message` as its only
        argument.
        Args:
            callback (Callable): The callback function.
        Returns:
            ~google.api_core.future.Future: A future that provides
            an interface to block on the subscription if desired, and
            handle errors.
        Raises:
            ValueError: If the policy has already been opened.
        """
        if self._future is not None:
            raise ValueError('This policy has already been opened.')
        # Create the Future that this method will return.
        # This future is the main thread's interface to handle exceptions,
        # block on the subscription, etc.
        self._future = Future(policy=self)
        # Start the thread to pass the requests.
        self._callback = callback
        self._start_dispatch()
        # Actually start consuming messages.
        self._consumer.start_consuming(self)
        self._start_lease_worker()
        # Return the future.
        return self._future
    def dispatch_callback(self, action, kwargs):
        """Map the callback request to the appropriate gRPC request.
        Args:
            action (str): The method to be invoked.
            kwargs (Dict[str, Any]): The keyword arguments for the method
                specified by ``action``.
        Raises:
            ValueError: If ``action`` isn't one of the expected actions
                "ack", "drop", "lease", "modify_ack_deadline" or "nack".
        """
        if action == 'ack':
            self.ack(**kwargs)
        elif action == 'drop':
            self.drop(**kwargs)
        elif action == 'lease':
            self.lease(**kwargs)
        elif action == 'modify_ack_deadline':
            self.modify_ack_deadline(**kwargs)
        elif action == 'nack':
            self.nack(**kwargs)
        else:
            raise ValueError(
                'Unexpected action', action,
                'Must be one of "ack", "drop", "lease", '
                '"modify_ack_deadline" or "nack".')
    def on_exception(self, exception):
        """Handle the exception.
        If the exception is one of the retryable exceptions, this will signal
        to the consumer thread that it should "recover" from the failure.
        This will cause the stream to exit when it returns :data:`False`.
        Returns:
            bool: Indicates if the caller should recover or shut down.
            Will be :data:`True` if the ``exception`` is "acceptable", i.e.
            in a list of retryable / idempotent exceptions.
        """
        # If this is in the list of idempotent exceptions, then we want to
        # retry. That entails just returning None.
        if isinstance(exception, self._RETRYABLE_STREAM_ERRORS):
            return True
        # Set any other exception on the future.
        self._future.set_exception(exception)
        return False
    def on_response(self, response):
        """Process all received Pub/Sub messages.
        For each message, schedule a callback with the executor.
        """
        for msg in response.received_messages:
            _LOGGER.debug(
                'Using %s to process new message received:\n%r',
                self._callback, msg)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            # Fan the user callback out onto the thread pool; completion is
            # logged via the module-level ``_callback_completed`` hook.
            future = self._executor.submit(self._callback, message)
            future.add_done_callback(_callback_completed)
| [
"contact@noahh.io"
] | contact@noahh.io |
ffe8f0bfa0cda93b62e2884a1937315803555c18 | c40d9297d54ee369afe5de5819e038386d32f5bd | /app/review_bid_list.py | 5a42529d3ffe0de2c0f2e9f7206ba26811513ace | [] | no_license | kuangwanjing/IntelligentAssignment | 47a14d5b7a8024fb8ea9f99a81e6d709aec0c886 | 06c0be177e43d240381c3c7e2254545f66b4b61c | refs/heads/master | 2020-04-10T03:03:42.743916 | 2018-12-07T16:14:53 | 2018-12-07T16:14:53 | 160,759,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | class ReviewBidList:
def __init__(self, review_size, participants = []):
self.participants = participants
self.review_size = review_size
| [
"samrappl50@gmail.com"
] | samrappl50@gmail.com |
6cb65b44504b20720b6967c08c0fb580dd2850cb | cd6a835b14596620d46236ce2ec8003b42dcd393 | /machina/apps/forum/urls.py | 588a345c6d1914ddacaafe935dbb9bae7b6ff0a3 | [] | no_license | VanHai88/covert-site | bfec3ed75a75f4a29614906d982fd565ac1e011b | 2385ebaf1ed6c0eb42027f6665f545ce60828c12 | refs/heads/master | 2023-06-08T19:43:18.339787 | 2021-06-22T09:28:00 | 2021-06-22T09:28:00 | 379,212,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | """
Forum URLs
==========
This module defines URL patterns associated with the django-machina's ``forum`` application.
"""
from django.urls import path
from machina.core.loading import get_class
from machina.core.urls import URLPatternsFactory
class ForumURLPatternsFactory(URLPatternsFactory):
    """ Allows to generate the URL patterns of the ``forum`` application. """

    app_namespace = 'forum'

    # Views are resolved through machina's class loader so they can be
    # overridden by downstream projects.
    index_view = get_class('forum.views', 'IndexView')
    forum_view = get_class('forum.views', 'ForumView')

    def get_urlpatterns(self):
        """ Returns the URL patterns managed by the considered factory / application. """
        index_pattern = path('', self.index_view.as_view(), name='index')
        forum_pattern = path(
            'forum/<str:slug>/<str:uuid>/',
            self.forum_view.as_view(),
            name='forum',
        )
        return [index_pattern, forum_pattern]
urlpatterns_factory = ForumURLPatternsFactory()
| [
"hai.nguyen@emwealthtech.com"
] | hai.nguyen@emwealthtech.com |
39a3b88526f8165408904493963979bfd1fd988e | 9191f82029b47613133659a828834230e50e695a | /coding_problem4.5.9.py | 81c58aa4d7f03b0894d91decfef0c48adb5aa920 | [] | no_license | JCGilPose/Intro_Computing_David_Joyner | 08bb2d61c3b8c629acd1efeb9a672f63ce65078c | ef84a89b81fed6bb8af9cc94193d509b499d3582 | refs/heads/master | 2020-05-31T00:01:06.768200 | 2019-08-12T11:22:30 | 2019-08-12T11:22:30 | 190,028,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | #Recall last exercise that you wrote a function, word_lengths,
#which took in a string and returned a dictionary where each
#word of the string was mapped to an integer value of how
#long it was.
#
#This time, write a new function called length_words so that
#the returned dictionary maps an integer, the length of a
#word, to a list of words from the sentence with that length.
#If a word occurs more than once, add it more than once. The
#words in the list should appear in the same order in which
#they appeared in the sentence.
#
#For example:
#
# length_words("I ate a bowl of cereal out of a dog bowl today.")
# -> {3: ['ate', 'dog', 'out'], 1: ['a', 'a', 'i'],
# 5: ['today'], 2: ['of', 'of'], 4: ['bowl'], 6: ['cereal']}
#
#As before, you should remove any punctuation and make the
#string lowercase.
#Write your function here!
def length_words(string):
lengths = {}
string = string.lower()
to_replace = ".,'!?"
for mark in to_replace:
string = string.replace(mark, "")
words = string.split()
for word in words:
if len(word) not in lengths:
lengths[len(word)] = [word]
else:
lengths[len(word)].append(word)
return lengths
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print:
#{1: ['i', 'a', 'a'], 2: ['of', 'of'], 3: ['ate', 'out', 'dog'], 4: ['bowl', 'bowl'], 5: ['today'], 6: ['cereal']}
#
#The keys may appear in a different order, but within each
#list the words should appear in the order shown above.
print(length_words("I ate a bowl of cereal out of a dog bowl today."))
| [
"noreply@github.com"
] | noreply@github.com |
c03780fcab120293920c7a174d3b1e18f90e1e0f | 3e55e0bf2b751163680b4557c614d9facd5c97c0 | /cart/views.py | 7774a2f6c65b353be32149af9b0d5d62a2f98081 | [] | no_license | jtuck15/ecommerce-mini-django-project | 8efb696d158a581d571982f523cb7cf721fee287 | 69fa59793074811e4849eb6450f155e86a8be041 | refs/heads/master | 2022-12-16T17:05:04.640977 | 2019-06-04T19:52:18 | 2019-06-04T19:52:18 | 189,295,159 | 0 | 0 | null | 2022-12-08T05:12:06 | 2019-05-29T20:43:05 | Python | UTF-8 | Python | false | false | 984 | py | from django.shortcuts import render, redirect, reverse
# Create your views here.
def view_cart(request):
    """Render the page showing the current cart contents (read from the
    session by the template layer)."""
    template_name = 'cart.html'
    return render(request, template_name)
def add_to_cart(request, id):
    """Add a quantity of the specified product to the session cart.

    The cart lives in the session as a mapping of product id -> quantity.
    POST must carry a ``quantity`` field (coerced with ``int``; a missing
    or non-numeric value raises, unchanged from the original behaviour).
    Redirects to the index page afterwards.
    """
    quantity = int(request.POST.get('quantity'))
    cart = request.session.get('cart', {})
    # One expression covers both "already in cart" and "new item". The
    # original's else-branch ``cart[id] = cart.get(id, quantity)`` was a
    # confusing no-op spelling of ``cart[id] = quantity`` (the key is known
    # to be absent there). int() keeps the original's coercion of any
    # stored value.
    cart[id] = int(cart.get(id, 0)) + quantity
    request.session['cart'] = cart
    return redirect(reverse('index'))
def adjust_cart(request, id):
    """Adjust the quantity of the specified product to the specified amount"""
    quantity = int(request.POST.get('quantity'))
    cart = request.session.get('cart', {})
    # A non-positive quantity removes the item entirely.
    if quantity <= 0:
        cart.pop(id)
    else:
        cart[id] = quantity
    request.session['cart'] = cart
    return redirect(reverse('view_cart'))
"jim.tuck15@gmail.com"
] | jim.tuck15@gmail.com |
4342584878b19df84503285c1ef03b3e9e403054 | 2d91300f0624f5287dea65dbedcaf2ecb1f5274b | /App/PageObject/HomePage.py | 0ddd990524f11b9febe72b6028be631e24121500 | [] | no_license | zlmone/nonoapp-android-autotest | e82faf04961c51526f4611eb3b36566d78a3031c | 10fc369d426707b85be874bb7835c17a0f222c4d | refs/heads/master | 2022-01-21T04:02:09.216843 | 2019-07-19T08:17:33 | 2019-07-19T08:17:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | from App.Driver import Driver
import time
class HomePage:
    """Page object for the app's bottom navigation tabs (uiautomator selectors)."""
    @property
    def mine_view(self):
        # "我的" ("Mine") tab — matched by resource id plus label text.
        return Driver.d(resourceId="com.nonoapp:id/tv_home_tab_title", text=u"我的")
    @property
    def invest_view(self):
        # "出借" ("Invest/Lend") tab — matched by label text only.
        return Driver.d(text=u"出借")
    @property
    def discover_view(self):
        # "发现" ("Discover") tab.
        return Driver.d(text="发现")
    @property
    def firstpage_view(self):
        # "首页" ("Home") tab.
        return Driver.d(text="首页")
    def click_mine_view(self):
        # Tap the "Mine" tab, then pause for the page transition.
        # NOTE(review): fixed sleep(4) — presumably waiting for page load;
        # an explicit wait on a target element would likely be more robust.
        self.mine_view.click()
        time.sleep(4)
"suzhiyu@suzhiyu-raindeMacBook.local"
] | suzhiyu@suzhiyu-raindeMacBook.local |
f73d89394fef515dd649d7bc36678ebade8faa53 | 017b29bd393cfafc2fe398aae7754bce0182da7c | /examples/ice40hx8k-yosys/SConscript | fd9268d63da7d8ddc623c6c5cb4ad2409fb12f6e | [
"LicenseRef-scancode-bsd-3-clause-jtag"
] | permissive | chipyard/chisel-jtag | a971d50a82097c77b7cad8970a87df39efa4a86f | 9646b090c16fe26790be77135e1bc491353c9815 | refs/heads/master | 2022-04-16T03:03:00.657118 | 2020-04-14T20:26:43 | 2020-04-14T20:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | Import('env')
blif = env.Synthesize('top', ['build/Top.v', '#../../external/vsrc/AsyncResetReg.v'])
asc = env.PlaceRoute(blif, pcf='ice40hx8k-b-evn.pcf')
bits = env.BitGen(asc)
report = env.Timing(asc)
env.Default(bits, report)
env.Alias('prog', [env.BitProg('prog', bits), report])
env_sram = env.Clone()
env_sram.Append(BITPROGOPTS = '-S')
env.Alias('prog-sram', [env_sram.BitProg('prog-sram', bits), report])
| [
"richard.lin@berkeley.edu"
] | richard.lin@berkeley.edu | |
5b34e7d87886c5e4e1f84382ab18bc06fd905371 | 5cb94263a7ccdc5a3f6ca18040672124c483ca12 | /backend_server/efluent_webserver/e_fluent_app/migrations/0001_initial.py | b6aac4292d59df3e823afc4b94f0b1e1fc720334 | [] | no_license | martinvol/e-fluent | e6ad8524673a746b95f9f2f20e9a46f9b1e80dd1 | 06c18b1ab853e44a7b07b4821ed966ae79d2c707 | refs/heads/master | 2020-12-07T02:32:38.605369 | 2016-06-19T00:34:29 | 2016-06-19T00:34:29 | 57,111,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-03 07:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial (auto-generated) migration for e_fluent_app: creates the
    # Orthophoniste and Patient tables, each currently holding only an
    # auto-incrementing primary key.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Orthophoniste',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| [
"martin.volpe@gmail.com"
] | martin.volpe@gmail.com |
a44762f7f83ec08c0a592bc170b50259d8bd49e2 | 292417a70e83d33fc4cedaed34d1b8e859ffe1a7 | /market/urls.py | 113708503a9a96ee4ed392f28d9d1321ee1c94c8 | [] | no_license | cooluks2/Niche-market-mine | c739144b61dfecd641f19bfa20439388d9dd562d | 2eacedd83ae3d1690ac56f9ae4089a44737c4771 | refs/heads/master | 2022-12-10T09:45:13.759650 | 2020-08-31T02:52:07 | 2020-08-31T02:52:07 | 289,786,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | from django.urls import path
from market.views import StoreDV, StoreLV, StoreCreateView, StoreUpdateView, StoreDeleteView, store_download
from market.views import MarketDV, MarketCreateView, MarketUpdateView, MarketDeleteView, market_download
from mysite.views import HomeView
from market.models import Market, Location
app_name = 'market'
urlpatterns = [
    # Market-level pages (list/detail/CRUD plus a download endpoint).
    path('', HomeView.as_view(), name='home'),
    path('<int:pk>/', MarketDV.as_view(), name='market'),
    path('market_add/', MarketCreateView.as_view(), name="market_add"),
    path('<int:pk>/market_update/', MarketUpdateView.as_view(), name="market_update"),
    path('<int:pk>/market_delete/', MarketDeleteView.as_view(), name="market_delete"),
    path('market_download/<int:id>', market_download, name="market_download"),
    # Store-level pages. ``fk`` appears to identify the parent market and
    # ``pk`` the store — confirm against the view implementations.
    path('store/<int:pk>/', StoreLV.as_view(), name='store'),
    path('store/<int:fk>/<int:pk>/', StoreDV.as_view(), name='store_detail'),
    path('<int:fk>/store_add/', StoreCreateView.as_view(), name="store_add"),
    path('<int:pk>/store_update/', StoreUpdateView.as_view(), name="store_update"),
    path('<int:pk>/store_delete/', StoreDeleteView.as_view(), name="store_delete"),
    path('store_download/<int:id>', store_download, name="store_download"),
]
| [
"cooluks2@gmail.com"
] | cooluks2@gmail.com |
6caf9bad8efb32e56e0a9001ee05d706bcc31682 | a40f3574cebafaf0ae82277cf7d128aea8e340d9 | /GAN/gan.py | 19e948a8f69cc0aecbe17607b1b1ff1a63913430 | [] | no_license | ybai62868/PytorchLearning | b14f36896477baf6afb65412e14a9f8e38efb086 | 3ed4252978fedc65349eb094d85ddaf461c0bd18 | refs/heads/master | 2021-08-15T06:43:05.961680 | 2017-11-17T14:30:22 | 2017-11-17T14:30:22 | 106,394,788 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
# Fix both RNGs for reproducible training runs.
torch.manual_seed(1)
np.random.seed(1)
BATCH_SIZE = 64
LR_G = 0.0001           # generator learning rate
LR_D = 0.0001           # discriminator learning rate
N_IDEAS = 5             # size of the generator's latent (noise) vector
ART_COMPONENTS = 15     # number of points composing one "painting"
# x-coordinates in [-1, 1], replicated once per batch element.
PAINT_POINTS = np.vstack([np.linspace(-1,1,ART_COMPONENTS) for _ in range(BATCH_SIZE)])
def artists_works():
    """Produce a batch of 'real' paintings.

    Each painting is the quadratic a*x**2 + (a - 1) sampled at PAINT_POINTS,
    with a drawn uniformly from [1, 2) per batch element.
    """
    coeff = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
    curves = coeff * np.power(PAINT_POINTS, 2) + (coeff - 1)
    return Variable(torch.from_numpy(curves).float())
# Generator: maps an N_IDEAS-dimensional noise vector to a painting
# (ART_COMPONENTS y-values).
G = nn.Sequential(
    nn.Linear(N_IDEAS,128),
    nn.ReLU(),
    nn.Linear(128,ART_COMPONENTS),
)
# Discriminator: scores a painting with the probability that it is real
# (sigmoid output in (0, 1), used in the log-losses below).
D = nn.Sequential(
    nn.Linear(ART_COMPONENTS,128),
    nn.ReLU(),
    nn.Linear(128,1),
    nn.Sigmoid(),
)
# Create the optimizers ONCE, before training. The original re-instantiated
# Adam inside the loop every step, which discards Adam's running moment
# estimates and effectively degrades it to plain SGD with the Adam step rule.
opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
for step in range(2000):
    artists_paintings = artists_works()                 # real samples
    G_ideas = Variable(torch.randn(BATCH_SIZE, N_IDEAS))
    G_paintings = G(G_ideas)                            # fake samples
    prob_artist0 = D(artists_paintings)                 # D's belief reals are real
    prob_artist1 = D(G_paintings)                       # D's belief fakes are real
    # Standard (non-saturating-free) GAN losses.
    D_loss = -torch.mean(torch.log(prob_artist0) + torch.log(1 - prob_artist1))
    G_loss = torch.mean(torch.log(1 - prob_artist1))
    opt_D.zero_grad()
    # retain_graph (renamed from the long-removed ``retain_variables``)
    # keeps the graph alive so G_loss can backprop through the same
    # forward pass afterwards.
    D_loss.backward(retain_graph=True)
    opt_D.step()
    opt_G.zero_grad()
    G_loss.backward()
    opt_G.step()
| [
"ybai62868@gmail.com"
] | ybai62868@gmail.com |
c0da0137dc94f85eb66878eb6c898a2804ae0fd3 | 3e97ac25dc95de9136fdc28840b869aa6f95f4ed | /ProcessFluorescence/NovelRegressionDevelopment/df_on_f_novel_regression_validation_with_created_data.py | 1dab77727f0fb088e4d89fbb2c883a3032adbf1f | [] | no_license | vivian-imbriotis/AccDataTools | d0753996bfbc5ae7d8c43eb94b6fd053f6565d33 | df2b85355c2dce2b68848064e2dc57c98cc5675b | refs/heads/master | 2022-12-30T15:03:37.367395 | 2020-10-22T07:44:43 | 2020-10-22T07:44:43 | 243,228,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,622 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 11:12:15 2020
@author: Vivian Imbriotis
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from scipy.ndimage.filters import minimum_filter1d, uniform_filter1d
from sklearn.linear_model import TheilSenRegressor
from df_on_f_novel_regression_test import underline_regression
from vectorised_underline_regression import UnderlineRegressor
import seaborn as sns
###SETTINGS###
# Fix the RNG so the synthetic data (and therefore every figure) is
# reproducible from run to run.
np.random.seed(72)
sns.set_style("dark")
def make_data(n_points = 10000):
    """Build one synthetic recording.

    Returns a dict with:
        frame_n      -- frame indices 0..n_points-1
        ground_truth -- the true cell trace (make_spikes)
        bg           -- the background trace (make_bg)
        slope        -- random contamination slope in [0.5, 1.5)
        raw          -- measured trace: ground_truth + slope * bg
    """
    ground_truth = make_spikes(n_points)
    background = make_bg(n_points)
    contamination = np.random.random() + 0.5
    return {
        "frame_n": np.arange(0, n_points),
        "ground_truth": ground_truth,
        "bg": background,
        "slope": contamination,
        "raw": ground_truth + contamination * background,
    }
def make_spikes(n_points = 10000):
    """Simulate a noisy cell trace: sparse lognormal-amplitude events
    convolved with an exponential kernel, plus Gaussian noise and a
    baseline of 100."""
    spike_prob = 0.01
    spike_size_mean = 1
    kernel = make_kernel()
    n_events = n_points - kernel.size + 1
    spike_times = 1.0 * (np.random.rand(n_events) < spike_prob)
    amplitudes = np.random.lognormal(mean=spike_size_mean, sigma=0.5,
                                     size=spike_times.size)
    spike_amps = amplitudes * spike_times
    noise = 5 * (np.random.randn(n_points) / 2)
    return np.convolve(spike_amps, kernel) + noise + 100
def make_bg(n_points = 10000):
    """Simulate a slowly-oscillating, noisy background (neuropil) trace
    around a baseline of 10."""
    frames = np.arange(0, n_points)
    slow_wave = np.sin(frames / (6.28 * 100)) / 2
    noise = np.random.randn(n_points) / 2
    return slow_wave + noise + 10
def make_kernel(tau = 10):
    """Exponential-decay kernel exp(-t/tau), truncated at 8*tau samples."""
    support = np.arange(0, tau * 8)
    return np.exp(-support / tau)
#Just plotting and helper functions below
def plot_data_creation_process():
    """Figure showing the three synthesis stages: true cell fluorescence,
    background fluorescence, and the contaminated 'measured' ROI trace."""
    data = make_data()
    fig, ax = plt.subplots(nrows = 3,tight_layout= True)
    ax[0].set_title("Cell Fluorescence")
    ax[0].plot(data["frame_n"],data["ground_truth"])
    ax[1].set_title("Background Fluorescence")
    ax[1].plot(data["frame_n"],data["bg"])
    ax[2].set_title("Measured ROI Fluorescence")
    ax[2].plot(data["frame_n"],data["raw"])
    fig.show()
def plot_data():
    """Figure comparing the naive robust regression and offset robust
    regression approaches against the ground-truth trace.

    Bug fixed: the original called ``calc_error(data)`` with a single
    argument, but ``calc_error`` requires ``(data, df)`` — the call raised
    TypeError. Since this panel plots the robust-regression dF/F trace, we
    pass df-space and take the RR entry of the returned (RR, CR, PR) tuple.
    """
    data = make_data()
    data = proc_data(data)
    fig, ax = plt.subplots(nrows = 3, ncols = 2,figsize = (15, 9))
    ax[0][0].plot(data["frame_n"], data["raw"])
    ax[0][0].set_ylabel('Raw F')
    ax[0][0].set_xlabel('Frame Number')
    ax[0][1].plot(data["bg"], data["raw"], 'o')
    ax[0][1].plot(data["bg"], data["NRR_theta"][0] + data["NRR_theta"][1]*data["bg"], label = 'Naive Robust Regression')
    ax[0][1].plot(data["bg"], data["RR_theta"][0] + data["RR_theta"][1]*data["bg"], label = 'Robust Regression')
    ax[0][1].set_ylabel('Total Fluorescence')
    ax[0][1].set_xlabel('Neuropil Fluorescence')
    ax[0][1].legend(loc='upper left')
    ax[1][0].plot(data["frame_n"], data["ground_truth"])
    ax[1][0].set_ylabel("Ground Truth")
    ax[1][1].plot(data["frame_n"], data["NRR_df_on_f"])
    ax[1][1].set_ylabel("Naive Robust Reg df/f")
    ax[2][0].plot(data["frame_n"], data["RR_df_on_f"])
    ax[2][0].set_ylabel("Robust Regression df/f")
    ax[2][1].plot(data["ground_truth"], data["RR_df_on_f"], 'o')
    ax[2][1].plot(data["ground_truth"], data["ground_truth"], label= 'Unity')
    ax[2][1].set_title("MeanSquareError = " + str(calc_error(data, True)[0]))
    ax[2][1].set_xlabel("Ground Truth")
    ax[2][1].set_ylabel("Robust Regression Aproach")
    ax[2][1].legend(loc='upper left')
    fig.show()
def compare_approaches(df = False):
    """4x2 figure comparing all three regression approaches (RR, biased L1
    via Powell, biased L2 via BFGS) against ground truth.

    Left column: traces over time; right column: fits and scatter of each
    approach's output against ground truth, titled with its MSE.

    Args:
        df: if True, compare in dF/F0 space; otherwise compare the raw
            background-subtracted traces.
    """
    data = make_data()
    data = proc_data(data)
    fig, ax = plt.subplots(nrows = 4, ncols = 2,figsize = (15, 12),
                           tight_layout=True)
    ax[0][0].plot(data["frame_n"], data["gt_df_on_f"]if df else data["ground_truth"])
    ax[0][0].set_ylabel(f"Ground Truth {'dF/F0' if df else 'Fcell'}")
    ax[0][1].plot(data["bg"], data["raw"], 'o')
    ax[0][1].plot(data["bg"], data["RR_theta"][0] + data["RR_theta"][1]*data["bg"], label = 'Robust Regression')
    ax[0][1].plot(data["bg"], data["CR_theta"][0] + data["CR_theta"][1]*data["bg"], label = 'Biased L1 Regression (Powell Method)')
    ax[0][1].plot(data["bg"], data["PR_theta"][0] + data["PR_theta"][1]*data["bg"], label = 'Biased L2 Regression (BFGS Method)')
    ax[0][1].set_ylabel('Total Fluorescence')
    ax[0][1].set_xlabel('Neuropil Fluorescence')
    ax[0][1].legend(loc='upper left')
    ax[1][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["RR_df_on_f"] if df else data["RR_bg_subtracted"], 'o')
    ax[1][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["gt_df_on_f"]if df else data["ground_truth"],
                  label= 'Unity')
    ax[1][1].set_title(f"MeanSquareError = {calc_error(data,df)[0]:.8f}")
    ax[1][1].set_xlabel("Ground Truth")
    ax[1][1].set_ylabel("Robust Regression Approach")
    ax[1][1].legend(loc='upper left')
    ax[1][0].plot(data["frame_n"],
                  data["RR_df_on_f"] if df else data["RR_bg_subtracted"])
    ax[1][0].set_ylabel(f"Robust Regression {'dF/F0' if df else 'Fcell'}")
    ax[2][0].plot(data["frame_n"],
                  data["CR_df_on_f"] if df else data["CR_bg_subtracted"])
    ax[2][0].set_ylabel(f"L1-Heaviside Regression {'dF/F0' if df else 'Fcell'}")
    ax[2][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["CR_df_on_f"] if df else data["CR_bg_subtracted"], 'o')
    ax[2][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["gt_df_on_f"]if df else data["ground_truth"],
                  label= 'Unity')
    ax[2][1].set_title(f"MeanSquareError = {calc_error(data,df)[1]:.8f}")
    ax[2][1].set_xlabel("Ground Truth")
    ax[2][1].set_ylabel("Custom Regression Approach 1")
    ax[2][1].legend(loc='upper left')
    ax[3][0].plot(data["frame_n"],
                  data["PR_df_on_f"] if df else data["PR_bg_subtracted"])
    ax[3][0].set_ylabel(f"Biased L2 vs L2k norm Regression {'dF/F0' if df else 'Fcell'}")
    ax[3][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["PR_df_on_f"] if df else data["PR_bg_subtracted"], 'o')
    ax[3][1].plot(data["gt_df_on_f"]if df else data["ground_truth"],
                  data["gt_df_on_f"]if df else data["ground_truth"],
                  label= 'Unity')
    ax[3][1].set_title(f"MeanSquareError = {calc_error(data,df)[2]:.8f}")
    ax[3][1].set_xlabel("Ground Truth")
    ax[3][1].set_ylabel("Custom Regression Approach 2")
    ax[3][1].legend(loc='upper left')
    fig.show()
def time_approaches():
    """Benchmark the three regression methods over a range of input sizes
    and plot execution time against number of data points.

    Bug fixed: the original allocated 10-element timing buffers but only
    executed one repeat (``for i in range(1)``), so ``.mean()`` averaged one
    real measurement with nine zero entries, under-reporting every time by
    a factor of ten.
    """
    N = np.linspace(80,100000,10).astype(int)
    robust_regression_times = np.zeros(N.shape)
    powell_times = np.zeros(N.shape)
    L2_norm_times = np.zeros(N.shape)
    n_repeats = 1  # raise for smoother (averaged) estimates
    for idx,n_points in enumerate(N):
        print(idx)
        t_rr = np.zeros(n_repeats)
        t_powell = np.zeros(n_repeats)
        t_l2 = np.zeros(n_repeats)
        for i in range(n_repeats):
            data = make_data(n_points)
            start = time()
            robust_regression(data["bg"], data["raw"])
            after_rr = time()
            custom_regression(data["bg"], data["raw"])
            after_powell = time()
            parabolic_regression(data["bg"], data["raw"])
            after_l2 = time()
            t_rr[i] = after_rr - start
            t_powell[i] = after_powell - after_rr
            t_l2[i] = after_l2 - after_powell
        robust_regression_times[idx] = t_rr.mean()
        powell_times[idx] = t_powell.mean()
        L2_norm_times[idx] = t_l2.mean()
    fig,ax = plt.subplots(nrows = 3, sharex=True)
    ax[0].set_title("Robust Regression Time Complexity")
    ax[0].plot(N,robust_regression_times)
    ax[1].set_title("Powell Method Time Complexity")
    ax[1].plot(N,powell_times)
    ax[1].set_ylabel("Execution Time")
    ax[2].set_title("BFGS Method Time Complexity")
    ax[2].plot(N,L2_norm_times)
    ax[2].set_xlabel("Number of data points")
    fig.show()
def proc_data(data):
    """Run every background-subtraction approach and store results in *data*.

    For each method prefix (NRR, RR, CR, PR) this adds '<prefix>_theta',
    '<prefix>_bg_subtracted' and '<prefix>_df_on_f' keys, and also adds the
    ground-truth dF/F0 under 'gt_df_on_f'. Returns the same dict.
    """
    data["gt_df_on_f"] = get_df_on_f0(data["ground_truth"])
    methods = (
        ("NRR", naive_robust_regression),
        ("RR", robust_regression),
        ("CR", custom_regression),
        ("PR", parabolic_regression),
    )
    for prefix, regress in methods:
        theta = regress(data["bg"], data["raw"])
        subtracted = subtract_bg(data["raw"], data["bg"], theta)
        data[prefix + "_theta"] = theta
        data[prefix + "_bg_subtracted"] = subtracted
        data[prefix + "_df_on_f"] = get_df_on_f0(subtracted)
    return data
def calc_error(data, df):
    """Mean squared error of each approach (RR, CR, PR) vs ground truth.

    Args:
        data: dict produced by proc_data.
        df: if truthy, compare dF/F0 traces; otherwise compare the raw
            background-subtracted traces.

    Returns:
        Tuple of three MSE values in the order (RR, CR, PR).
    """
    truth = data["gt_df_on_f"] if df else data["ground_truth"]
    suffix = "df_on_f" if df else "bg_subtracted"
    return tuple(
        np.mean((truth - data["%s_%s" % (prefix, suffix)]) ** 2)
        for prefix in ("RR", "CR", "PR")
    )
def ols(x, y):
    """Ordinary least squares fit of y against x.

    Returns:
        np.ndarray: [intercept, slope].
    """
    # Attribute access avoids the original's four unused unpacked variables
    # (r_value, p_value, std_err) from the 5-tuple form.
    result = linregress(x, y)
    return np.array([result.intercept, result.slope])
def naive_robust_regression(x, y):
    """Theil-Sen fit of y against x; returns np.array([intercept-ish, slope]).

    NOTE(review): the design matrix already contains a column of ones AND
    TheilSenRegressor fits its own intercept by default, so the true
    intercept is ``reg.intercept_ + reg.coef_[0]``; only the slope
    ``reg.coef_[1]`` is reliable here. Downstream ``subtract_bg`` ignores
    theta[0], so this does not affect the dF/F traces — confirm before
    using theta[0] elsewhere.
    """
    y = y.reshape(-1, 1)
    X = np.vstack((np.ones(y.shape).transpose(), x.reshape(-1, 1).transpose()))
    reg = TheilSenRegressor(random_state=0).fit(X.transpose(), np.ravel(y))
    return np.array([reg.coef_[0], reg.coef_[1]])
def robust_regression(x, y):
    """Theil-Sen fit whose intercept is shifted so the line underlies the data.

    After fitting, the minimum residual is folded back into the intercept,
    so the returned theta is [intercept + min(residual), slope] — i.e. the
    line is translated down to pass under the lowest data point.

    NOTE(review): downstream ``subtract_bg`` currently ignores theta[0],
    so the offset only matters for callers that use the intercept directly.
    """
    y_reshape = y.reshape(-1, 1)
    X = np.vstack((np.ones(y_reshape.shape).transpose(), x.reshape(-1, 1).transpose()))
    reg = TheilSenRegressor(random_state=0).fit(X.transpose(), np.ravel(y_reshape))
    # subtracted_data = subtract_bg(y, x, [reg.coef_[0], reg.coef_[1]])
    subtracted_data = y - reg.coef_[0] - reg.coef_[1]*x
    offset = np.min(subtracted_data)
    return np.array([reg.coef_[0]+offset, reg.coef_[1]])
def custom_regression(x, y):
    """Biased L1 'underline' regression via the project solver, using
    scipy's Powell minimizer; returns theta = [intercept, slope]."""
    return underline_regression(x,y, method = "Powell")
def parabolic_regression(x,y):
    """Vectorised underline regression via the project's UnderlineRegressor
    (the 'biased L2 / BFGS' approach in the comparison figures)."""
    return UnderlineRegressor.regress(x,y)
def subtract_bg(f, bg, theta):
    """Remove the scaled background component from *f*.

    Only the slope theta[1] is used; the intercept theta[0] is not
    subtracted (an earlier version that also removed the intercept was
    commented out by the author — the dead line is dropped here).
    """
    return f - theta[1]*bg
def get_smoothed_running_minimum(timeseries, tau1 = 30, tau2 = 100):
    """Running minimum (window tau2) of a boxcar-smoothed (window tau1) trace.

    Used as the F0 baseline estimate for dF/F0.
    """
    smoothed = uniform_filter1d(timeseries, tau1, mode='nearest')
    return minimum_filter1d(smoothed, tau2, mode='reflect')
def get_df_on_f0(F, F0=None):
    """Return (F - F0) / F0.

    If F0 is omitted, it is estimated as the smoothed running minimum of F.
    """
    if F0 is None:
        F0 = get_smoothed_running_minimum(F)
    return (F - F0) / F0
compare_approaches(df=True) | [
"vivian.imbriotis@gmail.com"
] | vivian.imbriotis@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.