content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
"""RZFeeser@alta3.com | Alta3 Research
Using the CSV library to work with CSV data."""
# standard library import
import csv
# open our csv data (we want to loop across this)
# open our csv data (we want to loop across this)
with open("csv_users.txt", "r") as csvfile:
    # enumerate(..., start=1) replaces the original manual counter and keeps
    # the same file numbering: admin.rc1, admin.rc2, ...
    for i, row in enumerate(csv.reader(csvfile), start=1):
        filename = f"admin.rc{i}"  # unique output name per CSV row
        # open a file via "with"; it auto-closes when the block ends
        with open(filename, "w") as rcfile:
            # expected row layout:
            # auth_url, project, project_domain, username, user_domain, password
            print(f"export OS_AUTH_URL={row[0]}", file=rcfile)
            print("export OS_IDENTITY_API_VERSION=3", file=rcfile)
            print(f"export OS_PROJECT_NAME={row[1]}", file=rcfile)
            print(f"export OS_PROJECT_DOMAIN_NAME={row[2]}", file=rcfile)
            print(f"export OS_USERNAME={row[3]}", file=rcfile)
            print(f"export OS_USER_DOMAIN_NAME={row[4]}", file=rcfile)
            print(f"export OS_PASSWORD={row[5]}", file=rcfile)
# all of the indentation ends, so all files are auto closed
# display this to the screen when all of the looping is over
print("admin.rc files created!")
|
import requests
import json
import uuid
import boto3
# Rekognition client; region and credentials come from the ambient AWS config
client = boto3.client('rekognition')
# store each person by a uuid instead of by name
uuid_to_person_map = {}
# publicly hosted sample portrait to index
photo_link = ("https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/"
              "Official_portrait_of_Vice_President_Joe_Biden.jpg/"
              "1024px-Official_portrait_of_Vice_President_Joe_Biden.jpg")
name = "Joe Biden"
# random identifier that will double as the face's ExternalImageId
this_uuid = str(uuid.uuid4())
print("{} ({}): {}".format(name, this_uuid, photo_link))
# update our mapping
uuid_to_person_map[this_uuid] = [name, photo_link]
# actually get the photo (must happen before index_faces can read its bytes)
photo = requests.get(photo_link)
# now load it into our rekognition collection
# NOTE(review): assumes the 'company-photos' collection already exists;
# response status is not checked — confirm error handling upstream
response = client.index_faces(
    CollectionId='company-photos',
    Image={
        'Bytes': photo.content
    },
    ExternalImageId=str(this_uuid)
)
# persist our uuid to photo mapping to a file
with open("uuid_to_person_map.json", 'w') as mapfile:
    mapfile.write(json.dumps(uuid_to_person_map))
|
import tornado.ioloop
import tornado.web
import os
import uuid
import json
# per-unit calorie factor for each supported food category
# (units are not documented in source — presumably calories per gram;
# TODO confirm against the model that produces the weights)
dictFood={'burger':5.4, 'french_fries':5.08 ,'chicken':3.88, 'toast':3.125, 'egg':1.95, 'pizza':2.66, 'cookie':5.1, 'hot dog':2.9, 'steak':2.7}
class NewHandler(tornado.web.RequestHandler):
    """Serve the landing page (in.html)."""

    def get(self):
        page_items = ["Calorie", "Predictor"]
        self.render("in.html", title="My title", items=page_items)
class WHandler(tornado.web.RequestHandler):
    """Compute total calories from the per-category quantities posted by the form.

    Renders hh.html with the per-category calories (ccc), the grand total
    (result) and the raw posted weights (weight). Also writes each
    category's calories to WeightOutput.txt.
    """

    def post(self):
        result = 0
        category_path = os.path.join(os.path.dirname(__file__), 'static/categorys.txt')
        # SECURITY NOTE(review): eval() of file content — the file is local and
        # trusted, but ast.literal_eval would be safer for a literal list.
        with open(category_path) as cat_file:
            category = eval(cat_file.readlines()[0])
        categorys = [c.strip() for c in category]
        rst = {}
        weight = {}
        # "with" guarantees the report file is closed (original leaked both
        # the category file and this handle)
        with open('WeightOutput.txt', 'w') as report:
            for c in categorys:
                temp = self.get_argument(c)
                # keep only ASCII digit characters, e.g. "12g" -> 12
                digits = ''.join(ch for ch in temp if '0' <= ch <= '9')
                quantity = int(digits) if digits else 0
                rst[c] = quantity * dictFood[c]
                weight[c] = temp
                result += rst[c]
                report.writelines(c + ":" + str(rst[c]) + '\n')
        self.render("hh.html", title="My title", ccc=rst, result=result, weight=weight)
class SHandler(tornado.web.RequestHandler):
    """Accept an uploaded photo, run the Mask R-CNN demo over it and render
    the segmented result page (second.html)."""

    def post(self):
        category_path = os.path.join(os.path.dirname(__file__), 'static/categorys.txt')
        upload_path1 = os.path.join(os.path.dirname(__file__), 'static/file1.jpg')
        file_metas = self.request.files['file']
        # every upload is written to the same fixed path; multiple files in
        # one request overwrite each other (preserved from original)
        for meta in file_metas:
            with open(upload_path1, 'wb') as up:
                up.write(meta['body'])
        # NOTE(review): hard-coded absolute path and ignored exit status;
        # consider subprocess.run with a configurable path.
        os.system('cd /Users/mym/Desktop/kaluli/Mask_RCNN-master/samples && python3 demo9.py')
        # SECURITY NOTE(review): eval() of a trusted local file; prefer
        # ast.literal_eval. "with" fixes the leaked handle of the original.
        with open(category_path) as cat_file:
            category = eval(cat_file.readlines()[0])
        categorys = [c.strip() for c in category]
        img_path = 'static/filess.jpg'
        result = {
            "categorys": categorys,
            "img_path": img_path
        }
        self.render("second.html", img_path='static/filess.jpg',
                    result=result)
def uuid_naming_strategy(original_name):
    """File naming strategy: ignore the original name, return a fresh UUID."""
    generated = uuid.uuid4()
    return str(generated)
class UploadFileHandler(tornado.web.RequestHandler):
    """Render the upload page (GET) and process photo uploads (POST)."""

    def _load_categorys(self):
        # categorys.txt holds one category name per line; the original
        # duplicated this loading code in get() and post() and leaked the
        # file handle both times
        category_path = os.path.join(os.path.dirname(__file__), 'static/categorys.txt')
        with open(category_path) as cat_file:
            return [c.strip() for c in cat_file.readlines()]

    def get(self):
        categorys = self._load_categorys()
        img_path = 'static/file1.jpg'
        result = {
            "categorys": categorys,
            "img_path": img_path
        }
        self.render("index.html", title="My title", items=["Calorie", "Predictor"], img_path='static/file1.jpg', result=result)

    def post(self):
        upload_path = os.path.join(os.path.dirname(__file__), 'static/file1.jpg')
        # all uploads overwrite the same fixed file (preserved from original)
        for meta in self.request.files['file']:
            with open(upload_path, 'wb') as up:
                up.write(meta['body'])
        categorys = self._load_categorys()
        img_path = 'static/file1.jpg'
        result = {
            "categorys": categorys,
            "img_path": img_path
        }
        # NOTE(review): hard-coded absolute path and ignored exit status;
        # consider subprocess.run with a configurable path.
        os.system('cd /Users/mym/Desktop/kaluli/Mask_RCNN-master/samples && python3 demo9.py')
        self.render("index.html", title="My title", items=["c", "p"], img_path='static/file1.jpg', result=result)
class DownloadHandler(tornado.web.RequestHandler):
    """Stream a file back to the client as an attachment.

    SECURITY NOTE(review): ``filename`` comes straight from the URL and is
    opened without sanitisation — path traversal is possible. Restrict it
    to a fixed directory or whitelist before deploying.
    """
    def post(self, filename):
        print('i download file handler : ', filename)
        self.set_header('Content-Type', 'application/octet-stream')
        self.set_header('Content-Disposition', 'attachment; filename=' + filename)
        # stream in 4 KiB chunks to avoid loading the whole file into memory
        with open(filename, 'rb') as f:
            while True:
                data = f.read(4096)
                if not data:
                    break
                self.write(data)
        self.finish()
    # GET downloads behave exactly like POST downloads
    get = post
def make_app():
    """Build the Tornado application with all route handlers registered."""
    routes = [
        (r"/", NewHandler),
        (r"/s", SHandler),
        (r"/k", UploadFileHandler),
        (r"/w", WHandler),
    ]
    settings = dict(debug=True)
    return tornado.web.Application(
        routes,
        **settings,
        static_path=os.path.join(os.path.dirname(__file__), "static"),
    )
if __name__ == "__main__":
    # start the web server on port 8880 and block until interrupted
    app = make_app()
    app.listen(8880)
    tornado.ioloop.IOLoop.current().start()
|
from django.contrib import admin
from django.contrib.auth import get_user_model
# resolve the active user model (honours settings.AUTH_USER_MODEL) and
# expose it in the Django admin with the default ModelAdmin options
CustomUser = get_user_model()
admin.site.register(CustomUser)
#!/usr/bin/env python3
# coding: utf-8
from .base_test import BaseTest
from testgres.connection import ProgrammingError
class OTablesTest(BaseTest):
    """Tests of orioledb's table bookkeeping (orioledb_table_oids) across
    WAL replay after crash-like restarts, commits, rollbacks, and mixed
    DDL/DML workloads on concurrent connections."""

    def assertTblCount(self, size):
        """Assert that orioledb currently tracks exactly *size* tables."""
        self.assertEqual(size,
                         self.node.execute('postgres',
                                           'SELECT count(*) FROM orioledb_table_oids();')[0][0])

    def test_o_tables_wal_commit_1(self):
        """Committed CREATE TABLEs survive an immediate (crash) restart."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 varchar(50) NOT NULL,
                PRIMARY KEY(num_1)
            ) USING orioledb;
            INSERT INTO table_name VALUES(55, 'num');
        """)
        self.assertTblCount(1)
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name_2(
                num_3 int NOT NULL,
                num_4 int NOT NULL,
                PRIMARY KEY(num_4)
            ) USING orioledb;
            INSERT INTO table_name_2 VALUES(222, 333);
        """)
        self.assertTblCount(2)
        # crash-like shutdown forces WAL recovery on the next start
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(2)
        node.stop()

    def test_o_tables_wal_commit_2(self):
        """Committed inserts and deletes survive an immediate restart."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL,
                num_3 int NOT NULL,
                PRIMARY KEY(num_1)
            ) USING orioledb;
            INSERT INTO table_name (num_1, num_2, num_3)
                VALUES(1,2,3);
            INSERT INTO table_name (num_1, num_2, num_3)
                VALUES(4,5,6);
            INSERT INTO table_name (num_1, num_2, num_3)
                VALUES(7,8,9);
        """)
        self.assertTblCount(1)
        self.assertEqual(node.execute('postgres', 'SELECT count(*) FROM table_name')[0][0], 3)
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(1)
        node.safe_psql('postgres', """
            DELETE FROM table_name WHERE num_1 = 1;
        """)
        self.assertTblCount(1)
        self.assertEqual(node.execute('postgres', 'SELECT count(*) FROM table_name')[0][0], 2)
        node.stop()

    def test_o_tables_wal_commit_3(self):
        """Dropping a type used by a table (CASCADE) keeps bookkeeping sane
        across an immediate restart."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TYPE type_name AS ENUM('one','two');
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 type_name NOT NULL,
                PRIMARY KEY(num_1)
            ) USING orioledb;
            INSERT INTO table_name(num_1, num_2)
                VALUES(1, 'one');
            INSERT INTO table_name(num_1, num_2)
                VALUES(2, 'two');
        """)
        self.assertTblCount(1)
        node.safe_psql('postgres', """
            DROP TYPE type_name CASCADE;
        """)
        self.assertTblCount(1)
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(1)
        # BUGFIX: original read "node.stop" without parentheses, so the node
        # was never actually stopped at the end of this test
        node.stop()

    def test_o_tables_wal_rollback_1(self):
        """A rolled-back DROP TABLE leaves the table; a committed one removes
        it, and the result survives an immediate restart."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            ) USING orioledb;
            INSERT INTO table_name VALUES (11,22);
        """)
        self.assertTblCount(1)
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        con1.execute("DROP TABLE table_name;")
        self.assertTblCount(1)
        con1.rollback()
        self.assertTblCount(1)
        con1.close()
        con2.execute("DROP TABLE table_name;")
        self.assertTblCount(1)
        con2.commit()
        con2.close()
        self.assertTblCount(0)
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(0)
        node.stop()

    def test_o_tables_wal_drop_rollback_2(self):
        """Mix of committed and rolled-back DROPs on two tables."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            ) USING orioledb;
            INSERT INTO table_name VALUES (11,22);
            CREATE TABLE IF NOT EXISTS table_name_2(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            ) USING orioledb;
            INSERT INTO table_name_2 VALUES (111,222);
        """)
        self.assertTblCount(2)
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        con1.execute("DROP TABLE table_name;")
        self.assertTblCount(2)
        con1.commit()
        con1.close()
        con2.execute("DROP TABLE table_name_2;")
        self.assertTblCount(1)
        con2.rollback()
        self.assertTblCount(1)
        con2.close()
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(1)
        node.stop()

    def test_o_tables_wal_drop_extension_rollback_1(self):
        """A rolled-back DROP EXTENSION must leave tables usable by later DDL."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            )USING orioledb;
            INSERT INTO table_name VALUES (11, 22);
        """)
        self.assertTblCount(1)
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        con1.execute("DROP EXTENSION orioledb CASCADE;")
        con1.rollback()
        con1.close()
        con2.execute("ALTER TABLE table_name DROP COLUMN num_2")
        con2.commit()
        con2.close()
        self.assertTblCount(1)
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(1)
        node.stop()

    def test_o_tables_mix_1(self):
        """DELETE plus ALTER TABLE on concurrent connections, then restart
        and DROP."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            )USING orioledb;
            INSERT INTO table_name (num_1, num_2)
                VALUES (11,22);
            INSERT INTO table_name (num_1, num_2)
                VALUES (33,44);
            INSERT INTO table_name (num_1, num_2)
                VALUES (55,66);
            INSERT INTO table_name (num_1, num_2)
                VALUES (77,88);
        """)
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        self.assertTblCount(1)
        con1.execute("DELETE FROM table_name WHERE num_1 = 55")
        self.assertTblCount(1)
        con1.commit()
        con1.close()
        con2.execute("ALTER TABLE table_name DROP COLUMN num_1")
        self.assertTblCount(1)
        con2.commit()
        con2.close()
        self.assertTblCount(1)
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(1)
        node.safe_psql('postgres', """
            DROP TABLE table_name;
        """)
        self.assertTblCount(0)
        node.stop()

    def test_o_tables_mix_2(self):
        """CREATE TABLE inside an explicit transaction is visible once
        committed, and DROP removes it from the bookkeeping."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
        """)
        self.assertTblCount(0)
        con1 = node.connect()
        con1.begin()
        con1.execute("""CREATE TABLE IF NOT EXISTS table_name(
            num_1 int NOT NULL,
            num_2 int NOT NULL
        )USING orioledb;
        INSERT INTO table_name VALUES (11, 22);""")
        self.assertTblCount(1)
        con1.commit()
        con1.close()
        self.assertTblCount(1)
        node.safe_psql('postgres', """
            DROP TABLE table_name;
        """)
        self.assertTblCount(0)
        node.stop()

    def test_o_tables_mix_3(self):
        """CREATE EXTENSION and table in one session; DROP EXTENSION CASCADE
        in another."""
        node = self.node
        node.start()
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        con1.execute("CREATE EXTENSION IF NOT EXISTS orioledb;")
        self.assertEqual(0,
                         con1.execute('SELECT count(*) FROM orioledb_table_oids();')[0][0])
        con1.execute("""
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            )USING orioledb;
            INSERT INTO table_name (num_1, num_2)
                VALUES (11,22);
            INSERT INTO table_name (num_1, num_2)
                VALUES (33,44);
            INSERT INTO table_name (num_1, num_2)
                VALUES (55,66);
            INSERT INTO table_name (num_1, num_2)
                VALUES (77,88);
        """)
        con1.commit()
        con1.close()
        self.assertTblCount(1)
        con2.execute("DROP EXTENSION orioledb CASCADE;")
        self.assertTblCount(1)
        con2.commit()
        con2.close()
        node.stop()

    def test_o_tables_mix_4(self):
        """CREATE then DROP of the same table inside one transaction."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
        """)
        self.assertTblCount(0)
        con1 = node.connect()
        con1.begin()
        con1.execute("""
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 int NOT NULL
            )USING orioledb;
            INSERT INTO table_name (num_1, num_2)
                VALUES (11,22);
            INSERT INTO table_name (num_1, num_2)
                VALUES (33,44);
        """)
        self.assertTblCount(1)
        con1.execute("""
            DROP TABLE table_name
        """)
        self.assertTblCount(1)
        con1.commit()
        con1.close()
        self.assertTblCount(0)
        node.stop()

    def test_o_tables_mix_5(self):
        """DROP TYPE CASCADE plus new table creation, concurrent inserts,
        immediate restart, then DROP EXTENSION CASCADE resets the count."""
        node = self.node
        node.start()
        node.safe_psql('postgres', """
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TYPE type_name AS ENUM('one','two');
            CREATE TABLE IF NOT EXISTS table_name(
                num_1 int NOT NULL,
                num_2 type_name NOT NULL
            )USING orioledb;
            INSERT INTO table_name (num_1, num_2)
                VALUES (11,'one');
            INSERT INTO table_name (num_1, num_2)
                VALUES (33,'two');
        """)
        self.assertTblCount(1)
        con1 = node.connect()
        con2 = node.connect()
        con1.begin()
        con2.begin()
        self.assertTblCount(1)
        con1.execute("DROP TYPE type_name CASCADE;")
        con1.execute("""
            CREATE TABLE table_name_2(
                num_3 int NOT NULL,
                num_4 int NOT NULL
            )USING orioledb;
        """)
        self.assertTblCount(2)
        con1.commit()
        con1.close()
        self.assertTblCount(2)
        con2.execute("""
            INSERT INTO table_name_2 (num_3, num_4)
                VALUES (11,22);
            INSERT INTO table_name_2 (num_3, num_4)
                VALUES (33,44);
            INSERT INTO table_name_2 (num_3, num_4)
                VALUES (55,66);
            INSERT INTO table_name_2 (num_3, num_4)
                VALUES (77,88);
        """)
        self.assertTblCount(2)
        con2.commit()
        con2.close()
        node.stop(['-m', 'immediate'])
        node.start()
        self.assertTblCount(2)
        node.safe_psql('postgres', """
            DROP EXTENSION orioledb CASCADE;
            CREATE EXTENSION IF NOT EXISTS orioledb;
        """)
        self.assertTblCount(0)
        node.stop()

    def test_null_o_table(self):
        """Dropping an already-dropped table reports the expected error in a
        concurrent session."""
        node = self.node
        node.start()
        con_control = node.connect()
        con_control.execute("""
            CREATE EXTENSION IF NOT EXISTS orioledb;
            CREATE TABLE o_test_1 (
                val_1 int,
                val_2 int
            ) USING orioledb;
            INSERT INTO o_test_1(val_1, val_2)
                (SELECT val_1, val_1 * 100 FROM generate_series (1, 11) val_1);
        """)
        con_control.commit()
        con1 = node.connect()
        con1.begin()
        con1.execute("DROP TABLE o_test_1;")
        con1.commit()
        # the table is gone, so the control session's DROP must fail
        with self.assertRaises(ProgrammingError) as e:
            con_control.execute("""
                DROP TABLE o_test_1;
            """)
        self.assertErrorMessageEquals(e, 'table "o_test_1" does not exist')
        con_control.rollback()
        con1.close()
        con_control.close()
|
# -*- coding: utf-8 -*-
"""Yelp_Hotel_Reviews_Web_Scraper.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/13awHtJ5dnpdutcAMoxECNnlvbApnoB7z
"""
import bs4
import urllib
import urllib.request as url
import pandas as pd
# Two Yelp review pages (first page and ?start=20 second page) per hotel.
url_list=[
['https://www.yelp.com/biz/aria-resort-and-casino-las-vegas-8',
'https://www.yelp.com/biz/aria-resort-and-casino-las-vegas-8?start=20'],
['https://www.yelp.com/biz/four-seasons-hotel-las-vegas-las-vegas',
'https://www.yelp.com/biz/four-seasons-hotel-las-vegas-las-vegas?start=20'],
['https://www.yelp.com/biz/skylofts-at-mgm-grand-las-vegas-2',
'https://www.yelp.com/biz/skylofts-at-mgm-grand-las-vegas-2?start=20'],
['https://www.yelp.com/biz/encore-las-vegas-2',
'https://www.yelp.com/biz/encore-las-vegas-2?start=20'],
['https://www.yelp.com/biz/wynn-las-vegas-las-vegas-3',
'https://www.yelp.com/biz/wynn-las-vegas-las-vegas-3?start=20'],
['https://www.yelp.com/biz/the-venetian-las-vegas-las-vegas-6',
'https://www.yelp.com/biz/the-venetian-las-vegas-las-vegas-6?start=20'],
['https://www.yelp.com/biz/the-cosmopolitan-of-las-vegas-las-vegas',
'https://www.yelp.com/biz/the-cosmopolitan-of-las-vegas-las-vegas?start=20'],
['https://www.yelp.com/biz/the-palazzo-las-vegas-las-vegas-3',
'https://www.yelp.com/biz/the-palazzo-las-vegas-las-vegas-3?start=20'],
['https://www.yelp.com/biz/bellagio-hotel-las-vegas-6',
'https://www.yelp.com/biz/bellagio-hotel-las-vegas-6?start=20'],
['https://www.yelp.com/biz/aria-sky-suites-las-vegas-3',
'https://www.yelp.com/biz/aria-sky-suites-las-vegas-3?start=20'],
['https://www.yelp.com/biz/caesars-palace-las-vegas-10',
'https://www.yelp.com/biz/caesars-palace-las-vegas-10?start=20'],
['https://www.yelp.com/biz/the-nomad-hotel-las-vegas-las-vegas-3',
'https://www.yelp.com/biz/the-nomad-hotel-las-vegas-las-vegas-3?start=20'],
['https://www.yelp.com/biz/red-rock-casino-resort-and-spa-las-vegas-7',
'https://www.yelp.com/biz/red-rock-casino-resort-and-spa-las-vegas-7?start=20'],
['https://www.yelp.com/biz/vdara-hotel-and-spa-las-vegas-3',
'https://www.yelp.com/biz/vdara-hotel-and-spa-las-vegas-3?start=20'],
['https://www.yelp.com/biz/trump-international-hotel-las-vegas-las-vegas-2',
'https://www.yelp.com/biz/trump-international-hotel-las-vegas-las-vegas-2?start=20'],
['https://www.yelp.com/biz/mandalay-bay-resort-and-casino-las-vegas-5',
'https://www.yelp.com/biz/mandalay-bay-resort-and-casino-las-vegas-5?start=20'],
['https://www.yelp.com/biz/waldorf-astoria-las-vegas-las-vegas-2',
'https://www.yelp.com/biz/waldorf-astoria-las-vegas-las-vegas-2?start=20'],
['https://www.yelp.com/biz/delano-las-vegas-las-vegas-3',
'https://www.yelp.com/biz/delano-las-vegas-las-vegas-3?start=20'],
['https://www.yelp.com/biz/the-nobu-hotel-las-vegas',
'https://www.yelp.com/biz/the-nobu-hotel-las-vegas?start=20'],
['https://www.yelp.com/biz/m-resort-spa-casino-henderson-3',
'https://www.yelp.com/biz/m-resort-spa-casino-henderson-3?start=20']
]
def ExtractReviews(urllist):
    """Scrape English review texts from each Yelp page URL in *urllist*.

    Returns a 3-item list [source labels, hotel names, review texts] whose
    sub-lists all have one entry per unique review. The hotel name is taken
    from the last page fetched (all pages are for the same hotel).

    NOTE: the class selectors are tied to a specific Yelp page revision.
    """
    reviews = []
    hotel_name = None  # original raised UnboundLocalError on an empty urllist
    for page_url in urllist:
        opener = url.build_opener()
        # Yelp rejects the default urllib User-Agent
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        srclink_hotel = opener.open(page_url)
        data_hotel = bs4.BeautifulSoup(srclink_hotel, 'html.parser')
        hotel_name = data_hotel.find("h1", {"class": "lemon--h1__373c0__2ZHSL heading--h1__373c0__dvYgw undefined heading--inline__373c0__10ozy"}).string
        review_raw = data_hotel.find_all("span", {"class": "lemon--span__373c0__3997G raw__373c0__3rcx7", "lang": "en"})
        reviews.extend(span.get_text() for span in review_raw)
        # de-duplicate while preserving first-seen order
        reviews = list(dict.fromkeys(reviews))
        print("")  # progress separator on stdout (kept from original)
    return [['Yelp'] * len(reviews), [hotel_name] * len(reviews), reviews]
# Scrape every hotel (two pages each) into parallel per-hotel lists of
# reviews, names and source labels.
final_reviews_list = []
final_hotel_name_list = []
final_source_list = []
for l in range(len(url_list)):
    current_url_list = url_list[l]
    output = ExtractReviews(current_url_list)
    # output layout: [sources, hotel names, reviews]
    final_reviews_list.append(output[2])
    final_hotel_name_list.append(output[1])
    final_source_list.append(output[0])
# cap every hotel at its first 20 reviews so the three lists stay aligned
for i in range(len(final_reviews_list)):
    if len(final_reviews_list[i])>=20:
        final_reviews_list[i] = final_reviews_list[i][:20]
        final_hotel_name_list[i] = final_hotel_name_list[i][:20]
        final_source_list[i] = final_source_list[i][:20]
# flatten the per-hotel lists into single columns
merged_hotel_list = []
merged_reviews_list = []
merged_source_list = []
for sub in final_hotel_name_list:
    for item in sub:
        merged_hotel_list.append(item)
for sub in final_reviews_list:
    for item in sub:
        merged_reviews_list.append(item)
for sub in final_source_list:
    for item in sub:
        merged_source_list.append(item)
# assemble the final table and write it out
final_df = pd.DataFrame(list(zip(merged_hotel_list, merged_source_list, merged_reviews_list)),columns =['Hotel', 'Source', 'Text'])
final_df.to_csv("yelp_hotel_reviews.csv",index=False)
from django.http import HttpResponse
# from lib.handler import dispatcherBase
def ass_IP(request):
    """Placeholder Django view: returns the literal text "IP分析" (IP analysis)."""
    return HttpResponse("IP分析")
import numpy as np
from stl import mesh
from sklearn.decomposition import PCA
import json
import os
import sys
from tqdm import tqdm
def get_stl_paths(full_path, json_filename=''):
    """Map each subdirectory of *full_path* to its preferred .stl file.

    Prefers 'new.stl' when present, otherwise falls back to
    'xyz_0_0_0.stl'. If *json_filename* is non-empty, the mapping is also
    dumped to "<json_filename>.json".

    Returns a dict {subdirectory_name: stl_file_path}.
    """
    data = {}
    for entry in os.listdir(full_path):
        subdir = os.path.join(full_path, entry)
        if not os.path.isdir(subdir):
            continue  # skip stray files (the original crashed on them)
        file_list = os.listdir(subdir)
        stl_name = 'new.stl' if 'new.stl' in file_list else 'xyz_0_0_0.stl'
        data[entry] = os.path.join(subdir, stl_name)
    if json_filename:
        print('Making JSON for', json_filename)
        with open(json_filename + ".json", 'w') as f:
            json.dump(data, f)
    return data
def _pca_rotate_stl(stl_path, suffix, pca=None):
    """Rotate the mesh at *stl_path* into PCA coordinates and save a copy.

    The mesh's triangle vertices are stacked into an (3*N, 3) point cloud;
    if *pca* is None a new 3-component PCA is fitted on that cloud,
    otherwise the given (already fitted) PCA is applied, so several meshes
    can share one rotation. The result is saved next to the input with
    '.stl' replaced by *suffix*. Returns the PCA that was used.
    """
    mesh_arr = np.asarray(mesh.Mesh.from_file(stl_path))
    num_points = mesh_arr.shape[0]
    # columns 0:3 / 3:6 / 6:9 are the three vertices of each triangle
    stacked = np.vstack([mesh_arr[:, 0:3], mesh_arr[:, 3:6], mesh_arr[:, 6:9]])
    if pca is None:
        pca = PCA(n_components=3)
        pca.fit(stacked)
    new_stack = pca.transform(stacked)
    new_mesh_arr = np.hstack([new_stack[0:num_points, :],
                              new_stack[num_points:2 * num_points, :],
                              new_stack[2 * num_points:3 * num_points, :]])
    data = np.zeros(num_points, dtype=mesh.Mesh.dtype)
    data['vectors'] = new_mesh_arr.reshape(num_points, 3, 3)
    mesh.Mesh(data).save(stl_path.replace('.stl', suffix))
    return pca


def stl_PCA(path_dict, path_dict2=None, pair_mapping=None):
    """Save PCA-rotated copies of the meshes referenced by *path_dict*.

    Without kwargs, each mesh gets its own PCA ('_pca.stl' suffix). With
    both *path_dict2* and *pair_mapping* given, the PCA fitted on
    path_dict[k] is reused for every paired mesh path_dict2[str(v)]
    ('_pca_pair.stl' suffix).
    """
    assert ((path_dict2 is None) + (pair_mapping is None)) != 1, \
        'please specify all kwargs or none of them'
    if pair_mapping is not None:
        # BUGFIX: the original called pair_mapping.iteritems(), which was
        # removed in Python 3 and raised AttributeError.
        for k, values in tqdm(pair_mapping.items()):
            pca = _pca_rotate_stl(path_dict[k], '_pca_pair.stl')
            for v in values:
                # apply the primary mesh's rotation to each paired mesh
                _pca_rotate_stl(path_dict2[str(v)], '_pca_pair.stl', pca=pca)
    else:
        for k in tqdm(path_dict):
            _pca_rotate_stl(path_dict[k], '_pca.stl')
if __name__ == '__main__':
    """
    Returns PCA-rotated instances as .stl files, which allows for maximization of the surface that will be rendered to a picture when the .stl file is plotted.
    There are 2 plotting scenarios considered:
    pairs == False: You just want to render individual .stl files.
    pairs == True: You want to render muliple instances with the same rotation, e.g. mitos contained in dendrites should have the same oritentation as the dendrite they belong to.
    if pairs is false, each .stl file can be calculated individually with its own pca
    if pairs is true, you need to specify an array containing the matched pairs that should be rotated according to the same pca. The first of the 2 paths specified for the pairs corresponds to the fist column of the matched pairs array and will be the object of the pca, while the same rotation will be applied to the second pair element.
    """
    print('start')
    # NOTE(review): paths below are hard-coded to a specific cluster home
    # directory; parameterize before reuse.
    pairs = True
    if pairs == False:
        dendrite_paths = get_stl_paths('/n/home00/nwendt/snowjournal/volume2stl/stl_mitos_dendrites_length_500/stl_dendrites_nocrumbs')
        stl_PCA(dendrite_paths)
    else:
        dendrite_paths = get_stl_paths('/n/home00/nwendt/snowjournal/volume2stl/stl')
        mito_paths = get_stl_paths('/n/home00/nwendt/snowjournal/volume2stl/stl_all_mitos')
        # mito-id, seg-id --> seg-id, mito-id
        # dict mapping dendrite id -> list of mito ids:
        with open('data/lut_dendrite_mito_237.json') as json_file:
            idmap = json.load(json_file)
        stl_PCA(dendrite_paths, path_dict2=mito_paths, pair_mapping=idmap)
    print('done')
|
import xmc.tools as tools
from exaqute import *
# TODO When MomentEstimator is updated to specs, one level of nesting will have to be added above solver level: each element of sampleGroup is to be a list of sample components; as of now, it is just a single component.
@task(keep=True, estimators=INOUT, samples={Type: COLLECTION_IN, Depth: 3})
def updatePartialQoiEstimators_Task(estimators, samples):
    """
    Update a list of estimators with a set of samples.
    Input arguments:
    - estimators: list of StatisticalEstimator objects.
    - samples: tridimensional array (nested list) whose dimensions are (event,solver,component)
    It is assumed that estimators[i] expects the i-th component (i.e. samples[?][?][i]).
    Consequently, samples must have length len(estimators) along its last dimension.
    """
    # Iterate over estimators
    for iEst, _ in enumerate(estimators):
        # Get subset of samples for component iEst
        # as array expected by update method: (event, solver).
        # Example:
        #   samples[event][solver] = [q_0_solver, ..., q_N_solver]
        # then
        #   sampleSubset[event] = [q_iEst_0, ..., q_iEst_numberOfSolvers]
        # Observe the type of each sample (float or list) depends on the chosen moment estimator
        sampleSubset = [[perSolver[iEst] for perSolver in perEvent] for perEvent in samples]
        # Pass to update method
        estimators[iEst].update(sampleSubset)
@task(keep=True, origin=IN, destination=INOUT)
def assignByIndex_Task(destination, origin, mapping):
    """
    Assign elements into a list, according to an index.
    Input arguments:
    - destination: list in which to assign elements
    - origin: list of elements to assign
    - mapping: list of integers such that origin[i] must go to destination[mapping[i]]
    """
    for sourcePosition, targetPosition in enumerate(mapping):
        destination[targetPosition] = origin[sourcePosition]
@task(keep=True, costEstimator=INOUT, times={Type: COLLECTION_IN, Depth: 2})
def updateCostEstimator_Task(costEstimator, times):
    """
    Update cost estimator with a set of time samples.
    Input arguments:
    - costEstimator: MomentEstimator object.
    - times: bidimensional array (nested list) of time samples, whose dimensions are
    (event,solver).
    """
    # Total the solver times of each event; MomentEstimator.update expects a
    # bidimensional list, hence the one-element inner lists.
    perEventTotals = []
    for solverTimes in times:
        perEventTotals.append([sum(solverTimes)])
    costEstimator.update(perEventTotals)
|
# Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
from yugabyte_db_thirdparty.build_definition_helpers import * # noqa
from yugabyte_db_thirdparty.util import read_file, write_file
import os
class Icu4cDependency(Dependency):
    """Third-party build definition for the ICU4C library."""

    VERSION_MAJOR = 70
    VERSION_MINOR = 1
    VERSION_WITH_UNDERSCORE = '%d_%d' % (VERSION_MAJOR, VERSION_MINOR)
    VERSION_WITH_DASH = '%d-%d' % (VERSION_MAJOR, VERSION_MINOR)
    # SECURITY FIX: fetch the release tarball over https (the original used
    # plain http, which is subject to tampering in transit).
    CUSTOM_URL_PATTERN = \
        'https://github.com/unicode-org/icu/releases/download/release-%s/icu4c-%s-src.tgz'

    def __init__(self) -> None:
        super(Icu4cDependency, self).__init__(
            name='icu4c',
            version=Icu4cDependency.VERSION_WITH_UNDERSCORE,
            url_pattern=Icu4cDependency.CUSTOM_URL_PATTERN % (
                Icu4cDependency.VERSION_WITH_DASH,
                Icu4cDependency.VERSION_WITH_UNDERSCORE),
            build_group=BUILD_GROUP_INSTRUMENTED)
        self.copy_sources = True

    def get_additional_ld_flags(self, builder: BuilderInterface) -> List[str]:
        """Extra linker flags needed for particular build configurations."""
        if builder.compiler_choice.is_linux_clang1x() and builder.build_type == BUILD_TYPE_ASAN:
            # Needed to find dlsym.
            return ['-ldl']
        return []

    def _copy_res_files_from_uninstrumented(self) -> None:
        """
        Updates the following rule in source/extra/uconv/Makefile in ASAN build:
        $(MSGNAME)/%.res: $(srcdir)/$(RESOURCESDIR)/%.txt
        $(INVOKE) $(TOOLBINDIR)/genrb -e UTF-8 -s $(^D) -d $(@D) $(^F)
        The genrb program built with Clang 14 on Linux reports an ODR violation when run with ASAN,
        and the ASAN_OPTIONS=detect_odr_violation=0 flag does not work correctly in Clang 14.
        We work around this by just copying the corresponding .res files from the uninstrumented
        build of icu4c.
        """
        # We are in the "source" directory under the build directory.
        configured_dir = os.getcwd()
        # Get the build directory name, e.g. "icu4c-70_1".
        build_dir_name = os.path.basename(os.path.dirname(configured_dir))
        makefile_path = os.path.join(configured_dir, 'extra', 'uconv', 'Makefile')
        makefile_lines = read_file(makefile_path).split('\n')
        expected_build_rule = '$(INVOKE) $(TOOLBINDIR)/genrb -e UTF-8 -s $(^D) -d $(@D) $(^F)'
        line_found = False
        made_changes = False
        rule_prefix = '$(MSGNAME)/%.res:'
        for i in range(len(makefile_lines) - 1):
            if makefile_lines[i].strip().startswith(rule_prefix):
                line_found = True
                actual_build_rule = makefile_lines[i + 1].strip()
                # A rule already starting with 'cp ' means we patched it on a
                # previous run; anything else unexpected is an error.
                if (expected_build_rule != actual_build_rule and
                        not actual_build_rule.startswith('cp ')):
                    raise ValueError(
                        "The line %s is followed by %s, not by %s" % (
                            makefile_lines[i].strip(),
                            actual_build_rule,
                            expected_build_rule))
                makefile_lines[i + 1] = ' '.join([
                    '\tcp',
                    '"../../../../../uninstrumented/%s/source/extra/uconv/uconvmsg/$(@F)"' %
                    build_dir_name,
                    '"$@"'
                ])
                made_changes = True
        if not line_found:
            raise IOError('Did not find Makefile rule starting with %s in %s' % (
                rule_prefix, makefile_path))
        if not made_changes:
            log('Did not make any changes to %s, assuming previously applied', makefile_path)
            return
        write_file(makefile_path, '\n'.join(makefile_lines) + '\n')

    def build(self, builder: BuilderInterface) -> None:
        """Configure and build ICU4C, applying the ASAN genrb workaround when needed."""
        configure_extra_args = [
            '--disable-samples',
            '--disable-tests',
            '--disable-layout',
            '--enable-static',
            '--with-library-bits=64'
        ]
        post_configure_action: Optional[Callable] = None
        llvm_major_version = builder.compiler_choice.get_llvm_major_version()
        # Clang 14+ ASAN builds need the .res files copied from the
        # uninstrumented build (see _copy_res_files_from_uninstrumented).
        if (is_linux() and
                llvm_major_version is not None and
                llvm_major_version >= 14 and
                builder.build_type == BUILD_TYPE_ASAN):
            post_configure_action = self._copy_res_files_from_uninstrumented
        builder.build_with_configure(
            log_prefix=builder.log_prefix(self),
            src_subdir_name='source',
            extra_args=configure_extra_args,
            post_configure_action=post_configure_action
        )
        fix_shared_library_references(builder.prefix, 'libicu')
|
'''
Created on May 28, 2020
bdio_update_project_name - updates project name referenced in bdio file
@parameters
bdio_in - original bdio file
--bdio-out bdio_ot - output bdio file
--project-name projetc_name - new Project name
Invoked without --bdio-out and --project-name will read project name referenced in bdio file
@author: kumykov
'''
import errno
import json
import os
import shutil
import sys
import zipfile
from argparse import ArgumentParser
from zipfile import ZIP_DEFLATED
from email.policy import default
# NOTE(review): these two defaults are never read by main(), which takes all
# paths from command-line arguments; they look like leftovers from
# interactive development.
bdio_in = '/Users/kumykov/Downloads/e9c96cab-8d7c-3247-ac17-fca205fabd62.bdio'
bdio_out = '/Users/kumykov/Downloads/renamed.bdio'
# Workspace directories; setup_workspace() rebinds all three globals to
# absolute paths under the current working directory.
workdir = 'workdir'
inputdir = 'workdir/input'
outputdir = 'workdir/output'
def zip_extract_files(zip_file, dir_name):
    """Unpack the full contents of a zip archive into a directory."""
    print("Extracting content of {} into {}".format(zip_file, dir_name))
    with zipfile.ZipFile(zip_file, 'r') as archive:
        archive.extractall(dir_name)
def zip_create_archive(zip_file, dir_name):
    """Pack every file found under *dir_name* into a new deflated zip archive.

    Members are stored flat by basename, without any directory structure.
    """
    print ("writing content of {} into {} file".format(dir_name, zip_file))
    with zipfile.ZipFile(zip_file, mode='w', compression=ZIP_DEFLATED) as archive:
        for parent, _subdirs, names in os.walk(dir_name):
            for name in names:
                full_path = os.path.join(parent, name)
                archive.write(full_path, os.path.basename(full_path))
def read_json_object(filepath):
    """Load and return the JSON document stored at *filepath*.

    The path is used exactly as given, matching write_json_file().
    Bug fix: the previous version joined the global ``workdir`` onto the
    path, which was a no-op for the absolute paths every caller passes
    (os.path.join discards the left side for absolute right sides) and
    produced a wrong path for relative inputs.
    """
    with open(filepath) as jsonfile:
        return json.load(jsonfile)
def write_json_file(filepath, data):
    """Serialize *data* as JSON and write it to *filepath*, overwriting."""
    with open(filepath, "w") as handle:
        handle.write(json.dumps(data))
def update_project_name(data, name):
    """Set the project name on every Project node of a BDIO graph, in place."""
    project_type = 'https://blackducksoftware.github.io/bdio#Project'
    name_key = "https://blackducksoftware.github.io/bdio#hasName"
    for entry in data['@graph']:
        if entry['@type'][0] != project_type:
            continue
        if name_key in entry:
            entry[name_key][0]['@value'] = name
def get_project_name(data):
    """Return the list of project names found on Project nodes of a BDIO graph."""
    project_type = 'https://blackducksoftware.github.io/bdio#Project'
    name_key = "https://blackducksoftware.github.io/bdio#hasName"
    names = []
    for entry in data['@graph']:
        if entry['@type'][0] == project_type and name_key in entry:
            names.append(entry[name_key][0]['@value'])
    return names
def setup_workspace():
    """(Re)create the workdir/input and workdir/output directories.

    Rebinds the module-level path globals to absolute paths under the
    current working directory; pre-existing input/output trees are wiped.
    """
    global workdir
    global inputdir
    global outputdir
    try:
        base = os.getcwd()
        workdir = os.path.join(base, 'workdir')
        inputdir = os.path.join(workdir, "input")
        outputdir = os.path.join(workdir, "output")
        for directory in (inputdir, outputdir):
            if os.path.exists(directory):
                shutil.rmtree(directory)
            os.makedirs(directory)
    except OSError as e:
        # Tolerate an already-existing directory; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
def cleanup_workspace():
    """Delete the working directory tree; a missing tree is a no-op.

    Bug fix: the error check compared against errno.EEXIST, but the error
    raised when the tree is already gone is ENOENT, so calling cleanup
    twice crashed instead of being harmless.
    """
    try:
        shutil.rmtree(workdir)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
def bdio_read_project_name(bdio_in):
    """Extract *bdio_in* into the input workspace and collect all project names."""
    zip_extract_files(bdio_in, inputdir)
    names = []
    for entry in os.listdir(inputdir):
        data = read_json_object(os.path.join(inputdir, entry))
        names.extend(get_project_name(data))
    return names
def bdio_update_project_name(bdio_in, bdio_out, new_project_name):
    """Rewrite *bdio_in* with the project name replaced, producing *bdio_out*."""
    zip_extract_files(bdio_in, inputdir)
    for entry in os.listdir(inputdir):
        print ("processing {}".format(entry))
        data = read_json_object(os.path.join(inputdir, entry))
        update_project_name(data, new_project_name)
        write_json_file(os.path.join(outputdir, entry), data)
    zip_create_archive(bdio_out, outputdir)
def main(argv=None):
    """Command-line entry point.

    Without --project-name: print the set of project names found in the
    BDIO file.  With --project-name and --bdio-out: write a renamed copy.
    Returns a process exit code (0 on success, 1 on usage/runtime errors).
    """
    if argv is None:
        argv = sys.argv
    else:
        # NOTE(review): extending the real sys.argv is questionable but kept
        # for backward compatibility; parse_args() below reads sys.argv.
        sys.argv.extend(argv)
    try:
        # Setup argument parser.  (The old `if not args.bdio_in` check was
        # dead code: argparse already enforces the positional argument.)
        parser = ArgumentParser()
        parser.add_argument("bdio_in", help="Path to the original BDIO file")
        parser.add_argument("--bdio-out", default=None, help="Path to the output file to be written")
        parser.add_argument("--project-name", default=None, help="New project name")
        args = parser.parse_args()
        if not args.project_name:
            if not args.bdio_out:
                # Read-only mode: report the names found.
                setup_workspace()
                print ("Project names found {}".format(set(bdio_read_project_name(args.bdio_in))))
                cleanup_workspace()
            else:
                parser.print_help(sys.stdout)
                return 1
        else:
            if args.bdio_out:
                # Rename mode: both --project-name and --bdio-out supplied.
                setup_workspace()
                bdio_update_project_name(args.bdio_in, args.bdio_out, args.project_name)
                cleanup_workspace()
            else:
                parser.print_help(sys.stdout)
                return 1
        return 0
    except Exception as e:
        # Bug fix: previously every exception was silently swallowed
        # (`pass`), main() fell through returning None, and the process
        # exited with status 0 even on failure.
        print("Error: {}".format(e), file=sys.stderr)
        return 1
# Script entry point: exit with the status code returned by main().
if __name__ == "__main__":
    sys.exit(main())
|
"""
2D bimodal example
====================
This example shows how to use ``lightkde.kde_2d`` and how it compares to
``scipy.stats.gaussian_kde`` for a bimodal bivariate case.
"""
# %%
# Import packages:
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde, multivariate_normal
from lightkde import kde_2d
# %%
# Generate synthetic data from two univariate normal distributions:
np.random.seed(42)
sample = np.vstack(
(
multivariate_normal.rvs(mean=[0, 0], cov=0.3, size=2000),
multivariate_normal.rvs(
mean=[2, 2], cov=[[0.5, -0.48], [-0.48, 0.5]], size=2000
),
)
)
# %%
# Estimate kernel density using ``lightkde``:
density_mx, x_mx, y_mx = kde_2d(sample_mx=sample)
# %%
# Estimate kernel density using ``scipy``:
gkde = gaussian_kde(dataset=sample.T)
xy_mx = np.hstack((x_mx.reshape(-1, 1), y_mx.reshape(-1, 1)))
scipy_density_mx = gkde.evaluate(xy_mx.T).reshape(x_mx.shape)
# %%
# Plot the data against the kernel density estimates:
# pre-process
bins = (30, 30)
data_density_mx, xedges, yedges = np.histogram2d(
sample[:, 0], sample[:, 1], bins=bins, density=True
)
z_min = 0
z_max = np.max(data_density_mx)
x_min, x_max = min(xedges), max(xedges)
y_min, y_max = min(yedges), max(yedges)
# plot
cmap = "afmhot_r"
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 3), sharex="all", sharey="all")
# data
h = ax1.hist2d(
sample[:, 0],
sample[:, 1],
bins=bins,
density=True,
vmin=z_min,
vmax=z_max,
cmap=cmap,
)[-1]
ax1.set_title("data")
# lightkde
ax2.contourf(x_mx, y_mx, density_mx, levels=50, vmin=z_min, vmax=z_max, cmap=cmap)
ax2.set_title("lightkde")
# scipy
ax3.contourf(x_mx, y_mx, scipy_density_mx, levels=50, vmin=z_min, vmax=z_max, cmap=cmap)
ax3.set_xlim(x_min, x_max)
ax3.set_ylim(y_min, y_max)
ax3.set_title("scipy.stats.gaussian_kde")
fig.subplots_adjust(right=0.89)
cbar_ax = fig.add_axes([0.90, 0.15, 0.05, 0.7], aspect=30)
fig.colorbar(h, cax=cbar_ax, label="density")
plt.show()
# %%
# The ``scipy`` method oversmooths the kernel density and it is far
# from the histogram of the data that it is expected to follow.
|
import os
import json
import pandas as pd
import collections
from .adjuster import Adjuster
from .desired_adjustment_calculator.calc_config import DesiredPlatformAdjustmentCalculatorConfig
from autoscalingsim.scaling.scaling_model import ScalingModel
from autoscalingsim.scaling.state_reader import StateReader
from autoscalingsim.desired_state.platform_state import PlatformState
from autoscalingsim.utils.error_check import ErrorChecker
class AdjustmentPolicy:
    """Adjusts the platform state towards desired states via a goal-specific Adjuster.

    The policy is configured from a JSON file; the concrete adjuster class
    is selected by the configured adjustment goal.
    """

    def __init__(self, node_for_scaled_services_types : dict, service_instance_requirements : dict,
                 state_reader : StateReader, scaling_model : ScalingModel, config_file : str, node_groups_registry : 'NodeGroupsRegistry'):

        self.scaling_model = scaling_model

        if not os.path.isfile(config_file):
            raise ValueError(f'No configuration file found under the path {config_file} for {self.__class__.__name__}')

        with open(config_file) as f:
            config = json.load(f)

        # Pull all settings out of the config, failing fast on missing keys.
        cls_name = self.__class__.__name__
        adjustment_horizon = ErrorChecker.key_check_and_load('adjustment_horizon', config, cls_name)
        cooldown_period = ErrorChecker.key_check_and_load('cooldown_period', config, cls_name, default = {"value": 0, "unit": "s"})
        optimizer_type = ErrorChecker.key_check_and_load('optimizer_type', config, cls_name)
        placement_hint = ErrorChecker.key_check_and_load('placement_hint', config, cls_name)
        combiner_settings = ErrorChecker.key_check_and_load('combiner', config, cls_name)
        adjustment_goal = ErrorChecker.key_check_and_load('adjustment_goal', config, cls_name)

        calc_conf = DesiredPlatformAdjustmentCalculatorConfig(placement_hint, optimizer_type, node_for_scaled_services_types, state_reader)
        adjuster_class = Adjuster.get(adjustment_goal)
        self.adjuster = adjuster_class(adjustment_horizon, cooldown_period, self.scaling_model,
                                       service_instance_requirements, combiner_settings, calc_conf, node_groups_registry)

    def adjust_platform_state(self, cur_timestamp : pd.Timestamp,
                              desired_states_timeline : dict, platform_state : PlatformState,
                              last_scaling_action_ts : pd.Timestamp):
        """Convert the desired-state timeline to scaling events and delegate to the adjuster."""
        events = self._convert_desired_services_states_to_scaling_events(platform_state, desired_states_timeline)
        events_df = self._convert_scaling_events_to_dataframes(events)
        return self.adjuster.adjust_platform_state(cur_timestamp, events_df, platform_state, last_scaling_action_ts)

    def _convert_desired_services_states_to_scaling_events(self, platform_state : PlatformState, desired_states_timeline : dict):
        """Build region -> service -> aspect -> {timestamp: change} from consecutive state deltas."""
        events = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
        prev_state = platform_state.collective_services_states
        for ts, desired_state in desired_states_timeline.items():
            # Delta between this desired state and the previous one.
            state_delta = desired_state.to_delta() - prev_state.to_delta()
            for region, per_service in state_delta.to_raw_scaling_aspects_changes().items():
                for service, per_aspect in per_service.items():
                    for aspect, change in per_aspect.items():
                        if change != 0:
                            events[region][service][aspect][ts] = change
            prev_state = desired_state
        return events

    def _convert_scaling_events_to_dataframes(self, services_scaling_events : dict):
        """Turn each service's per-aspect {timestamp: change} mapping into a DataFrame."""
        frames = collections.defaultdict(lambda: collections.defaultdict(dict))
        for region, per_service in services_scaling_events.items():
            for service, aspect_changes in per_service.items():
                frames[region][service] = pd.DataFrame(aspect_changes)
        return frames
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import gym
import random
import numpy as np
from typing import Optional, List, Tuple
from itertools import count
from more_itertools import pairwise
from collections import deque, namedtuple
import matplotlib.pyplot as plt
# One replay-memory transition: (state, action, reward, next_state, done).
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])
class ActionTuple:
    """A discrete action id paired with its continuous parameter vector.

    Bug fix / compatibility: consumers in this module
    (DeepQLearning.learn) read ``.id`` and ``.parameters``, which did not
    exist; they are provided here as read-only aliases of
    ``action``/``params``.
    """

    def __init__(self, action, params):
        self.action = action   # discrete action index
        self.params = params   # list of continuous parameters

    @property
    def id(self):
        """Alias for the discrete action index (name used by learn())."""
        return self.action

    @property
    def parameters(self):
        """Alias for the continuous parameters (name used by learn())."""
        return self.params

    def get(self):
        """Return the (action, params) pair as a tuple."""
        return (self.action, self.params)
class Memory:
    """Fixed-capacity replay buffer of Experience tuples."""

    def __init__(self, capacity):
        self.capacity = capacity
        # deque silently drops the oldest entry once capacity is exceeded
        self.buffer = deque(maxlen=capacity)

    def push(self, experience):
        """Store one experience, evicting the oldest when full."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw a random batch and transpose it into one Experience of tuples."""
        drawn = random.sample(self.buffer, batch_size)
        return Experience(*zip(*drawn))

    def __len__(self):
        return len(self.buffer)
class DeepQLearn:
    """Thin configuration holder for a hybrid deep-Q setup (no computation)."""

    def __init__(self, cfg, actions, params):
        self.cfg = cfg
        self.actions = actions
        self.params = params
class ParameterNetwork(nn.Module):
    """MLP mapping a state to one bounded continuous parameter per action.

    Each parameter gets its own bounded ParameterOutput head.
    """

    def __init__(self, state_space, parameters_space, hidden_units: Optional[list] = None):
        super(ParameterNetwork, self).__init__()
        hidden_units = hidden_units or [128, 128]
        units = [state_space.shape[0]] + hidden_units
        self.layers = nn.ModuleList()
        for i, o in pairwise(units):
            self.layers.append(nn.Linear(i, o))
        self.parameter_outputs = nn.ModuleList()
        # Bug fix: iterate the per-parameter bounds with zip(). The previous
        # `for low, high in parameters_space.low, parameters_space.high`
        # iterated over the 2-tuple (low_array, high_array) itself -- it only
        # avoided a crash when there were exactly two parameters, and used
        # the wrong bounds even then.
        for low, high in zip(parameters_space.low, parameters_space.high):
            self.parameter_outputs.append(ParameterOutput(units[-1], 1, low=low, high=high))

    def forward(self, x, action_id: Optional[int] = None) -> torch.Tensor:
        for layer in self.layers:
            x = F.relu(layer(x))
        # Bug fix: compare against None explicitly -- `if action_id:` treated
        # action id 0 as "no action requested" and returned all heads.
        if action_id is not None:
            return self.parameter_outputs[action_id](x)
        output = [head(x) for head in self.parameter_outputs]
        return torch.cat(output, 1)
class ParameterOutput(nn.Module):
    """Linear head whose output is squashed into the interval [low, high].

    Bug fix: the squashing previously used F.softmax over a single output
    unit, which is identically 1.0 regardless of input (and warned about a
    missing `dim`), so the head always emitted `high`. A sigmoid maps the
    single logit into (0, 1) as intended.
    """

    def __init__(self, in_features, out_features, low=-1, high=1):
        super(ParameterOutput, self).__init__()
        self.low = low
        self.high = high
        self.linear = nn.Linear(in_features, out_features)

    def forward(self, x):
        # sigmoid -> (0, 1), then affine-map onto [low, high]
        x = torch.sigmoid(self.linear(x))
        return (self.high - self.low) * x + self.low
class QNetwork(nn.Module):
def __init__(self, state_space, action_space, parameters_space, hidden_units: Optional[list] = None):
super(QNetwork, self).__init__()
hidden_units = hidden_units or [128, 128]
units = [state_space.shape[0] + parameters_space.shape[0]] + hidden_units
self.layers = nn.ModuleList()
for i, o in pairwise(units):
self.layers.append(nn.Linear(i, o))
self.fc_value = nn.Linear(hidden_units[-1], action_space.n)
self.fc_advantage = nn.Linear(hidden_units[-1], action_space.n)
def forward(self, state, parameter) -> torch.Tensor:
x = torch.cat((state, parameter), 1)
for layer in self.layers[:-1]:
x = F.relu(layer(x))
value = self.fc_value(x)
advantage = self.fc_advantage(x)
return value + (advantage - advantage.mean(dim=1, keepdim=True))
class Agent:
    """Epsilon-greedy policy over a discrete action with continuous parameters."""

    def __init__(self, q_network: QNetwork, parameter_network: ParameterNetwork, action_space, parameters_space):
        self.q_network = q_network
        self.parameter_network = parameter_network
        self.actions = range(action_space.n)
        self.parameters_space = parameters_space

    def __call__(self, observation_raw, epsilon=0.):
        """Return an ActionTuple; explores uniformly with probability epsilon.

        Bug fix: previously constructed the undefined name ``Action``
        (NameError at runtime); the class defined in this module is
        ``ActionTuple``.
        """
        if random.random() > epsilon:
            # Greedy: argmax-Q action under the predicted parameters.
            with torch.no_grad():
                observation = torch.tensor(observation_raw, dtype=torch.float32).unsqueeze(0)
                parameters = self.parameter_network.forward(observation)
                action = self.q_network.forward(observation, parameters).max(1)[1].item()
                return ActionTuple(action, parameters.tolist()[0])
        else:
            # Explore: random action, uniformly sampled parameters.
            action = random.choice(self.actions)
            parameters = np.random.uniform(self.parameters_space.low, self.parameters_space.high)
            return ActionTuple(action, list(parameters))
class DeepQLearning:
    """Training loop state for the Q-network and the parameter network."""

    def __init__(self, agent: Agent, q_network_optimizer, parameter_network_optimizer, memory_capacity, batch_size):
        self.step = 0
        self.agent = agent
        self.batch_size = batch_size
        self.memory = Memory(memory_capacity)
        self.q_network_optimizer = q_network_optimizer
        self.parameter_network_optimizer = parameter_network_optimizer

    def learn(self):
        """Run one optimization step on a sampled batch; return the Q-loss.

        NOTE(review): relies on a module-level GAMMA constant that is not
        defined in this file -- confirm it is provided elsewhere.
        """
        batch = self.memory.sample(self.batch_size)

        done = torch.FloatTensor(batch.done)
        reward = torch.FloatTensor(batch.reward)
        state = torch.FloatTensor(np.float32(batch.state))
        next_state = torch.FloatTensor(np.float32(batch.next_state))
        action = torch.LongTensor([action.id for action in batch.action])
        # Bug fix: the continuous parameters were loaded as LongTensor, which
        # truncates them to integers and makes torch.cat with the float state
        # inside QNetwork.forward fail on mismatched dtypes.
        parameters = torch.FloatTensor(np.squeeze([action.parameters for action in batch.action]))

        # TD target: r + GAMMA * max_a' Q(s', p(s')), zeroed at terminals.
        q_values = self.agent.q_network.forward(state, parameters)
        next_parameters = self.agent.parameter_network.forward(next_state)
        next_q_values = self.agent.q_network.forward(next_state, next_parameters)
        q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
        next_q_value = next_q_values.max(1)[0]
        expected_q_value = reward + GAMMA * next_q_value * (1 - done)

        q_network_loss = (q_value - expected_q_value.detach()).pow(2).mean()
        self.q_network_optimizer.zero_grad()
        q_network_loss.backward()
        self.q_network_optimizer.step()

        # Parameter network ascends Q by minimizing its negated sum.
        predicted_parameters = self.agent.parameter_network.forward(state)
        parameter_network_loss = - torch.sum(self.agent.q_network.forward(state, predicted_parameters))
        self.parameter_network_optimizer.zero_grad()
        parameter_network_loss.backward()
        self.parameter_network_optimizer.step()

        return q_network_loss.item()
if __name__ == '__main__':
    # NOTE(review): none of the upper-case constants below (OBSERVATION_SPACE,
    # ACTION_SPACE, PARAMETERS_SPACE, *_UNITS, *_LR, MEMORY_CAPACITY,
    # BATCH_SIZE, and GAMMA used in learn()) are defined in this file, and
    # DeepQLearning defines no train() method -- this entry point cannot run
    # as written. Confirm where the constants and train() are meant to live.
    q_network = QNetwork(OBSERVATION_SPACE, ACTION_SPACE, PARAMETERS_SPACE, Q_NETWORK_UNITS)
    q_network_optimizer = optim.Adam(q_network.parameters(), Q_NETWORK_LR)
    parameter_network = ParameterNetwork(OBSERVATION_SPACE, PARAMETERS_SPACE, PARAMETER_NETWORK_UNITS)
    parameter_network_optimizer = optim.Adam(parameter_network.parameters(), PARAMETER_NETWORK_LR)
    agent = Agent(q_network, parameter_network, ACTION_SPACE, PARAMETERS_SPACE)
    trainer = DeepQLearning(agent, q_network_optimizer, parameter_network_optimizer, MEMORY_CAPACITY, BATCH_SIZE)
    trainer.train(100000)
import collections
import pickle
import os
import spacy
import pandas as pd
from tqdm import tqdm
from nltk import pos_tag
from nltk.corpus import stopwords, wordnet as wn
from nltk.stem import WordNetLemmatizer
from typing import Dict
from embeddings import get_best_sentence, get_data_from_articles
def create_stats_dict():
    """Count lowercase token frequencies across all articles and pickle them.

    Writes the defaultdict(int) mapping to dict.pkl in the current directory.
    """
    hashmap = collections.defaultdict(int)
    article_ids = os.listdir("data/articles")
    # Performance fix: collect pieces and join once -- the previous repeated
    # `all_text +=` was quadratic in the total corpus size.
    pieces = []
    for article_id in article_ids:
        pieces.append(''.join(get_data_from_articles("data/articles/" + article_id)))
    all_text = ''.join(pieces)
    for token in all_text.split(' '):
        hashmap[token.lower()] += 1
    with open("dict.pkl", "wb") as f:
        pickle.dump(hashmap, f)
def get_data_from_sample(path: str) -> Dict:
    """Parse a sample file into {index: {'questions': [...], 'answers': [...]}}.

    Each line looks like "[idx, q1, a1, q2, a2, ...]"; brackets, parentheses
    and quotes are stripped from every question and answer.
    """
    REPLACEMENTS = [("(", ""), (")", ""),
                    ("[", ""), ("]", ""),
                    ("'", ""), ("\"", "")]
    with open(path, "r", encoding="utf8") as f:
        data = f.read()
    examples = [line.split(", ") for line in data.split("\n")]
    qa_dict = {}
    for example in examples:
        try:
            qa_index = int(example[0].replace("[", ""))
        except ValueError:
            # Bug fix: previously only printed and fell through, then used a
            # stale (or unbound) qa_index below, corrupting the result dict.
            print(example[0])
            continue
        questions, answers = [], []
        # Fields alternate question/answer after the leading index.
        for idx in range(1, len(example)):
            cleaned = example[idx]
            for k, v in REPLACEMENTS:
                cleaned = cleaned.replace(k, v)
            if idx % 2 != 0:
                questions.append(cleaned.strip())
            else:
                answers.append(cleaned.strip())
        qa_dict[qa_index] = {'questions': questions, 'answers': answers}
    return qa_dict
def remove_stopwords(sentence: str):
    """Strip English stopwords (case-insensitive) from a sentence.

    Performance fix: the stopword list is materialized into a set once per
    call; previously ``stopwords.words('english')`` was re-fetched from the
    NLTK corpus for every single word, making the function O(words * list).
    """
    stop = set(stopwords.words('english'))
    return ' '.join(word for word in sentence.split() if word.lower() not in stop)
def get_pos_tags(sentence: str):
    """Return the sentence with every token suffixed by its POS tag (word_TAG )."""
    tagged = pos_tag(sentence.split())
    # note: output keeps a trailing space after the last token, as before
    return ''.join(f'{word}_{tag} ' for word, tag in tagged)
def get_lemmas(words: str):
    """Return the WordNet lemma of each whitespace-separated word, space-joined."""
    lemmatizer = WordNetLemmatizer()
    return ' '.join(lemmatizer.lemmatize(word) for word in words.split())
def get_dependencies(sentence: str):
    """Return (text, dep, head text, head POS, children) for each token.

    Performance fix: the spaCy model is loaded once and cached on the
    function object; previously spacy.load() ran on every call, which
    dominates runtime.
    """
    nlp = getattr(get_dependencies, '_nlp', None)
    if nlp is None:
        nlp = spacy.load('en_core_web_sm')
        get_dependencies._nlp = nlp
    doc = nlp(sentence)
    return [(token.text, token.dep_, token.head.text, token.head.pos_,
             list(token.children))
            for token in doc]
def get_wordnet_features(sentence: str, nym_type: str):
    """Expand a stopword-free sentence with related WordNet terms.

    nym_type selects the relation: 'hypernyms', 'hyponyms', 'meronyms'
    (part meronyms) or 'holonyms' (part holonyms); any other value leaves
    the words unexpanded.
    """
    relation_getters = {
        'hypernyms': lambda s: s.hypernyms(),
        'hyponyms': lambda s: s.hyponyms(),
        'meronyms': lambda s: s.part_meronyms(),
        'holonyms': lambda s: s.part_holonyms(),
    }
    getter = relation_getters.get(nym_type)
    pieces = []
    for word in remove_stopwords(sentence).split():
        pieces.append(word)
        if getter is not None:
            pieces.extend(w.name().replace("_", " ")
                          for s in wn.synsets(word)
                          for w in getter(s)
                          if word != w.name())
    return ' '.join(pieces)
def get_synonyms(word: str, pns: bool = False, max: int = 3):
    '''Returns a set of synonyms for a word
    @param word - Lowercase word to get synonyms for
    @param pns - True/False to include proper nouns (EX: panther -> Black Panthers)
    @param max - Maximum number of synonyms to return'''
    names = []
    for synset in wn.synsets(word):
        for lemma in synset.lemmas():
            lemma_name = lemma.name()
            if lemma_name == word:
                continue
            # without pns, keep only all-lowercase lemma names (drops proper nouns)
            if pns or lemma_name == lemma_name.lower():
                names.append(lemma_name.replace("_", " "))
    return set(names[0:max])
def expand_query(sentence: str):
    '''Returns an expanded query with synonyms for all non-stopwords'''
    pieces = []
    for word in remove_stopwords(sentence).split():
        pieces.append(word)
        pieces.extend(get_synonyms(word.lower()))
    return ' '.join(pieces)
def get_accuracy(data_dict):
    """
    `data_dict` is of the following type
    {'qa_idx': {'questions': List[questions], 'answers': List[answers]}}

    Prints per-index accuracy and a true overall accuracy across all pairs.
    """
    total_correct, total_count = 0, 0
    for qa_index, qa_dict in tqdm(data_dict.items()):
        correct, count = 0, 0
        for question, answer in tqdm(zip(qa_dict['questions'], qa_dict['answers'])):
            response = get_best_sentence(question, [qa_index])
            print('Question: ', question, "\n")
            print('Answer: ', response, "\n")
            print('Answer: ', answer, "\n")
            if answer in response:
                correct += 1
            else:
                print('ID: ', qa_index, 'Question: ', question, 'Response: ', response, 'Answer: ', answer)
                print()
            count += 1
        acc = (correct/count)*100
        print(f"Accuracy on {qa_index}: {acc}")
        total_correct += correct
        total_count += count
    # Bug fix: "Overall Accuracy" previously reused the *last* index's
    # accuracy instead of aggregating correct/count across all indices.
    print('Overall Accuracy: ', (total_correct/total_count)*100)
def sample_check(PATH):
    """Load questions, answer sentences and complexity levels from an Excel sheet."""
    df = pd.read_excel(PATH)
    questions = list(df.question)
    answers = [str(a) for a in df.answer_sentence]
    complexity_levels = [int(c) for c in df.complex_level]
    return questions, answers, complexity_levels
sample_check("data/sample_check/sample.xlsx")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 12 14:38:37 2021
@author: afo
"""
import asyncio
import json
from os.path import abspath
from inspect import getsourcefile
import os
import aiohttp
import time
import sys
from understat import Understat
import nest_asyncio
nest_asyncio.apply()
# Get the working dir
# Resolve this script's own location and chdir into it, so the relative
# data/ paths used below land next to the script.
p = abspath(getsourcefile(lambda:0))
p = p.rsplit('/', 1)[0]
os.chdir(p)
print('Working Directory is: %s' % os.getcwd())
# Main asynch loop for data gathering from Understat
# NOTE(review): asyncio.get_event_loop() at import time is deprecated in
# newer Python versions; nest_asyncio.apply() above allows re-entrant
# run_until_complete calls on this loop.
loop = asyncio.get_event_loop()
# Function to collect fixtures from a given year in given league and save to json
async def collect_fixtures(year, league):
    """Fetch one season's league results and save them to data/fixtures.json."""
    async with aiohttp.ClientSession() as session:
        client = Understat(session)
        fixtures = await client.get_league_results(league, year)
        with open(p+'/data/fixtures.json', 'w') as fp:
            json.dump(fixtures, fp)
# Function to collect players from a given year in given league and save to json
async def collect_league_players(year, league):
    """Fetch one season's league players and save them to data/players.json."""
    async with aiohttp.ClientSession() as session:
        client = Understat(session)
        # a team filter could be passed here, e.g. team_title="Liverpool"
        players = await client.get_league_players(league, year)
        with open(p+'/data/players.json', 'w') as fp:
            json.dump(players, fp)
# Function to get details about the player stats of a match given its id
async def get_player_data(match_id):
    """Return per-player stats for a single match id."""
    async with aiohttp.ClientSession() as session:
        client = Understat(session)
        return await client.get_match_players(match_id)
# Function to collect team data from a given year in given league and save to json
async def get_teams(year, league):
    """Fetch one season's team data and save it to data/teams.json."""
    async with aiohttp.ClientSession() as session:
        client = Understat(session)
        teams = await client.get_teams(league, year)
        with open(p+'/data/teams.json', 'w') as fp:
            json.dump(teams, fp)
# Function to try to get the fixtures from Understat, limited to retry number
def loop_fixtures(league, year, retries):
    """Run collect_fixtures with up to 10 retries; exit the process on failure."""
    while retries <= 10:
        try:
            loop.run_until_complete(collect_fixtures(year, league))
            print("Fixtures for " + league + ", " + str(year) + " collected succesfully!")
            time.sleep(1)
            return
        except UnboundLocalError:
            # presumably raised inside understat on a failed fetch -- TODO confirm
            retries = retries + 1
            if retries == 10:
                print("Failed on Fixtures loop!")
                sys.exit()
# Function to try to get the players from Understat, limited to retry number
def loop_players(league, year, retries):
    """Run collect_league_players with up to 10 retries; exit the process on failure."""
    while retries <= 10:
        try:
            loop.run_until_complete(collect_league_players(year, league))
            print("League players for " + league + ", " + str(year) + " collected succesfully!")
            time.sleep(1)
            return
        except UnboundLocalError:
            # presumably raised inside understat on a failed fetch -- TODO confirm
            retries = retries + 1
            if retries == 10:
                print("Failed on League players loop!")
                sys.exit()
# Function to try to get the teams from Understat, limited to retry number
def loop_teams(league, year, retries):
    """Run get_teams with up to 10 retries; exit the process on failure."""
    while retries <= 10:
        try:
            loop.run_until_complete(get_teams(year, league))
            print("Team list for " + league + ", " + str(year) + " collected succesfully!")
            time.sleep(1)
            return
        except UnboundLocalError:
            # presumably raised inside understat on a failed fetch -- TODO confirm
            retries = retries + 1
            if retries == 10:
                print("Failed on Team list loop!")
                sys.exit()
def loop_single_match_data(match_id, retries, got_data, i):
    """Fetch player data for one match with up to 10 retries.

    Returns (data, got_data); data is '' and got_data is False when all
    retries are exhausted.

    Bug fix: `temp` and `got_data` are initialized up front -- previously,
    entering with retries >= 10 (so the loop body never ran) raised
    UnboundLocalError on the return statement itself.
    """
    temp = ''
    got_data = False
    while retries < 10:
        try:
            temp = loop.run_until_complete(get_player_data(match_id))
            got_data = True
            break
        except UnboundLocalError:
            # presumably raised inside understat on a failed fetch -- TODO confirm
            retries = retries + 1
            if retries == 10:
                print("Failed on Player Match data loop! Failed on number nr: " + str(i))
    return temp, got_data
from tictactoe_helper import StartState

# NOTE(review): beginGame is never imported or defined, so this call raises
# NameError as written -- presumably it should be imported from
# tictactoe_helper alongside StartState; confirm and fix the import.
beginGame()
|
#!/usr/bin/env python3
#
# - import a csv table of score files (and possibly edf files)
# - strip out spaces in column names
# - consolidate into trial datablocks (with consensus)
# TODO: use relative paths in csv?
#======================================
import pdb
import os
import argparse
import pandas as pd
import numpy as np
import scoreblock as sb
# Hard stop: this script has been superseded. Everything below the raise is
# unreachable at runtime and kept only for reference.
raise Exception('deprecated, replaced by scoreloader.py')
#==============================================================================
pp = argparse.ArgumentParser()
pp.add_argument('-c', required=True, default=None, type=str, help='csv table of score files')
pp.add_argument('--dest', type=str, default='ANL-load-scores', help='output folder')
args = pp.parse_args()
os.makedirs(args.dest, exist_ok=True)
# import table of score files
df = pd.read_csv(args.c, index_col=0)
# for the case for a csv with 'humanScores' and 'edf' files stacked together
if 'filetype' in df.columns:
    df = df[df['filetype'] == 'humanScores']
# load scores for each trial/day/scorer
ydata = []
for i, row in df.iterrows():
    print(row['trial'], row['genotype'], row['day'], row['scorer'])
    dfi = pd.read_csv(row['file'])
    # strip spaces out of the score file's column names
    dfi.columns = [col.replace(" ","") for col in list(dfi.columns)]
    ydata.append(dfi['Score'].values)
# combine all score vectors into a single stacked dataframe
ydata = np.asarray(ydata)
index_cols = ['trial', 'genotype', 'day', 'scorer']
data_cols = ['Epoch-%5.5i' % (i+1) for i in range(ydata.shape[1])]
df_data = pd.DataFrame(ydata, columns=data_cols)
df_index = df[index_cols]
df_scores = pd.concat([df_index, df_data], axis=1)
# dump scoreblocks (with consensus) for each trial/day combo
td_uniq = df_index[['trial','day']].drop_duplicates().values
data = []
for trial, day in td_uniq:
    df_td = df_scores[(df_index['trial']==trial) & (df_index['day']==day)].copy()
    df_td.reset_index(drop=True, inplace=True)
    sb_td = sb.ScoreBlock(df=df_td, index_cols=index_cols)
    sb_td.add_const_index_col(name='scoreType', value='human', inplace=True)
    sb_cc = sb_td.consensus()
    data.append(sb_cc)
    jf = 'scoreblock-trial-%s-day-%i.json' % (str(trial), day)
    sb_cc.to_json(os.path.join(args.dest, jf))
# make a combined scoreblock and dump it
sb_stack = data[0].stack(others=data[1:])
sb_stack.to_json(os.path.join(args.dest, 'scoreblock-alldata-raw.json'))
# score fractions
sb_count = sb_stack.count(frac=True)
sb_count.to_json(os.path.join(args.dest, 'scoreblock-alldata-frac.json'))
|
'''OpenGL extension EXT.blend_subtract
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
# Extension metadata and enum constants (auto-generated file; do not edit).
EXTENSION_NAME = 'GL_EXT_blend_subtract'
_DEPRECATED = False
GL_FUNC_SUBTRACT_EXT = constant.Constant( 'GL_FUNC_SUBTRACT_EXT', 0x800A )
GL_FUNC_REVERSE_SUBTRACT_EXT = constant.Constant( 'GL_FUNC_REVERSE_SUBTRACT_EXT', 0x800B )
def glInitBlendSubtractEXT():
    '''Return boolean indicating whether this extension is available'''
    return extensions.hasGLExtension( EXTENSION_NAME )
|
import socket
class FakeButton:
    """No-op stand-in for Button, usable when no button server is available."""

    def right_click(self):
        """Do nothing."""

    def left_click(self):
        """Do nothing."""

    def both_click(self):
        """Do nothing."""

    def close(self):
        """Do nothing."""
class Button:
    """Sends click events to a remote button server over a persistent TCP link.

    Each click sends a short letter sequence (e.g. b"Rr"); presumably
    uppercase means press and lowercase release -- confirm with the server
    protocol.
    """

    def __init__(self, server: str, port: int) -> None:
        # Connect immediately; the socket stays open until close().
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((server, port))

    def right_click(self):
        """Send a right-button click."""
        self.socket.sendall(b"Rr")

    def left_click(self):
        """Send a left-button click."""
        self.socket.sendall(b"Ll")

    def both_click(self):
        """Send a simultaneous left+right click."""
        self.socket.sendall(b"LRlr")

    def close(self):
        """Close the connection to the server."""
        self.socket.close()
|
from modules import skeleton
from lib.core import utils
from lib.mode import speed
from lib.sender import execute
from lib.sender import polling
from lib.sender import report
from lib.sender import summary
from lib.sender import summary
class SubdomainScanning(skeleton.Skeleton):
    """Subdomain scanning module built on the Skeleton workflow."""

    def banner(self):
        """Announce the module and create its workspace subdirectory."""
        utils.print_banner("Starting Subdomain Scanning")
        utils.make_directory(self.options['WORKSPACE'] + '/subdomain')

    # clean up things and join all output path together
    def conclude(self):
        """Merge every tool's output into one final file and report it."""
        outputs = utils.get_output_path(self.commands)
        final_output = utils.replace_argument(
            self.options, "$WORKSPACE/subdomain/final-$OUTPUT.txt")
        outputs = utils.join_files(outputs, final_output)
        utils.check_output(final_output)
        summary.push_with_file(self.options, final_output)
        utils.print_elapsed(self.options)

    '''
    Start clean part
    '''

    # clean up gobuster
    def clean_gobuster(self, command):
        """Strip gobuster's 'Found: ' prefixes from its raw output file."""
        utils.print_good('Cleaning for {0}:{1}'.format(
            command.get('banner'), command.get('post_run')))
        raw = utils.just_read(command.get('output_path'))
        if not raw:
            utils.print_bad('Output not found: {0}'.format(
                command.get('output_path')))
            return False
        written = utils.just_write(
            command.get('cleaned_output'), raw.replace('Found: ', ''))
        if written:
            utils.check_output(command.get('cleaned_output'))

    # clean up for massdns result
    def clean_massdns(self, command):
        """Keep only the hostnames of A records from massdns output."""
        utils.print_good('Cleaning for {0}:{1}'.format(
            command.get('banner'), command.get('post_run')))
        lines = utils.just_read(command.get('output_path'), get_list=True)
        if not lines:
            utils.print_bad('Output not found: {0}'.format(
                command.get('output_path')))
            return False
        # keep the host part of every '. A ' record line
        only_A_record = "\n".join([x.split('. A ')[0] for x in lines if '. A ' in x])
        written = utils.just_write(command.get('cleaned_output'), only_A_record)
        if written:
            utils.check_output(command.get('cleaned_output'))
|
import tempfile, csv
class Batchinator:
    """Splits CSV rows into fixed-size batch files inside a temp directory.

    Rows are appended with recordEntry(); iterating the instance finalizes
    the in-progress batch and yields the file name of every batch.

    Bug fixes:
      * __iter__ previously closed and re-appended the current file on every
        call, so iterating twice duplicated the final batch in batchFiles.
      * recordEntry previously wrote to a closed file when called after an
        iteration; it now rolls over to a fresh file in that case.
    """

    def __init__(self, batchSize: int):
        self.workingDir = tempfile.TemporaryDirectory()
        self.currentFile = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=self.workingDir.name)
        self.csvWriter = csv.writer(self.currentFile, delimiter=',', quotechar='"', dialect='unix', quoting=csv.QUOTE_MINIMAL)
        self.batchFiles = []
        self.currentBatch = 0
        self.batchSize = batchSize
        self.entryCount = 0

    def _finalize_current(self):
        # Close the in-progress file and record it as a finished batch.
        self.currentFile.close()
        self.batchFiles.append(self.currentFile)

    def _open_new_file(self):
        # Start a fresh batch file and bind a writer to it.
        self.currentFile = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=self.workingDir.name)
        self.csvWriter = csv.writer(self.currentFile, delimiter=',', quotechar='"', dialect='unix', quoting=csv.QUOTE_MINIMAL)

    def __len__(self):
        return len(self.batchFiles)

    def __iter__(self):
        # Finalize the current file only if it is still open (idempotent).
        if not self.currentFile.closed:
            self._finalize_current()
        # yield the file names
        for file in self.batchFiles:
            self.currentBatch += 1
            yield file.name

    def recordEntry(self, entry: list):
        """Append one CSV row, rolling over to a new batch file when full."""
        # Increase the entry count
        self.entryCount += 1
        # Roll over when the batch is full, or when __iter__ already
        # finalized the current file.
        if self.currentFile.closed or self.entryCount > self.batchSize:
            if not self.currentFile.closed:
                self._finalize_current()
            self._open_new_file()
            # Reset entry count
            self.entryCount = 1
        # Write entry
        self.csvWriter.writerow(entry)
|
import mock
import six
import unittest
from types import FunctionType
from kazoo.handlers.gevent import SequentialGeventHandler
from kazoo.testing.harness import KazooTestHarness
from lymph.core.container import ServiceContainer
from lymph.core.connection import Connection
from lymph.core.interfaces import Interface, Proxy
from lymph.core.rpc import ZmqRPCServer
from lymph.core.messages import Message
from lymph.discovery.static import StaticServiceRegistryHub
from lymph.events.local import LocalEventSystem
from lymph.exceptions import RemoteError
from lymph.client import Client
from lymph.utils.sockets import get_unused_port, create_socket
import werkzeug.test
from werkzeug.wrappers import BaseResponse
def get_side_effect(mocks):
    """Build a Proxy._call side effect backed by a name -> result mapping.

    A result may be a plain value (returned as-is), a callable (invoked with
    the call kwargs), or an Exception instance (re-raised as the matching
    RemoteError subtype). Unknown names yield None.
    """
    class ProxyCall(object):
        def __init__(self, data):
            self.data = data

        def __call__(self, name, **kwargs):
            if name not in self.data:
                return None
            result = self.data[name]
            if isinstance(result, Exception):
                raise getattr(RemoteError, result.__class__.__name__)('', '')
            if callable(result):
                return result(**kwargs)
            return result

        def update(self, func_name, new_value):
            self.data[func_name] = new_value

    return ProxyCall(mocks)
class RpcMockTestCase(unittest.TestCase):
    """TestCase that replaces ``Proxy._call`` with a mock for the test's lifetime."""

    def setUp(self):
        super(RpcMockTestCase, self).setUp()
        patcher = mock.patch.object(Proxy, '_call')
        self.rpc_patch = patcher
        self.rpc_mock = patcher.start()

    def tearDown(self):
        super(RpcMockTestCase, self).tearDown()
        self.rpc_patch.stop()

    def setup_rpc_mocks(self, mocks):
        # Route every proxied RPC call through the mapping-driven side effect.
        self.rpc_mock.side_effect = get_side_effect(mocks)

    def update_rpc_mock(self, func_name, new_value):
        # Swap a single entry in the active side-effect mapping.
        self.rpc_mock.side_effect.update(func_name, new_value)

    @property
    def rpc_mock_calls(self):
        # All calls recorded against the mocked Proxy._call.
        return self.rpc_mock.mock_calls
class EventMockTestCase(unittest.TestCase):
    """TestCase that captures emitted events by mocking ``ServiceContainer.emit_event``."""

    def setUp(self):
        super(EventMockTestCase, self).setUp()
        patcher = mock.patch.object(ServiceContainer, 'emit_event')
        self.event_patch = patcher
        self.event_mock = patcher.start()

    def tearDown(self):
        super(EventMockTestCase, self).tearDown()
        self.event_patch.stop()

    @property
    def events(self):
        # Every recorded emit_event call made during the test.
        return self.event_mock.mock_calls
class MockServiceNetwork(object):
    """In-process network of MockServiceContainers sharing one static registry."""

    def __init__(self):
        self.service_containers = {}
        self.next_port = 1
        self.discovery_hub = StaticServiceRegistryHub()
        self.events = LocalEventSystem()

    def add_service(self, cls, interface_name=None, **kwargs):
        """Create a container for *cls*, register it, and return it."""
        # Deliberately invalid IP: nothing here must ever hit a real network.
        kwargs.setdefault('ip', '300.0.0.1')
        kwargs.setdefault('port', self.next_port)
        self.next_port += 1
        registry = self.discovery_hub.create_registry()
        container = MockServiceContainer(
            registry=registry, events=self.events, **kwargs)
        container.install_interface(cls, name=interface_name)
        self.service_containers[container.endpoint] = container
        # Back-reference so MockRPCServer can resolve endpoints in-process.
        container._mock_network = self
        return container

    def _broadcast(self, method_name):
        # Invoke the named lifecycle method on every registered container.
        for container in six.itervalues(self.service_containers):
            getattr(container, method_name)()

    def start(self):
        self._broadcast('start')

    def stop(self, **kwargs):
        self._broadcast('stop')

    def join(self):
        self._broadcast('join')
class MockRPCServer(ZmqRPCServer):
    """ZmqRPCServer variant that delivers messages in-process instead of over ZMQ."""

    def __init__(self, *args, **kwargs):
        super(MockRPCServer, self).__init__(*args, **kwargs)
        self._bind()

    def _bind(self):
        # No real socket is opened; the endpoint is a mock:// identifier only.
        self.endpoint = 'mock://%s:%s' % (self.ip, self.port)

    def _close_sockets(self):
        # Nothing was opened, so there is nothing to close.
        pass

    def connect(self, endpoint):
        """Return the cached Connection for *endpoint*, creating it on first use."""
        connection = self.connections.get(endpoint)
        if connection is None:
            connection = Connection(self, endpoint)
            self.connections[endpoint] = connection
        return connection

    def _send_message(self, address, msg):
        # Resolve the destination container via the mock network registry.
        dst_endpoint = self.container.lookup(address).connect().endpoint
        dst_container = self.container._mock_network.service_containers[dst_endpoint]
        # Round-trip through msgpack so (de)serialization is still exercised.
        frames = msg.pack_frames()
        frames.insert(0, self.endpoint.encode('utf-8'))
        dst_container.server.recv_message(Message.unpack_frames(frames))

    def _recv_loop(self):
        # Delivery is synchronous and in-process; no receive loop is needed.
        pass
class MockServiceContainer(ServiceContainer):
    """ServiceContainer that uses MockRPCServer and fakes shared sockets locally."""

    server_cls = MockRPCServer

    def __init__(self, *args, **kwargs):
        super(MockServiceContainer, self).__init__(*args, **kwargs)
        # Cache of real localhost sockets standing in for shared sockets,
        # keyed by the requested port.
        self.__shared_sockets = {}

    def get_shared_socket_fd(self, port):
        """Return a file descriptor for *port*, binding a localhost socket on first use."""
        if port not in self.__shared_sockets:
            # Bind an ephemeral localhost port to stand in for the shared socket.
            host_port = get_unused_port()
            sock = create_socket('127.0.0.1:%s' % host_port, inheritable=True)
            self.__shared_sockets[port] = sock
        return self.__shared_sockets[port].fileno()
class LymphIntegrationTestCase(KazooTestHarness):
    """Integration-test base managing container lifecycle and optional ZooKeeper."""

    use_zookeeper = False

    def setUp(self):
        super(LymphIntegrationTestCase, self).setUp()
        self._containers = []
        if self.use_zookeeper:
            self.setup_zookeeper(handler=SequentialGeventHandler())

    def tearDown(self):
        super(LymphIntegrationTestCase, self).tearDown()
        # Stop every container first, then join, so shutdowns overlap.
        for container in self._containers:
            container.stop()
        for container in self._containers:
            container.join()
        if self.use_zookeeper:
            self.teardown_zookeeper()

    def create_client(self, **kwargs):
        container, _ = self.create_container(**kwargs)
        return Client(container)

    def create_registry(self, **kwargs):
        # Subclasses provide self.registry; hook kept for overriding.
        return self.registry

    def create_event_system(self, **kwargs):
        # Subclasses provide self.events; hook kept for overriding.
        return self.events

    def create_container(self, interface_cls=None, interface_name=None, events=None, registry=None, **kwargs):
        """Build, start, and track a ServiceContainer; return (container, interface)."""
        events = events or self.create_event_system(**kwargs)
        registry = registry or self.create_registry(**kwargs)
        container = ServiceContainer(events=events, registry=registry, **kwargs)
        interface = (container.install_interface(interface_cls, name=interface_name)
                     if interface_cls else None)
        container.start()
        self._containers.append(container)
        return container, interface
class ClientInterface(Interface):
    # Minimal interface used as the default client/service class in
    # LymphServiceTestCase below.
    service_type = 'client'
class LymphServiceTestCase(unittest.TestCase):
    """TestCase wiring one service and one client interface into a mock network."""

    client_class = ClientInterface
    client_name = 'client'
    client_config = {}
    service_class = ClientInterface
    service_name = 'client'
    service_config = {}

    def setUp(self):
        super(LymphServiceTestCase, self).setUp()
        self.network = MockServiceNetwork()
        # Install the service first, then the client, then start everything.
        self.service_container, self.service = self._install(
            self.service_class, self.service_name, self.service_config)
        self.client_container, self.client = self._install(
            self.client_class, self.client_name, self.client_config)
        self.network.start()

    def _install(self, cls, name, config):
        # Add one interface to the mock network and apply its configuration.
        container = self.network.add_service(cls, interface_name=name)
        interface = container.installed_interfaces[name]
        interface.apply_config(config)
        return container, interface

    def tearDown(self):
        super(LymphServiceTestCase, self).tearDown()
        self.network.stop()
        self.network.join()
class APITestCase(unittest.TestCase):
    """Base class for HTTP API tests run against a mocked service network."""

    interface_name = None

    def setUp(self):
        super(APITestCase, self).setUp()
        self.network = MockServiceNetwork()
        # Default the interface name to the lowercased interface class name.
        name = self.interface_name or self.test_interface.__name__.lower()
        self.interface_name = name
        container = self.network.add_service(self.test_interface, interface_name=name)
        web_interface = container.installed_interfaces[name]
        self.network.start()
        # Werkzeug test client drives the WSGI interface without a real server.
        self.client = werkzeug.test.Client(web_interface, BaseResponse)

    def tearDown(self):
        super(APITestCase, self).tearDown()
        self.network.stop()
|
from typing import Union, List, Iterable
from datetime import datetime
from .internals.base_classes import (
BaseSyncCinnamon,
BaseCinnamonField,
QueryFieldSet,
QueryField,
)
from .internals.constants import CinnamonUndefined
from .internals import scalars
from . import fields as fields_module
from . import objects
from . import inputs
class _ARGUMENT_LEGENDS:
class campaign_template:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class campaign_templates_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class campaign_templates_with_current_gcpx_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class catalog:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class catalogs_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class creative_font:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class creative_fonts_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class creative_image:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class creative_images_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class creative_layer:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class creative_layers_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class creative_template:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class creative_templates_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class entitlement:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class marketing_ad:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class marketing_ads_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class marketing_campaign:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class marketing_campaigns_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class marketplace:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class marketplaces_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class media_channel:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class media_channels_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class notification:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class organization:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class organizations_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class product:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class products_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class result:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class vendor:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class vendors_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class vendor_token:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class vendor_tokens_each:
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class create_catalog:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CatalogCreateInput"
python_iterable = None
python_name = "input"
class import_catalog:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CatalogImportInput"
python_iterable = None
python_name = "input"
class update_catalog:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CatalogUpdateInput"
python_iterable = None
python_name = "input"
class update_catalogs_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CatalogUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class sync_catalog:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CatalogSyncInput"
python_iterable = None
python_name = "input"
class sync_catalog_products:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class delete_catalog:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_creative_font:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeFontCreateInput"
python_iterable = None
python_name = "input"
class update_creative_font:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeFontUpdateInput"
python_iterable = None
python_name = "input"
class update_creative_fonts_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeFontUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class delete_creative_font:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_creative_image:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeImageCreateInput"
python_iterable = None
python_name = "input"
class update_creative_image:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeImageUpdateInput"
python_iterable = None
python_name = "input"
class update_creative_images_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeImageUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class delete_creative_image:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_creative_layer:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeLayerCreateInput"
python_iterable = None
python_name = "input"
class update_creative_layer:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeLayerUpdateInput"
python_iterable = None
python_name = "input"
class update_creative_layers_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeLayerUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class delete_creative_layer:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_creative_template:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeTemplateCreateInput"
python_iterable = None
python_name = "input"
class update_creative_template:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeTemplateUpdateInput"
python_iterable = None
python_name = "input"
class update_creative_templates_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "CreativeTemplateUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class delete_creative_template:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_entitlement:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "EntitlementInput"
python_iterable = None
python_name = "input"
class update_entitlement:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "EntitlementUpdateInput"
python_iterable = None
python_name = "input"
class delete_entitlement:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_marketing_campaign:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketingCampaignInput"
python_iterable = None
python_name = "input"
class update_marketing_campaign:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketingCampaignUpdateInput"
python_iterable = None
python_name = "input"
class update_marketing_campaigns_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketingCampaignUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class sync_marketing_campaign:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketingCampaignSyncInput"
python_iterable = None
python_name = "input"
class approve_marketing_campaign:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class last_change_date(BaseCinnamonField):
api_name = "lastChangeDate"
api_kind = "SCALAR"
api_kind_name = "DateISO"
python_iterable = None
python_name = "last_change_date"
scalar = scalars.DateISO
class delete_marketing_campaign:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_marketplace:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketplaceInput"
python_iterable = None
python_name = "input"
class update_marketplace:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketplaceUpdateInput"
python_iterable = None
python_name = "input"
class update_marketplaces_each:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MarketplaceUpdateInput"
python_iterable = None
python_name = "input"
class sort(BaseCinnamonField):
api_name = "sort"
api_kind = "INPUT_OBJECT"
api_kind_name = "SortInput"
python_iterable = None
python_name = "sort"
class filter(BaseCinnamonField):
api_name = "filter"
api_kind = "SCALAR"
api_kind_name = "FilterInput"
python_iterable = None
python_name = "filter"
scalar = scalars.FilterInput
class show_deleted(BaseCinnamonField):
api_name = "showDeleted"
api_kind = "SCALAR"
api_kind_name = "Boolean"
python_iterable = None
python_name = "show_deleted"
scalar = scalars.Boolean
class delete_marketplace:
class id(BaseCinnamonField):
api_name = "id"
api_kind = "SCALAR"
api_kind_name = "ObjectId"
python_iterable = None
python_name = "id"
scalar = scalars.ObjectId
class create_media_channel:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MediaChannelCreateInput"
python_iterable = None
python_name = "input"
class import_media_channel:
class input(BaseCinnamonField):
api_name = "input"
api_kind = "INPUT_OBJECT"
api_kind_name = "MediaChannelImportInput"
python_iterable = None
python_name = "input"
    # NOTE(review): generated argument-legend classes. Each inner class maps one
    # Python-side argument name (python_name) to its GraphQL name/kind
    # (api_name / api_kind / api_kind_name); SCALAR arguments also carry the
    # matching `scalars.*` serializer. These legends are passed to
    # Cinnamon._query_builder as `_ARGUMENT_LEGENDS.<operation>` — presumably
    # emitted by the SDK generator; confirm before editing by hand.
    # Arguments for the update_media_channel operation.
    class update_media_channel:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "MediaChannelUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the bulk update_media_channels_each operation.
    class update_media_channels_each:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "MediaChannelUpdateInput"
            python_iterable = None
            python_name = "input"
        class sort(BaseCinnamonField):
            api_name = "sort"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "SortInput"
            python_iterable = None
            python_name = "sort"
        class filter(BaseCinnamonField):
            api_name = "filter"
            api_kind = "SCALAR"
            api_kind_name = "FilterInput"
            python_iterable = None
            python_name = "filter"
            scalar = scalars.FilterInput
        # Note: python_name "show_deleted" maps to API name "showDeleted".
        class show_deleted(BaseCinnamonField):
            api_name = "showDeleted"
            api_kind = "SCALAR"
            api_kind_name = "Boolean"
            python_iterable = None
            python_name = "show_deleted"
            scalar = scalars.Boolean
    # Arguments for the delete_media_channel operation.
    class delete_media_channel:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
    # Arguments for the update_notification operation.
    class update_notification:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "NotificationUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the create_organization operation.
    class create_organization:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "OrganizationInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the update_organization operation.
    class update_organization:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "OrganizationUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the bulk update_organizations_each operation.
    class update_organizations_each:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "OrganizationUpdateInput"
            python_iterable = None
            python_name = "input"
        class sort(BaseCinnamonField):
            api_name = "sort"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "SortInput"
            python_iterable = None
            python_name = "sort"
        class filter(BaseCinnamonField):
            api_name = "filter"
            api_kind = "SCALAR"
            api_kind_name = "FilterInput"
            python_iterable = None
            python_name = "filter"
            scalar = scalars.FilterInput
        class show_deleted(BaseCinnamonField):
            api_name = "showDeleted"
            api_kind = "SCALAR"
            api_kind_name = "Boolean"
            python_iterable = None
            python_name = "show_deleted"
            scalar = scalars.Boolean
    # Arguments for the delete_organization operation.
    class delete_organization:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
    # Arguments for the create_product operation.
    class create_product:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "ProductInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the update_product operation.
    class update_product:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "ProductUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the bulk update_products_each operation.
    class update_products_each:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "ProductUpdateInput"
            python_iterable = None
            python_name = "input"
        class sort(BaseCinnamonField):
            api_name = "sort"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "SortInput"
            python_iterable = None
            python_name = "sort"
        class filter(BaseCinnamonField):
            api_name = "filter"
            api_kind = "SCALAR"
            api_kind_name = "FilterInput"
            python_iterable = None
            python_name = "filter"
            scalar = scalars.FilterInput
        class show_deleted(BaseCinnamonField):
            api_name = "showDeleted"
            api_kind = "SCALAR"
            api_kind_name = "Boolean"
            python_iterable = None
            python_name = "show_deleted"
            scalar = scalars.Boolean
    # Arguments for the delete_product operation.
    class delete_product:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
    # Arguments for the request_reset_password operation.
    class request_reset_password:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "RequestResetPasswordInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the reset_password operation.
    class reset_password:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "ResetPasswordInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the update_user operation.
    class update_user:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "UserUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the create_vendor operation.
    class create_vendor:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "VendorInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the update_vendor operation.
    class update_vendor:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "VendorUpdateInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the bulk update_vendors_each operation.
    class update_vendors_each:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "VendorUpdateInput"
            python_iterable = None
            python_name = "input"
        class sort(BaseCinnamonField):
            api_name = "sort"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "SortInput"
            python_iterable = None
            python_name = "sort"
        class filter(BaseCinnamonField):
            api_name = "filter"
            api_kind = "SCALAR"
            api_kind_name = "FilterInput"
            python_iterable = None
            python_name = "filter"
            scalar = scalars.FilterInput
        class show_deleted(BaseCinnamonField):
            api_name = "showDeleted"
            api_kind = "SCALAR"
            api_kind_name = "Boolean"
            python_iterable = None
            python_name = "show_deleted"
            scalar = scalars.Boolean
    # Arguments for the delete_vendor operation.
    class delete_vendor:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
    # Arguments for the create_vendor_token operation.
    class create_vendor_token:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "VendorTokenInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the login_vendor operation.
    class login_vendor:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "LoginVendorInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the set_vendor_password operation.
    class set_vendor_password:
        class input(BaseCinnamonField):
            api_name = "input"
            api_kind = "INPUT_OBJECT"
            api_kind_name = "SetVendorPasswordInput"
            python_iterable = None
            python_name = "input"
    # Arguments for the delete_vendor_token operation.
    class delete_vendor_token:
        class id(BaseCinnamonField):
            api_name = "id"
            api_kind = "SCALAR"
            api_kind_name = "ObjectId"
            python_iterable = None
            python_name = "id"
            scalar = scalars.ObjectId
class Cinnamon(BaseSyncCinnamon):
    """Synchronous client exposing one wrapper method per GraphQL operation.

    Each single-object method builds a query via ``self._query_builder``
    (operation kind, API field name, requested ``fields``, argument dict,
    argument legend, paginated flag), executes it with ``self.api`` and wraps
    ``response["data"][<field>]`` in the matching ``objects.*`` type. Each
    ``*_each`` method instead returns ``self.iterate_edges(...)`` over the
    corresponding connection field.

    NOTE(review): this class appears auto-generated — keep edits in the
    generator, not here.
    """
    def campaign_template(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CampaignTemplateFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CampaignTemplate:
        """Fetch one CampaignTemplate by ``id`` (query ``campaignTemplate``)."""
        query_args = self._query_builder(
            "query",
            "campaignTemplate",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.campaign_template,
            False,
        )
        return objects.CampaignTemplate(
            self.api(headers=headers, token=token, **query_args)["data"][
                "campaignTemplate"
            ],
        )
    def campaign_templates_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CampaignTemplateConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CampaignTemplateConnection]:
        """Iterate the ``campaignTemplates`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "campaignTemplates",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.campaign_templates_each,
            True,
        )
        return self.iterate_edges(
            objects.CampaignTemplateConnection,
            query_args,
            headers,
            token,
            "campaignTemplates",
        )
    def campaign_templates_with_current_gcpx_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CampaignTemplateConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CampaignTemplateConnection]:
        """Iterate the ``campaignTemplatesWithCurrentGCPX`` connection."""
        query_args = self._query_builder(
            "query",
            "campaignTemplatesWithCurrentGCPX",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.campaign_templates_with_current_gcpx_each,
            True,
        )
        return self.iterate_edges(
            objects.CampaignTemplateConnection,
            query_args,
            headers,
            token,
            "campaignTemplatesWithCurrentGCPX",
        )
    def catalog(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CatalogFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Catalog:
        """Fetch one Catalog by ``id`` (query ``catalog``)."""
        query_args = self._query_builder(
            "query", "catalog", fields, {"id": id,}, _ARGUMENT_LEGENDS.catalog, False,
        )
        return objects.Catalog(
            self.api(headers=headers, token=token, **query_args)["data"]["catalog"],
        )
    def catalogs_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CatalogConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CatalogConnection]:
        """Iterate the ``catalogs`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "catalogs",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.catalogs_each,
            True,
        )
        return self.iterate_edges(
            objects.CatalogConnection, query_args, headers, token, "catalogs",
        )
    def creative_font(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeFontFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeFont:
        """Fetch one CreativeFont by ``id`` (query ``creativeFont``)."""
        query_args = self._query_builder(
            "query",
            "creativeFont",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.creative_font,
            False,
        )
        return objects.CreativeFont(
            self.api(headers=headers, token=token, **query_args)["data"][
                "creativeFont"
            ],
        )
    def creative_fonts_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeFontConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeFontConnection]:
        """Iterate the ``creativeFonts`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "creativeFonts",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.creative_fonts_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeFontConnection, query_args, headers, token, "creativeFonts",
        )
    def creative_image(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeImageFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeImage:
        """Fetch one CreativeImage by ``id`` (query ``creativeImage``)."""
        query_args = self._query_builder(
            "query",
            "creativeImage",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.creative_image,
            False,
        )
        return objects.CreativeImage(
            self.api(headers=headers, token=token, **query_args)["data"][
                "creativeImage"
            ],
        )
    def creative_images_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeImageConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeImageConnection]:
        """Iterate the ``creativeImages`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "creativeImages",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.creative_images_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeImageConnection,
            query_args,
            headers,
            token,
            "creativeImages",
        )
    def creative_layer(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeLayerFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeLayer:
        """Fetch one CreativeLayer by ``id`` (query ``creativeLayer``)."""
        query_args = self._query_builder(
            "query",
            "creativeLayer",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.creative_layer,
            False,
        )
        return objects.CreativeLayer(
            self.api(headers=headers, token=token, **query_args)["data"][
                "creativeLayer"
            ],
        )
    def creative_layers_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeLayerConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeLayerConnection]:
        """Iterate the ``creativeLayers`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "creativeLayers",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.creative_layers_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeLayerConnection,
            query_args,
            headers,
            token,
            "creativeLayers",
        )
    def creative_template(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeTemplateFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeTemplate:
        """Fetch one CreativeTemplate by ``id`` (query ``creativeTemplate``)."""
        query_args = self._query_builder(
            "query",
            "creativeTemplate",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.creative_template,
            False,
        )
        return objects.CreativeTemplate(
            self.api(headers=headers, token=token, **query_args)["data"][
                "creativeTemplate"
            ],
        )
    def creative_templates_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeTemplateConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeTemplateConnection]:
        """Iterate the ``creativeTemplates`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "creativeTemplates",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.creative_templates_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeTemplateConnection,
            query_args,
            headers,
            token,
            "creativeTemplates",
        )
    def entitlement(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.EntitlementFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Entitlement:
        """Fetch one Entitlement by ``id`` (query ``entitlement``)."""
        query_args = self._query_builder(
            "query",
            "entitlement",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.entitlement,
            False,
        )
        return objects.Entitlement(
            self.api(headers=headers, token=token, **query_args)["data"]["entitlement"],
        )
    def marketing_ad(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingAdFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingAd:
        """Fetch one MarketingAd by ``id`` (query ``marketingAd``)."""
        query_args = self._query_builder(
            "query",
            "marketingAd",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.marketing_ad,
            False,
        )
        return objects.MarketingAd(
            self.api(headers=headers, token=token, **query_args)["data"]["marketingAd"],
        )
    def marketing_ads_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingAdConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MarketingAdConnection]:
        """Iterate the ``marketingAds`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "marketingAds",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.marketing_ads_each,
            True,
        )
        return self.iterate_edges(
            objects.MarketingAdConnection, query_args, headers, token, "marketingAds",
        )
    def marketing_campaign(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingCampaign:
        """Fetch one MarketingCampaign by ``id`` (query ``marketingCampaign``)."""
        query_args = self._query_builder(
            "query",
            "marketingCampaign",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.marketing_campaign,
            False,
        )
        return objects.MarketingCampaign(
            self.api(headers=headers, token=token, **query_args)["data"][
                "marketingCampaign"
            ],
        )
    def marketing_campaigns_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MarketingCampaignConnection]:
        """Iterate the ``marketingCampaigns`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "marketingCampaigns",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.marketing_campaigns_each,
            True,
        )
        return self.iterate_edges(
            objects.MarketingCampaignConnection,
            query_args,
            headers,
            token,
            "marketingCampaigns",
        )
    def marketplace(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketplaceFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Marketplace:
        """Fetch one Marketplace by ``id`` (query ``marketplace``)."""
        query_args = self._query_builder(
            "query",
            "marketplace",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.marketplace,
            False,
        )
        return objects.Marketplace(
            self.api(headers=headers, token=token, **query_args)["data"]["marketplace"],
        )
    def marketplaces_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketplaceConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MarketplaceConnection]:
        """Iterate the ``marketplaces`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "marketplaces",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.marketplaces_each,
            True,
        )
        return self.iterate_edges(
            objects.MarketplaceConnection, query_args, headers, token, "marketplaces",
        )
    def media_channel(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MediaChannel:
        """Fetch one MediaChannel by ``id`` (query ``mediaChannel``)."""
        query_args = self._query_builder(
            "query",
            "mediaChannel",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.media_channel,
            False,
        )
        return objects.MediaChannel(
            self.api(headers=headers, token=token, **query_args)["data"][
                "mediaChannel"
            ],
        )
    def media_channels_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MediaChannelConnection]:
        """Iterate the ``mediaChannels`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "mediaChannels",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.media_channels_each,
            True,
        )
        return self.iterate_edges(
            objects.MediaChannelConnection, query_args, headers, token, "mediaChannels",
        )
    def notification(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.NotificationFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Notification:
        """Fetch one Notification by ``id`` (query ``notification``)."""
        query_args = self._query_builder(
            "query",
            "notification",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.notification,
            False,
        )
        return objects.Notification(
            self.api(headers=headers, token=token, **query_args)["data"][
                "notification"
            ],
        )
    def organization(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.OrganizationFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Organization:
        """Fetch one Organization by ``id`` (query ``organization``)."""
        query_args = self._query_builder(
            "query",
            "organization",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.organization,
            False,
        )
        return objects.Organization(
            self.api(headers=headers, token=token, **query_args)["data"][
                "organization"
            ],
        )
    def organizations_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.OrganizationConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.OrganizationConnection]:
        """Iterate the ``organizations`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "organizations",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.organizations_each,
            True,
        )
        return self.iterate_edges(
            objects.OrganizationConnection, query_args, headers, token, "organizations",
        )
    def product(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ProductFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Product:
        """Fetch one Product by ``id`` (query ``product``)."""
        query_args = self._query_builder(
            "query", "product", fields, {"id": id,}, _ARGUMENT_LEGENDS.product, False,
        )
        return objects.Product(
            self.api(headers=headers, token=token, **query_args)["data"]["product"],
        )
    def products_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ProductConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.ProductConnection]:
        """Iterate the ``products`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "products",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.products_each,
            True,
        )
        return self.iterate_edges(
            objects.ProductConnection, query_args, headers, token, "products",
        )
    def result(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ResultFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Result:
        """Fetch one Result by ``id`` (query ``result``)."""
        query_args = self._query_builder(
            "query", "result", fields, {"id": id,}, _ARGUMENT_LEGENDS.result, False,
        )
        return objects.Result(
            self.api(headers=headers, token=token, **query_args)["data"]["result"],
        )
    def vendor(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Vendor:
        """Fetch one Vendor by ``id`` (query ``vendor``)."""
        query_args = self._query_builder(
            "query", "vendor", fields, {"id": id,}, _ARGUMENT_LEGENDS.vendor, False,
        )
        return objects.Vendor(
            self.api(headers=headers, token=token, **query_args)["data"]["vendor"],
        )
    def vendors_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.VendorConnection]:
        """Iterate the ``vendors`` connection with optional sort/filter."""
        query_args = self._query_builder(
            "query",
            "vendors",
            fields,
            {"sort": sort, "filter": filter, "show_deleted": show_deleted,},
            _ARGUMENT_LEGENDS.vendors_each,
            True,
        )
        return self.iterate_edges(
            objects.VendorConnection, query_args, headers, token, "vendors",
        )
    def vendor_token(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorTokenFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.VendorToken:
        """Fetch one VendorToken by ``id`` (query ``vendorToken``)."""
        query_args = self._query_builder(
            "query",
            "vendorToken",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.vendor_token,
            False,
        )
        return objects.VendorToken(
            self.api(headers=headers, token=token, **query_args)["data"]["vendorToken"],
        )
    def vendor_tokens_each(
        self,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorTokenConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.VendorTokenConnection]:
        """Iterate the ``vendorTokens`` connection (no show_deleted argument)."""
        query_args = self._query_builder(
            "query",
            "vendorTokens",
            fields,
            {"sort": sort, "filter": filter,},
            _ARGUMENT_LEGENDS.vendor_tokens_each,
            True,
        )
        return self.iterate_edges(
            objects.VendorTokenConnection, query_args, headers, token, "vendorTokens",
        )
def create_catalog(
self,
input: inputs.CatalogCreateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Catalog:
query_args = self._query_builder(
"mutation",
"createCatalog",
fields,
{"input": input,},
_ARGUMENT_LEGENDS.create_catalog,
False,
)
return objects.Catalog(
self.api(headers=headers, token=token, **query_args)["data"][
"createCatalog"
],
)
def import_catalog(
self,
input: inputs.CatalogImportInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Catalog:
query_args = self._query_builder(
"mutation",
"importCatalog",
fields,
{"input": input,},
_ARGUMENT_LEGENDS.import_catalog,
False,
)
return objects.Catalog(
self.api(headers=headers, token=token, **query_args)["data"][
"importCatalog"
],
)
def update_catalog(
self,
id: str,
input: inputs.CatalogUpdateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Catalog:
query_args = self._query_builder(
"mutation",
"updateCatalog",
fields,
{"id": id, "input": input,},
_ARGUMENT_LEGENDS.update_catalog,
False,
)
return objects.Catalog(
self.api(headers=headers, token=token, **query_args)["data"][
"updateCatalog"
],
)
def update_catalogs_each(
self,
input: inputs.CatalogUpdateInput,
sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogConnectionFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> Iterable[objects.CatalogConnection]:
query_args = self._query_builder(
"mutation",
"updateCatalogs",
fields,
{
"input": input,
"sort": sort,
"filter": filter,
"show_deleted": show_deleted,
},
_ARGUMENT_LEGENDS.update_catalogs_each,
True,
)
return self.iterate_edges(
objects.CatalogConnection, query_args, headers, token, "updateCatalogs",
)
def sync_catalog(
self,
id: str,
input: Union[
inputs.CatalogSyncInput, None, CinnamonUndefined
] = CinnamonUndefined,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Catalog:
query_args = self._query_builder(
"mutation",
"syncCatalog",
fields,
{"id": id, "input": input,},
_ARGUMENT_LEGENDS.sync_catalog,
False,
)
return objects.Catalog(
self.api(headers=headers, token=token, **query_args)["data"]["syncCatalog"],
)
def sync_catalog_products(
self,
id: str,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CatalogFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Catalog:
query_args = self._query_builder(
"mutation",
"syncCatalogProducts",
fields,
{"id": id,},
_ARGUMENT_LEGENDS.sync_catalog_products,
False,
)
return objects.Catalog(
self.api(headers=headers, token=token, **query_args)["data"][
"syncCatalogProducts"
],
)
def delete_catalog(
self,
id: str,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.DeletionFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Deletion:
query_args = self._query_builder(
"mutation",
"deleteCatalog",
fields,
{"id": id,},
_ARGUMENT_LEGENDS.delete_catalog,
False,
)
return objects.Deletion(
self.api(headers=headers, token=token, **query_args)["data"][
"deleteCatalog"
],
)
def create_creative_font(
self,
input: inputs.CreativeFontCreateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CreativeFontFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.CreativeFont:
query_args = self._query_builder(
"mutation",
"createCreativeFont",
fields,
{"input": input,},
_ARGUMENT_LEGENDS.create_creative_font,
False,
)
return objects.CreativeFont(
self.api(headers=headers, token=token, **query_args)["data"][
"createCreativeFont"
],
)
def update_creative_font(
self,
id: str,
input: inputs.CreativeFontUpdateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CreativeFontFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.CreativeFont:
query_args = self._query_builder(
"mutation",
"updateCreativeFont",
fields,
{"id": id, "input": input,},
_ARGUMENT_LEGENDS.update_creative_font,
False,
)
return objects.CreativeFont(
self.api(headers=headers, token=token, **query_args)["data"][
"updateCreativeFont"
],
)
def update_creative_fonts_each(
self,
input: inputs.CreativeFontUpdateInput,
sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CreativeFontConnectionFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> Iterable[objects.CreativeFontConnection]:
query_args = self._query_builder(
"mutation",
"updateCreativeFonts",
fields,
{
"input": input,
"sort": sort,
"filter": filter,
"show_deleted": show_deleted,
},
_ARGUMENT_LEGENDS.update_creative_fonts_each,
True,
)
return self.iterate_edges(
objects.CreativeFontConnection,
query_args,
headers,
token,
"updateCreativeFonts",
)
def delete_creative_font(
self,
id: str,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.DeletionFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.Deletion:
query_args = self._query_builder(
"mutation",
"deleteCreativeFont",
fields,
{"id": id,},
_ARGUMENT_LEGENDS.delete_creative_font,
False,
)
return objects.Deletion(
self.api(headers=headers, token=token, **query_args)["data"][
"deleteCreativeFont"
],
)
def create_creative_image(
self,
input: inputs.CreativeImageCreateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CreativeImageFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.CreativeImage:
query_args = self._query_builder(
"mutation",
"createCreativeImage",
fields,
{"input": input,},
_ARGUMENT_LEGENDS.create_creative_image,
False,
)
return objects.CreativeImage(
self.api(headers=headers, token=token, **query_args)["data"][
"createCreativeImage"
],
)
def update_creative_image(
self,
id: str,
input: inputs.CreativeImageUpdateInput,
fields: List[
Union[QueryField, QueryFieldSet, str]
] = fields_module.CreativeImageFields._sdk_default_fields,
headers: Union[dict, None] = None,
token: Union[str, None] = None,
) -> objects.CreativeImage:
query_args = self._query_builder(
"mutation",
"updateCreativeImage",
fields,
{"id": id, "input": input,},
_ARGUMENT_LEGENDS.update_creative_image,
False,
)
return objects.CreativeImage(
self.api(headers=headers, token=token, **query_args)["data"][
"updateCreativeImage"
],
)
    def update_creative_images_each(
        self,
        input: inputs.CreativeImageUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeImageConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeImageConnection]:
        """Run the bulk ``updateCreativeImages`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateCreativeImages",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_creative_images_each,
            True,
        )
        # iterate_edges pages through the connection result lazily.
        return self.iterate_edges(
            objects.CreativeImageConnection,
            query_args,
            headers,
            token,
            "updateCreativeImages",
        )
    def delete_creative_image(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteCreativeImage`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteCreativeImage",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_creative_image,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteCreativeImage"
            ],
        )
    def create_creative_layer(
        self,
        input: inputs.CreativeLayerCreateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeLayerFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeLayer:
        """Run the ``createCreativeLayer`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createCreativeLayer",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_creative_layer,
            False,
        )
        return objects.CreativeLayer(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createCreativeLayer"
            ],
        )
    def update_creative_layer(
        self,
        id: str,
        input: inputs.CreativeLayerUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeLayerFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeLayer:
        """Run the ``updateCreativeLayer`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateCreativeLayer",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_creative_layer,
            False,
        )
        return objects.CreativeLayer(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateCreativeLayer"
            ],
        )
    def update_creative_layers_each(
        self,
        input: inputs.CreativeLayerUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeLayerConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeLayerConnection]:
        """Run the bulk ``updateCreativeLayers`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateCreativeLayers",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_creative_layers_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeLayerConnection,
            query_args,
            headers,
            token,
            "updateCreativeLayers",
        )
    def delete_creative_layer(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteCreativeLayer`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteCreativeLayer",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_creative_layer,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteCreativeLayer"
            ],
        )
    def create_creative_template(
        self,
        input: inputs.CreativeTemplateCreateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeTemplateFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeTemplate:
        """Run the ``createCreativeTemplate`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createCreativeTemplate",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_creative_template,
            False,
        )
        return objects.CreativeTemplate(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createCreativeTemplate"
            ],
        )
    def update_creative_template(
        self,
        id: str,
        input: inputs.CreativeTemplateUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeTemplateFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.CreativeTemplate:
        """Run the ``updateCreativeTemplate`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateCreativeTemplate",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_creative_template,
            False,
        )
        return objects.CreativeTemplate(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateCreativeTemplate"
            ],
        )
    def update_creative_templates_each(
        self,
        input: inputs.CreativeTemplateUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.CreativeTemplateConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.CreativeTemplateConnection]:
        """Run the bulk ``updateCreativeTemplates`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateCreativeTemplates",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_creative_templates_each,
            True,
        )
        return self.iterate_edges(
            objects.CreativeTemplateConnection,
            query_args,
            headers,
            token,
            "updateCreativeTemplates",
        )
    def delete_creative_template(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteCreativeTemplate`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteCreativeTemplate",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_creative_template,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteCreativeTemplate"
            ],
        )
    def create_entitlement(
        self,
        input: inputs.EntitlementInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.EntitlementFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Entitlement:
        """Run the ``createEntitlement`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createEntitlement",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_entitlement,
            False,
        )
        return objects.Entitlement(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createEntitlement"
            ],
        )
    def update_entitlement(
        self,
        id: str,
        input: inputs.EntitlementUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.EntitlementFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Entitlement:
        """Run the ``updateEntitlement`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateEntitlement",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_entitlement,
            False,
        )
        return objects.Entitlement(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateEntitlement"
            ],
        )
    def delete_entitlement(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteEntitlement`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteEntitlement",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_entitlement,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteEntitlement"
            ],
        )
    def create_marketing_campaign(
        self,
        input: inputs.MarketingCampaignInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingCampaign:
        """Run the ``createMarketingCampaign`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createMarketingCampaign",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_marketing_campaign,
            False,
        )
        return objects.MarketingCampaign(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createMarketingCampaign"
            ],
        )
    def update_marketing_campaign(
        self,
        id: str,
        input: inputs.MarketingCampaignUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingCampaign:
        """Run the ``updateMarketingCampaign`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateMarketingCampaign",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_marketing_campaign,
            False,
        )
        return objects.MarketingCampaign(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateMarketingCampaign"
            ],
        )
    def update_marketing_campaigns_each(
        self,
        input: inputs.MarketingCampaignUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MarketingCampaignConnection]:
        """Run the bulk ``updateMarketingCampaigns`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateMarketingCampaigns",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_marketing_campaigns_each,
            True,
        )
        return self.iterate_edges(
            objects.MarketingCampaignConnection,
            query_args,
            headers,
            token,
            "updateMarketingCampaigns",
        )
    def sync_marketing_campaign(
        self,
        id: str,
        input: Union[
            inputs.MarketingCampaignSyncInput, None, CinnamonUndefined
        ] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingCampaign:
        """Run the ``syncMarketingCampaign`` mutation for *id*; *input* is optional."""
        query_args = self._query_builder(
            "mutation",
            "syncMarketingCampaign",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.sync_marketing_campaign,
            False,
        )
        return objects.MarketingCampaign(
            self.api(headers=headers, token=token, **query_args)["data"][
                "syncMarketingCampaign"
            ],
        )
    def approve_marketing_campaign(
        self,
        id: str,
        last_change_date: datetime,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketingCampaignFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MarketingCampaign:
        """Run the ``approveMarketingCampaign`` mutation for *id* at *last_change_date*."""
        query_args = self._query_builder(
            "mutation",
            "approveMarketingCampaign",
            fields,
            {"id": id, "last_change_date": last_change_date,},
            _ARGUMENT_LEGENDS.approve_marketing_campaign,
            False,
        )
        return objects.MarketingCampaign(
            self.api(headers=headers, token=token, **query_args)["data"][
                "approveMarketingCampaign"
            ],
        )
    def delete_marketing_campaign(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteMarketingCampaign`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteMarketingCampaign",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_marketing_campaign,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteMarketingCampaign"
            ],
        )
    def create_marketplace(
        self,
        input: inputs.MarketplaceInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketplaceFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Marketplace:
        """Run the ``createMarketplace`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createMarketplace",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_marketplace,
            False,
        )
        return objects.Marketplace(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createMarketplace"
            ],
        )
    def update_marketplace(
        self,
        id: str,
        input: inputs.MarketplaceUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketplaceFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Marketplace:
        """Run the ``updateMarketplace`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateMarketplace",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_marketplace,
            False,
        )
        return objects.Marketplace(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateMarketplace"
            ],
        )
    def update_marketplaces_each(
        self,
        input: inputs.MarketplaceUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MarketplaceConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MarketplaceConnection]:
        """Run the bulk ``updateMarketplaces`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateMarketplaces",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_marketplaces_each,
            True,
        )
        return self.iterate_edges(
            objects.MarketplaceConnection,
            query_args,
            headers,
            token,
            "updateMarketplaces",
        )
    def delete_marketplace(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteMarketplace`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteMarketplace",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_marketplace,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteMarketplace"
            ],
        )
    def create_media_channel(
        self,
        input: inputs.MediaChannelCreateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MediaChannel:
        """Run the ``createMediaChannel`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createMediaChannel",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_media_channel,
            False,
        )
        return objects.MediaChannel(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createMediaChannel"
            ],
        )
    def import_media_channel(
        self,
        input: inputs.MediaChannelImportInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MediaChannel:
        """Run the ``importMediaChannel`` mutation with *input* and wrap the resulting channel."""
        query_args = self._query_builder(
            "mutation",
            "importMediaChannel",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.import_media_channel,
            False,
        )
        return objects.MediaChannel(
            self.api(headers=headers, token=token, **query_args)["data"][
                "importMediaChannel"
            ],
        )
    def update_media_channel(
        self,
        id: str,
        input: inputs.MediaChannelUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.MediaChannel:
        """Run the ``updateMediaChannel`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateMediaChannel",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_media_channel,
            False,
        )
        return objects.MediaChannel(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateMediaChannel"
            ],
        )
    def update_media_channels_each(
        self,
        input: inputs.MediaChannelUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.MediaChannelConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.MediaChannelConnection]:
        """Run the bulk ``updateMediaChannels`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateMediaChannels",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_media_channels_each,
            True,
        )
        return self.iterate_edges(
            objects.MediaChannelConnection,
            query_args,
            headers,
            token,
            "updateMediaChannels",
        )
    def delete_media_channel(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteMediaChannel`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteMediaChannel",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_media_channel,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteMediaChannel"
            ],
        )
    def update_notification(
        self,
        id: str,
        input: inputs.NotificationUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.NotificationFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Notification:
        """Run the ``updateNotification`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateNotification",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_notification,
            False,
        )
        return objects.Notification(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateNotification"
            ],
        )
    def create_organization(
        self,
        input: inputs.OrganizationInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.OrganizationFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Organization:
        """Run the ``createOrganization`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createOrganization",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_organization,
            False,
        )
        return objects.Organization(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createOrganization"
            ],
        )
    def update_organization(
        self,
        id: str,
        input: inputs.OrganizationUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.OrganizationFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Organization:
        """Run the ``updateOrganization`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateOrganization",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_organization,
            False,
        )
        return objects.Organization(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateOrganization"
            ],
        )
    def update_organizations_each(
        self,
        input: inputs.OrganizationUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.OrganizationConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.OrganizationConnection]:
        """Run the bulk ``updateOrganizations`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateOrganizations",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_organizations_each,
            True,
        )
        return self.iterate_edges(
            objects.OrganizationConnection,
            query_args,
            headers,
            token,
            "updateOrganizations",
        )
    def delete_organization(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteOrganization`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteOrganization",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_organization,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteOrganization"
            ],
        )
    def create_product(
        self,
        input: inputs.ProductInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ProductFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Product:
        """Run the ``createProduct`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createProduct",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_product,
            False,
        )
        return objects.Product(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createProduct"
            ],
        )
    def update_product(
        self,
        id: str,
        input: inputs.ProductUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ProductFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Product:
        """Run the ``updateProduct`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateProduct",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_product,
            False,
        )
        return objects.Product(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateProduct"
            ],
        )
    def update_products_each(
        self,
        input: inputs.ProductUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.ProductConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.ProductConnection]:
        """Run the bulk ``updateProducts`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateProducts",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_products_each,
            True,
        )
        return self.iterate_edges(
            objects.ProductConnection, query_args, headers, token, "updateProducts",
        )
    def delete_product(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteProduct`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteProduct",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_product,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteProduct"
            ],
        )
    def request_reset_password(
        self,
        input: inputs.RequestResetPasswordInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.RequestResultFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.RequestResult:
        """Run the ``requestResetPassword`` mutation with *input* and return its RequestResult."""
        query_args = self._query_builder(
            "mutation",
            "requestResetPassword",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.request_reset_password,
            False,
        )
        return objects.RequestResult(
            self.api(headers=headers, token=token, **query_args)["data"][
                "requestResetPassword"
            ],
        )
    def reset_password(
        self,
        input: inputs.ResetPasswordInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.UserFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.User:
        """Run the ``resetPassword`` mutation with *input* and return the affected User."""
        query_args = self._query_builder(
            "mutation",
            "resetPassword",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.reset_password,
            False,
        )
        return objects.User(
            self.api(headers=headers, token=token, **query_args)["data"][
                "resetPassword"
            ],
        )
    def update_user(
        self,
        input: inputs.UserUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.UserFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.User:
        """Run the ``updateUser`` mutation with *input* and return the updated User."""
        query_args = self._query_builder(
            "mutation",
            "updateUser",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.update_user,
            False,
        )
        return objects.User(
            self.api(headers=headers, token=token, **query_args)["data"]["updateUser"],
        )
    def create_vendor(
        self,
        input: inputs.VendorInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Vendor:
        """Run the ``createVendor`` mutation with *input* and wrap the created object."""
        query_args = self._query_builder(
            "mutation",
            "createVendor",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_vendor,
            False,
        )
        return objects.Vendor(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createVendor"
            ],
        )
    def update_vendor(
        self,
        id: str,
        input: inputs.VendorUpdateInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Vendor:
        """Run the ``updateVendor`` mutation for *id* with *input*."""
        query_args = self._query_builder(
            "mutation",
            "updateVendor",
            fields,
            {"id": id, "input": input,},
            _ARGUMENT_LEGENDS.update_vendor,
            False,
        )
        return objects.Vendor(
            self.api(headers=headers, token=token, **query_args)["data"][
                "updateVendor"
            ],
        )
    def update_vendors_each(
        self,
        input: inputs.VendorUpdateInput,
        sort: Union[inputs.SortInput, None, CinnamonUndefined] = CinnamonUndefined,
        filter: Union[dict, None, CinnamonUndefined] = CinnamonUndefined,
        show_deleted: Union[bool, None, CinnamonUndefined] = CinnamonUndefined,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorConnectionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> Iterable[objects.VendorConnection]:
        """Run the bulk ``updateVendors`` mutation and iterate its connection edges."""
        query_args = self._query_builder(
            "mutation",
            "updateVendors",
            fields,
            {
                "input": input,
                "sort": sort,
                "filter": filter,
                "show_deleted": show_deleted,
            },
            _ARGUMENT_LEGENDS.update_vendors_each,
            True,
        )
        return self.iterate_edges(
            objects.VendorConnection, query_args, headers, token, "updateVendors",
        )
    def delete_vendor(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteVendor`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteVendor",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_vendor,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteVendor"
            ],
        )
    def create_vendor_token(
        self,
        input: inputs.VendorTokenInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorTokenFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.VendorToken:
        """Run the ``createVendorToken`` mutation with *input* and wrap the created token."""
        query_args = self._query_builder(
            "mutation",
            "createVendorToken",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.create_vendor_token,
            False,
        )
        return objects.VendorToken(
            self.api(headers=headers, token=token, **query_args)["data"][
                "createVendorToken"
            ],
        )
    def login_vendor(
        self,
        input: inputs.LoginVendorInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorTokenFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.VendorToken:
        """Run the ``loginVendor`` mutation with *input* and return the resulting VendorToken."""
        query_args = self._query_builder(
            "mutation",
            "loginVendor",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.login_vendor,
            False,
        )
        return objects.VendorToken(
            self.api(headers=headers, token=token, **query_args)["data"]["loginVendor"],
        )
    def set_vendor_password(
        self,
        input: inputs.SetVendorPasswordInput,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.VendorTokenFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.VendorToken:
        """Run the ``setVendorPassword`` mutation with *input* and return the VendorToken."""
        query_args = self._query_builder(
            "mutation",
            "setVendorPassword",
            fields,
            {"input": input,},
            _ARGUMENT_LEGENDS.set_vendor_password,
            False,
        )
        return objects.VendorToken(
            self.api(headers=headers, token=token, **query_args)["data"][
                "setVendorPassword"
            ],
        )
    def delete_vendor_token(
        self,
        id: str,
        fields: List[
            Union[QueryField, QueryFieldSet, str]
        ] = fields_module.DeletionFields._sdk_default_fields,
        headers: Union[dict, None] = None,
        token: Union[str, None] = None,
    ) -> objects.Deletion:
        """Run the ``deleteVendorToken`` mutation for *id* and return the Deletion result."""
        query_args = self._query_builder(
            "mutation",
            "deleteVendorToken",
            fields,
            {"id": id,},
            _ARGUMENT_LEGENDS.delete_vendor_token,
            False,
        )
        return objects.Deletion(
            self.api(headers=headers, token=token, **query_args)["data"][
                "deleteVendorToken"
            ],
        )
# Explicit public API: only the Cinnamon client class is exported.
__all__ = ["Cinnamon"]
|
from datetime import date, timedelta
from random import choice
from flask import g
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton
import telebot_login
from app import new_functions as nf
from app.constants import (
emoji, all_stations, loading_text, fast_trail_answer_select_day
)
from tg_bot import bot
from tg_bot.keyboards import (
start_station_keyboard, end_station_keyboard, select_day_keyboard,
update_keyboard
)
# Fast trail messages
@bot.message_handler(
    func=lambda mess: mess.text.title() == "В Универ",
    content_types=["text"]
)
@bot.message_handler(
    func=lambda mess: mess.text.title() == "Домой",
    content_types=["text"]
)
@telebot_login.login_required_message
def fast_trail_handler(message):
    """Handle the two fast-trail text buttons and send today's suburban
    timetable for home->university or university->home.

    If today's trains are exhausted, warns the user and attaches a keyboard
    built for tomorrow instead.
    """
    user = g.current_tbot_user
    bot.send_chat_action(user.tg_id, "typing")
    # Direction is derived from the button text; title-cased to match either case.
    if message.text.title() == "В Универ":
        from_code = user.home_station_code
        to_code = user.univer_station_code
    else:
        from_code = user.univer_station_code
        to_code = user.home_station_code
    answer, is_tomorrow, is_error = nf.create_suburbans_answer(
        from_code=from_code,
        to_code=to_code,
        for_date=date.today()
    )
    if not is_error:
        if is_tomorrow:
            # No trains left today: warn, then offer an "update for tomorrow" keyboard.
            bot.send_message(
                chat_id=user.tg_id,
                text=emoji["warning"] + " На сегодня нет электричек"
            )
            inline_keyboard = update_keyboard(for_tomorrow=is_tomorrow)
        else:
            inline_keyboard = InlineKeyboardMarkup()
    bot.send_message(
        chat_id=user.tg_id,
        text=answer,
        reply_markup=inline_keyboard,
        parse_mode='HTML',
        disable_web_page_preview=True
    )
# Trail message
@bot.message_handler(
    func=lambda mess: mess.text.title() == "Маршрут",
    content_types=["text"]
)
@telebot_login.login_required_message
def own_trail_handler(message):
    """Begin the custom-route dialog by asking for a departure station."""
    current_user = g.current_tbot_user
    keyboard = start_station_keyboard()
    bot.send_message(
        chat_id=current_user.tg_id,
        text="Выбери начальную станцию:",
        reply_markup=keyboard,
    )
# personal trails callbacks
@bot.callback_query_handler(
    func=lambda call_back: call_back.data == "Домой"
)
@bot.callback_query_handler(
    func=lambda call_back: call_back.data == "В Универ"
)
@telebot_login.login_required_callback
def to_home_or_univer_handler(call_back):
    """Show the fast-trail route summary and a day-selection keyboard."""
    user = g.current_tbot_user
    # "В Универ" means home -> university; "Домой" is the reverse direction.
    if call_back.data == "В Универ":
        from_code, to_code = user.home_station_code, user.univer_station_code
    else:
        from_code, to_code = user.univer_station_code, user.home_station_code
    text = fast_trail_answer_select_day.format(
        from_title=nf.get_key_by_value(all_stations, from_code),
        to_title=nf.get_key_by_value(all_stations, to_code),
    )
    bot.edit_message_text(
        text=text,
        chat_id=user.tg_id,
        message_id=call_back.message.message_id,
        reply_markup=select_day_keyboard(),
        parse_mode="HTML",
    )
# From station callback
@bot.callback_query_handler(
    func=lambda call_back: call_back.message.text == "Выбери начальную станцию:"
)
@telebot_login.login_required_callback
def start_station_handler(call_back):
    """The user picked a departure station; ask for the destination."""
    user = g.current_tbot_user
    chosen = call_back.data
    bot.edit_message_text(
        text="Начальная: <b>{0}</b>\nВыбери конечную станцию:".format(chosen),
        chat_id=user.tg_id,
        message_id=call_back.message.message_id,
        reply_markup=end_station_keyboard(chosen),
        parse_mode="HTML",
    )
# Change start station callback
@bot.callback_query_handler(
    func=lambda call_back: call_back.data == "Изменить начальную"
)
@telebot_login.login_required_callback
def change_start_station_handler(call_back):
    """Restart route selection from the departure-station keyboard."""
    user = g.current_tbot_user
    bot.edit_message_text(
        text="Выбери начальную станцию:",
        chat_id=user.tg_id,
        message_id=call_back.message.message_id,
        reply_markup=start_station_keyboard(),
    )
# To station callback
@bot.callback_query_handler(
    func=lambda call_back: "Выбери конечную станцию:" in call_back.message.text
)
@telebot_login.login_required_callback
def end_station_handler(call_back):
    """The user picked a destination; show the route and a day keyboard."""
    user = g.current_tbot_user
    text = nf.add_end_station(call_back.message.text, call_back.data)
    keyboard = select_day_keyboard()
    # Extra row holding a single "change destination" button.
    change_button = InlineKeyboardButton(
        text="Изменить конечную", callback_data="Изменить конечную"
    )
    keyboard.row(change_button)
    bot.edit_message_text(
        text=text,
        chat_id=user.tg_id,
        message_id=call_back.message.message_id,
        reply_markup=keyboard,
        parse_mode="HTML",
    )
# Change end station callback
@bot.callback_query_handler(
    func=lambda call_back: call_back.data == "Изменить конечную"
)
@telebot_login.login_required_callback
def change_end_station_handler(call_back):
    """Go back to the end-station choice, keeping the chosen start station."""
    user = g.current_tbot_user
    start_station = nf.get_station_title_from_text(call_back.message.text)
    bot.edit_message_text(
        text="Начальная: <b>{0}</b>\nВыбери конечную станцию:".format(
            start_station
        ),
        chat_id=user.tg_id,
        message_id=call_back.message.message_id,
        reply_markup=end_station_keyboard(start_station)
    )
# Day callback
@bot.callback_query_handler(
    func=lambda call_back: "Выбери день:" in call_back.message.text
)
@telebot_login.login_required_callback
def build_trail_handler(call_back):
    """Build and send the suburban timetable for the day chosen by the user.

    Station codes are parsed back out of the message text; a "loading"
    placeholder is shown while the timetable service is queried.
    """
    user = g.current_tbot_user
    # Show a random loading phrase (with an ellipsis) while fetching
    bot_msg = bot.edit_message_text(
        text="{0}\U00002026".format(choice(loading_text["ya_timetable"])),
        chat_id=user.tg_id,
        message_id=call_back.message.message_id
    )
    answer, is_tomorrow, is_error = nf.create_suburbans_answer(
        from_code=nf.get_station_code_from_text(
            call_back.message.text
        ),
        to_code=nf.get_station_code_from_text(
            call_back.message.text, is_end=True
        ),
        # "Завтра" shifts the request one day ahead and shows more trains
        for_date=date.today() + timedelta(
            days=(1 if call_back.data == "Завтра" else 0)
        ),
        limit=7 if call_back.data == "Завтра" else 3
    )
    if not is_error:
        if call_back.data == "Завтра" or is_tomorrow:
            if is_tomorrow:
                # The user asked for today but nothing runs today: alert popup
                inline_answer = emoji["warning"] + " На сегодня нет электричек"
                bot.answer_callback_query(
                    call_back.id, inline_answer, show_alert=True
                )
            inline_keyboard = update_keyboard(for_tomorrow=True)
        else:
            inline_keyboard = update_keyboard()
    else:
        # On error, attach an empty keyboard so the message has no buttons
        inline_keyboard = InlineKeyboardMarkup()
    bot.edit_message_text(
        text=answer,
        chat_id=user.tg_id,
        message_id=bot_msg.message_id,
        reply_markup=inline_keyboard,
        parse_mode="HTML"
    )
|
from django.test import TestCase
from django.contrib.auth.models import User
from users.models import User_Profile
class AnimalTestCase(TestCase):
    """Tests for the ``User_Profile`` model's string representation."""

    def setUp(self):
        """Create an auth ``User`` and a linked ``User_Profile`` fixture."""
        user = User.objects.create(username='testcase',
                                   first_name='test',
                                   last_name='case',
                                   email='testcase@ualberta.ca')
        # BUG FIX: the profile was created without linking it to the user,
        # so the ``get(user=...)`` lookup in the test below could never
        # succeed. NOTE(review): assumes User_Profile has a ``user`` FK, as
        # implied by that lookup — confirm against users/models.py.
        User_Profile.objects.create(
            user=user,
            displayName='case_1',
            profileImage='test_image.jpg',
            github='https://github.com/orgs/2021fallCMPUT404/dashboard',
            bio='test_bio')

    def test_users_model_displayName(self):
        """The profile's __str__ should be its displayName."""
        # BUG FIX: django.contrib.auth.models.User has no "name" field;
        # the lookup must use "username".
        test_user_1 = User.objects.get(username="testcase")
        test_profile_1 = User_Profile.objects.get(user=test_user_1)
        self.assertEqual(test_profile_1.__str__(), 'case_1')
|
import cudamat as cm
import ctc_fast as ctc
import numpy as np
import pdb
class NNet:
    """GPU (cudamat) feed-forward network with an optional recurrent
    ("temporal") hidden layer, trained with CTC loss.

    NOTE(review): this module is Python 2 code (``xrange``, ``print``
    statements, ``cPickle``); it must run under a Python 2 interpreter.
    """

    def __init__(self,inputDim,outputDim,layerSize,numLayers,maxBatch,
                 train=True,temporalLayer=-1):
        # Initialize cublas
        cm.cublas_init()
        self.outputDim = outputDim
        self.inputDim = inputDim
        self.layerSize = layerSize
        self.numLayers = numLayers
        # All hidden layers share the same width
        self.layerSizes = [layerSize]*numLayers
        # Upper bound on utterance length; GPU buffers are sized once for it
        self.maxBatch = maxBatch
        self.train = train
        if not self.train:
            # Inference may underflow in softmax; silence numpy warnings
            np.seterr(all='ignore')
        # temporalLayer is the index of the recurrent layer; -1 disables it
        if temporalLayer <= 0 or temporalLayer >= numLayers:
            self.temporalLayer = -1
        else:
            self.temporalLayer = temporalLayer
        # Clamp bound for recurrent activations to avoid blow-up
        self.maxAct = 20.0

    def initParams(self):
        """
        Initialize parameters using 6/sqrt(fanin+fanout)
        """
        sizes = [self.inputDim]+self.layerSizes+[self.outputDim]
        scales = [np.sqrt(6)/np.sqrt(n+m) for n,m in zip(sizes[:-1],sizes[1:])]
        # Per layer: [weights (m x n), bias (m x 1)], weights uniform in [-s, s]
        self.stack = [[np.random.rand(m,n)*2*s-s,np.zeros((m,1))] \
                      for n,m,s in zip(sizes[:-1],sizes[1:],scales)]
        # Pre-allocated activation buffers, one per layer, maxBatch columns wide
        self.hActs_M = [cm.empty((s,self.maxBatch)) for s in sizes]
        if self.train:
            # Now assuming that all layers are the same size
            self.grad = [[cm.empty(w.shape),cm.empty(b.shape)] for w,b in self.stack]
            self.deltasC_M = cm.empty((self.outputDim,self.maxBatch))
            self.deltasOut_M = cm.empty((sizes[1],self.maxBatch))
            self.deltasIn_M = cm.empty((sizes[1],self.maxBatch))
            self.tmpGrad_M = cm.empty((self.layerSize,self.maxBatch))
        # Allocate memory once here and reuse
        # Store probs
        self.probs_M = cm.empty((self.outputDim,self.maxBatch))
        # Store col max
        self.rowVec_M = cm.empty((1,self.maxBatch))
        # Move the parameter stack onto the GPU
        self.stack = [[cm.CUDAMatrix(w),cm.CUDAMatrix(b)]
                      for w,b in self.stack]
        if self.temporalLayer > 0:
            # dummy bias used for temporal layer
            dummy = cm.empty((1,1))
            dummy.assign(0.0)
            # Recurrent weight matrix, same init scheme (fanin == fanout)
            scale = np.sqrt(6)/np.sqrt(self.layerSize*2)
            wt = 2*scale*np.random.rand(self.layerSize,self.layerSize)-scale
            wt = cm.CUDAMatrix(wt)
            self.stack.append([wt,dummy])
            if self.train:
                dwt = cm.empty((self.layerSize,self.layerSize))
                self.grad.append([dwt,dummy])
                self.deltaTemp_M = cm.empty((self.layerSize,self.maxBatch))

    def setViews(self,batchSize):
        """
        Sets view of gpu memory to be correct size for utterance.
        """
        assert batchSize <= self.maxBatch, "Batch size exceeds max batch"
        self.hActs = [H.get_col_slice(0,batchSize) for H in self.hActs_M]
        self.probs = self.probs_M.get_col_slice(0,batchSize)
        self.rowVec = self.rowVec_M.get_col_slice(0,batchSize)
        if self.train:
            self.deltasC = self.deltasC_M.get_col_slice(0,batchSize)
            self.deltasOut = self.deltasOut_M.get_col_slice(0,batchSize)
            self.deltasIn = self.deltasIn_M.get_col_slice(0,batchSize)
            self.tmpGrad = self.tmpGrad_M.get_col_slice(0,batchSize)
        if self.temporalLayer > 0:
            self.deltaTemp = self.deltaTemp_M.get_col_slice(0,batchSize)

    def costAndGrad(self,data,labels=None):
        """Forward (and, when training, backward) prop for one utterance.

        data is an (inputDim x T) feature matrix; labels an int sequence.
        Returns the best-path decode when not training, otherwise
        (cost, grad, skip) where skip means CTC rejected the utterance.
        """
        T = data.shape[1]
        self.setViews(T)
        if self.temporalLayer > 0:
            # Last stack entry holds the recurrent weight matrix
            stack = self.stack[:-1]
            wt,_ = self.stack[-1]
            if self.train:
                grad = self.grad[:-1]
                dwt,_ = self.grad[-1]
        else:
            stack = self.stack
            if self.train:
                grad = self.grad
        # forward prop
        self.hActs[0].assign(cm.CUDAMatrix(data))
        i = 1
        for w,b in stack:
            cm.dot(w,self.hActs[i-1],self.hActs[i])
            self.hActs[i].add_col_vec(b)
            # forward prop through time
            if i == self.temporalLayer:
                for t in xrange(1,T):
                    # Clamp the previous column, then add its recurrent
                    # contribution into column t
                    self.hActs[i].minmax(0.0,self.maxAct,col=t-1)
                    cm.mvdot_col_slice(wt,self.hActs[i],t-1,self.hActs[i],t,beta=1.0)
                self.hActs[i].minmax(0.0,self.maxAct,col=T-1)
            if i <= self.numLayers and i != self.temporalLayer:
                # hard relu
                self.hActs[i].maximum(0.0)
            i += 1
        # Subtract max activation
        self.hActs[-1].max(axis=0,target=self.rowVec)
        self.hActs[-1].add_row_mult(self.rowVec,-1.0,target=self.probs)
        # Softmax
        cm.exp(self.probs)
        self.probs.sum(axis=0,target=self.rowVec)
        cm.pow(self.rowVec,-1.0,target=self.rowVec)
        self.probs.mult_by_row(self.rowVec)
        self.probs.copy_to_host()
        if not self.train:
            return ctc.decode_best_path(self.probs.numpy_array.astype(np.float64))
        cost, deltas, skip = ctc.ctc_loss(self.probs.numpy_array.astype(np.float64),
                                          labels,blank=0)
        if skip:
            # CTC asked to skip this utterance; caller ignores the stale grad
            return cost,self.grad,skip
        self.deltasC.assign(cm.CUDAMatrix(deltas))
        # back prop
        i = self.numLayers
        deltasIn,deltasOut = self.deltasC,self.deltasOut
        for w,b in reversed(stack):
            # compute gradient
            cm.dot(deltasIn,self.hActs[i].T,target=grad[i][0])
            deltasIn.sum(axis=1,target=grad[i][1])
            # compute next layer deltas
            if i > 0:
                cm.dot(w.T,deltasIn,target=deltasOut)
            # backprop through time
            if i == self.temporalLayer:
                # Mask of activations strictly inside (0, maxAct): the
                # derivative of the clamped activation
                self.hActs[i].within(0.0,self.maxAct,target=self.tmpGrad)
                self.deltaTemp.assign(0.0)
                for t in xrange(T-1,0,-1):
                    # Add in temporal delta
                    cm.mvdot_col_slice(wt.T,self.deltaTemp,t,deltasOut,t,beta=1.0)
                    # Push through activation fn
                    deltasOut.mult_slice(t,self.tmpGrad,t)
                    self.deltaTemp.set_single_col(t-1,deltasOut,t)
                # Accumulate temporal gradient
                cm.dot(self.deltaTemp,self.hActs[i].T,
                       target=dwt)
                cm.mvdot_col_slice(wt.T,self.deltaTemp,0,deltasOut,0,beta=1.0)
                deltasOut.mult_slice(0,self.tmpGrad,0)
            if i > 0 and i != self.temporalLayer:
                # ReLU derivative: 1 where activation survived, else 0
                self.hActs[i].sign(target=self.tmpGrad)
                deltasOut.mult(self.tmpGrad)
            if i == self.numLayers:
                # After the top layer, switch to the pre-allocated in-buffer
                deltasIn = self.deltasIn
            deltasIn,deltasOut = deltasOut,deltasIn
            i -= 1
        return cost,self.grad,skip

    def updateParams(self,scale,update):
        """Apply ``param += scale * update`` to every weight/bias pair."""
        for params,paramsDel in zip(self.stack,update):
            w,b = params
            dw,db = paramsDel
            w.add_mult(dw, alpha=scale)
            b.add_mult(db, alpha=scale)

    def toFile(self,fid):
        """
        Saves only the network parameters to the given fd.
        """
        import cPickle as pickle
        stack = []
        for w,b in self.stack:
            w.copy_to_host()
            b.copy_to_host()
            stack.append([w.numpy_array,b.numpy_array])
        pickle.dump(stack,fid)

    def fromFile(self,fid):
        """Load parameters saved by ``toFile`` into the existing GPU stack."""
        import cPickle as pickle
        stack = pickle.load(fid)
        for (w,b),(wi,bi) in zip(self.stack,stack):
            w.copy_to_host()
            b.copy_to_host()
            w.numpy_array[:] = wi[:]
            b.numpy_array[:] = bi[:]
            w.copy_to_device()
            b.copy_to_device()

    def check_grad(self,data,labels,epsilon=1e-3):
        """Finite-difference check of the analytic gradient (slow; debug only)."""
        cost,grad,_ = self.costAndGrad(data,labels)
        # TODO randomize grad check selection
        for param,delta in zip(self.stack,grad):
            w,b = param
            dw,db = delta
            dw.copy_to_host()
            w.copy_to_host()
            for i in xrange(w.shape[0]):
                for j in xrange(w.shape[1]):
                    # Perturb one weight, re-run the cost, compare slopes
                    w.numpy_array[i,j] += epsilon
                    w.copy_to_device()
                    costP,_,_ = self.costAndGrad(data,labels)
                    numGrad = (costP - cost) / epsilon
                    w.numpy_array[i,j] -= epsilon
                    print "Analytic %f, Numeric %f"%(dw.numpy_array[i,j],numGrad)
if __name__=='__main__':
    # Smoke test: load one utterance and gradient-check a small recurrent net.
    import dataLoader as dl
    np.random.seed(33)
    layerSize = 40
    numLayers = 3
    dataDir = "/scail/group/deeplearning/speech/awni/kaldi-stanford/kaldi-trunk/egs/swbd/s5b/exp/train_ctc/"
    # 15 stacked frames of 41-dim features
    inputDim = 41*15
    rawDim = 41*15
    outputDim = 35
    maxUttLen = 1500
    loader = dl.DataLoader(dataDir,rawDim,inputDim)
    data_dict,alis,keys,_ = loader.loadDataFileDict(1)
    data,labels = data_dict[keys[3]],np.array(alis[keys[3]],dtype=np.int32)
    rnn = NNet(inputDim,outputDim,layerSize,numLayers,maxUttLen,temporalLayer=2)
    rnn.initParams()
    cost,grad,_ = rnn.costAndGrad(data,labels)
    print "COST %.9f"%cost
    rnn.check_grad(data,labels)
|
from models.ae import AutoEncoder
from models.disentangle_vae import DisentangleVAE
from models.parallel_ae import ParallelAE
from models.syntax_guide_vae import SyntaxGuideVAE
from models.syntax_vae import SyntaxVAE
from models.vanilla_vae import VanillaVAE
# Registry of supported model classes, keyed by their public name.
MODEL_CLS = {
    'AutoEncoder': AutoEncoder,
    'VanillaVAE': VanillaVAE,
    'SyntaxGuideVAE': SyntaxGuideVAE,
    'DisentangleVAE': DisentangleVAE,
    'SyntaxVAE': SyntaxVAE,
    'ParallelAE': ParallelAE,
}
def init_create_model(model: str, **kwargs):
    """Instantiate the model class registered under *model* with **kwargs.

    Raises ValueError when *model* is not a registered class name.
    """
    if model in MODEL_CLS:
        return MODEL_CLS[model](**kwargs)
    raise ValueError(
        "Invalid model class \'{}\' provided. Only {} are supported now.".format(
            model, list(MODEL_CLS.keys())))
def load_static_model(model: str, model_path: str):
    """Load a saved model of registered class *model* from *model_path*.

    Raises ValueError when *model* is not a registered class name.
    """
    if model in MODEL_CLS:
        return MODEL_CLS[model].load(model_path)
    raise ValueError(
        "Invalid model class \'{}\' provided. Only {} are supported now.".format(
            model, list(MODEL_CLS.keys())))
|
import os
import common
import constant
import creds
from . import common as ami_common
# Fallback values for packer environment variables the caller has not set
# in the process environment.
DEFAULT = {
    'PACKER_SSH_AGENT_AUTH': 'false',
    'PACKER_SSH_INTERFACE': 'public_ip',
    'PACKER_ASSOCIATE_PUBLIC_IP': 'true',
    'PACKER_IAM_GROUPS': ami_common.DEFAULT['IAM_GROUPS'],
}
def main(hmrc_role, environment, args, verbose_logging=False):
    """Run packer for the template named in args[0] with role credentials.

    args[0] selects the packer template directory under PACKER_ROOT; the
    remaining args are forwarded to the packer command line.
    """
    env = {
        'PACKER_LOG': str(1 if verbose_logging else 0),
        'PACKER_SSH_USER': (
            os.environ.get('PACKER_SSH_USER') or constant.PACKER_SSH_USER
        )
    }
    # Take these from the environment when present, otherwise from DEFAULT
    for name in ('PACKER_SSH_AGENT_AUTH',
                 'PACKER_SSH_INTERFACE',
                 'PACKER_ASSOCIATE_PUBLIC_IP',
                 'PACKER_IAM_GROUPS'):
        env[name] = os.environ.get(name, DEFAULT[name])
    # Temporary AWS credentials for the role, exported as env variables
    env.update(creds.aws.assume_role(hmrc_role, environment, as_env=True))
    # Network settings resolved with a second role session
    env.update(
        ami_common.network(creds.aws.assume_role(hmrc_role, environment))
    )
    packer_cmd = ['packer'] + args[1:]
    common.subshell(packer_cmd, env=env, cwd=f"{ami_common.PACKER_ROOT}/{args[0]}")
def main_log(*args, **kwargs):
    """Entry point identical to ``main`` but with verbose logging forced on."""
    return main(*args, verbose_logging=True, **kwargs)
|
import app.frequent_rules as frq
from model.dataframe import DataFrameWrapper
def kamiran(input_file, output_file, min_sup, min_conf, alpha, attr, d_attr, d_attr_dep, index, label, rank):
    """Run Kamiran-style "massaging" to reduce discrimination in a dataset.

    Mines frequent / classification / background / redlining /
    discriminatory rules before and after the massage step, writes the
    massaged data to *output_file*, and returns a textual report combining
    the post-massage discrimination measure and rule-utility metrics.
    """
    db = DataFrameWrapper(input_file)
    # Rule sets mined on the original data
    rules = frq.get_frequent_rules(db, attr, min_sup, min_conf)
    fr = frq.get_frequent_classification_rules(rules.copy(), label)
    br = frq.get_frequent_background_rules(rules.copy(), d_attr)
    rr = frq.get_redlining_rules(db, fr, br, d_attr, alpha, label)
    mr = frq.get_discriminatory_rules(db, fr, d_attr, alpha)
    # Massage (delete/duplicate rows) to balance label rates per group
    db = massage(db, d_attr_dep, label, rank, index)
    # Re-mine the same rule sets on the massaged data
    rules_new = frq.get_frequent_rules(db, attr, min_sup, min_conf)
    fr_new = frq.get_frequent_classification_rules(rules_new.copy(), label)
    br_new = frq.get_frequent_background_rules(rules_new.copy(), d_attr)
    rr_new = frq.get_redlining_rules(db, fr_new, br_new, d_attr, alpha, label)
    mr_new = frq.get_discriminatory_rules(db, fr_new, d_attr, alpha)
    output = db.calc_discrimination(d_attr_dep, label, "AFTER (KAMIRAN)")
    output += frq.measure_utility(fr, rr, mr, fr_new, rr_new, mr_new, label)
    db.export(output_file)
    return output
def massage(db, d_attr, label, rank, index_column):
    """Balance label rates per sensitive group by deleting/duplicating rows.

    *d_attr* is an iterable of (sensitive_attr, explanatory_attrs) pairs;
    every True/False assignment of the explanatory attributes is visited.
    *rank* orders rows so border-line ones are touched first — TODO confirm
    the intended ordering semantics against DataFrameWrapper. Mutates and
    returns *db*.
    """
    for d_att in d_attr:
        s = d_att[0]            # sensitive attribute name
        expl_attr = d_att[1]    # explanatory attributes to condition on
        # Enumerate assignments as a binary counter (see _next_value)
        values = [False] * len(d_att[1])
        while values is not None:
            evidence = list(zip(expl_attr, values))
            values = _next_value(values)
            # Restrict the frame to rows matching the current evidence
            grouped = db.df()
            for entry in evidence:
                grouped = grouped.loc[grouped[entry[0]] == entry[1]]
            for s_val in (True, False):
                grouped_s = grouped.loc[grouped[s] == s_val]
                # Number of rows to remove/add for this group
                delta_s = delta(db, (s, s_val), evidence, label)
                grouped_del = grouped_s.loc[grouped_s[label] == s_val].sort_values(rank, ascending=s_val)
                grouped_dup = grouped_s.loc[grouped_s[label] == (not s_val)].sort_values(rank, ascending=(not s_val))
                delta_pos = delta_s
                delta_neg = delta_s
                # Delete up to delta_s rows whose label matches s_val
                for row in grouped_del.iterrows():
                    if delta_pos == 0:
                        break
                    db.delete(row[1][index_column])
                    delta_pos -= 1
                # Duplicate up to delta_s rows with the opposite label
                for row in grouped_dup.iterrows():
                    if delta_neg == 0:
                        break
                    db.duplicate(row[1][index_column])
                    delta_neg -= 1
    return db
def delta(db, s, e, label):
    """Number of rows to massage for sensitive value *s* given evidence *e*.

    p_star is the positive-label rate averaged over both values of the
    sensitive attribute; the gap to p_star is scaled by the group size.
    NOTE(review): relies on ``db.prob`` returning a (count, probability)
    style pair — confirm against DataFrameWrapper.
    """
    p_star = 0.5*(db.prob([(label, True)], e + [(s[0], True)])[1] + db.prob([(label, True)], e + [(s[0], False)])[1])
    return int(db.prob([s], e)[0] * abs(db.prob([(label, True)], [s]+e)[1] - p_star))
def _next_value(array):
for i in range(len(array)):
if not array[-i - 1]:
array[-i - 1] = True
return array
else:
array[-i - 1] = False
return None
|
from torch import nn
from catalyst.core.callbacks import CriterionCallback
from catalyst.core import State
from src.models.prediction_heads import HeadOutputKeys
from src.label_encodings import CtcLabelEncoding, SequenceLabelEncoding
class CtcCriterionCallback(CriterionCallback):
    """
    Callback for CTC Loss calculation on specified head
    """

    def __init__(self, head_key, multiplier=1.0, loss_suffix="loss"):
        # Bind state keys to torch.nn.CTCLoss argument names.
        # Ground truth sequence and its length come from the label encoding.
        input_key = {
            CtcLabelEncoding.LABELS_KEY: "targets",
            CtcLabelEncoding.LABEL_LENGTHS_KEY: "target_lengths",
        }
        # Predicted log-probs and their lengths come from this head's output.
        output_key = {
            "{}.{}".format(head_key, HeadOutputKeys.LOG_PROBS): "log_probs",
            "{}.{}".format(head_key, HeadOutputKeys.LOG_PROBS_LEN): "input_lengths",
        }
        super().__init__(
            input_key=input_key,
            output_key=output_key,
            prefix="{}.{}".format(head_key, loss_suffix),
            multiplier=multiplier
        )
        self._criterion = nn.CTCLoss(blank=CtcLabelEncoding.BLANK_TOKEN, zero_infinity=True)

    def on_stage_start(self, state: State):
        # Intentionally a no-op: this callback owns its criterion, so the
        # parent's criterion resolution must be skipped.
        pass
class CrossEntropyCriterionCallback(CriterionCallback):
    """
    Callback for crossentropy loss calculation on specified head
    """

    def __init__(self, head_key, multiplier=1.0, loss_suffix="loss"):
        # State key holding this head's logits
        logits_key = "{}.{}".format(head_key, HeadOutputKeys.LOGITS)
        super().__init__(
            input_key=SequenceLabelEncoding.LABELS_KEY,
            output_key=logits_key,
            prefix="{}.{}".format(head_key, loss_suffix),
            multiplier=multiplier
        )
        # Padded positions are excluded from the loss
        self._criterion = nn.CrossEntropyLoss(ignore_index=SequenceLabelEncoding.PAD_TOKEN)

    def on_stage_start(self, state: State):
        # Intentionally a no-op: this callback owns its criterion, so the
        # parent's criterion resolution must be skipped.
        pass
|
from django.apps import AppConfig
class TimerecorderConfig(AppConfig):
    """Django application configuration for the ``timerecorder`` app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'timerecorder'
|
import os
import sys
import re
# Prefer setuptools; fall back to distutils on very old environments
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='galstreams',
    version='1.0.0',
    author='C. Mateu',
    author_email='cmateu@fisica.edu.uy',
    # NOTE(review): package names conventionally use dots
    # ('galstreams.lib'), not slashes — confirm these slash-style names
    # actually install the subpackage and its data correctly.
    packages=['bovy_coords','galstreams','gcutils','galstreams/lib'],
    package_data={'galstreams/lib':['*.dat',
                                    '*.log',
                                    'streams_lib_notes.ipynb',
                                    'globular_cluster_params.harris2010.tableI.csv']},
    # scripts=['bin/'],
    url='https://github.com/cmateu/galstreams',
    license='LICENSE',
    description='MW stream library toolkit',
    long_description=open('README.md').read(),
    install_requires=[
        "numpy",
        "scipy",
        "astropy",
        "astro-gala"
    ],
)
|
from django import forms
from django.db import models
from django.db.models import fields
from .models import CreatorProfile, LearnerProfile, Courses, Modules, Classroom, Reviews, FollowList, ClassroomModules
class CreatorRegisterForm(forms.ModelForm):
    """Registration form for course creators (CreatorProfile)."""
    class Meta:
        model = CreatorProfile
        fields = [
            'Name',
            'Email',
            'Date_Of_Birth',
            'City',
            'State',
            'Educational_Qualification'
        ]
class LearnerRegisterForm(forms.ModelForm):
    """Registration form for learners (LearnerProfile)."""
    class Meta:
        model = LearnerProfile
        fields = [
            'Name',
            'Email',
            'Date_Of_Birth',
            'City',
            'State'
        ]
class CourseCreationForm(forms.ModelForm):
    """Form for creators to publish a new course."""
    class Meta:
        model = Courses
        fields = [
            'Course_Name',
            'Course_Desc',
            'Course_Tag'
        ]
class ModuleCreationForm(forms.ModelForm):
    """Form for adding a module (title, content, link) to a course."""
    class Meta:
        model = Modules
        fields = [
            'Title',
            'Content',
            'link'
        ]
class RateAndReviewForm(forms.Form):
    """Form to rate the instructor and the course (0-5) and leave a review."""
    # (value, label) pairs for the 0..5 rating dropdowns
    ratechoices = [(score, str(score)) for score in range(6)]
    RateCreator = forms.CharField(label = 'Rate the instructor out of 5 (On the Basis of the ease with you understood the concepts) : ', widget = forms.Select(choices = ratechoices))
    Rate = forms.CharField(label = 'Rate the course out of 5 (On the Basis of the structure and Quality of Content Presented): ', widget = forms.Select(choices = ratechoices))
    Review = forms.CharField(widget = forms.Textarea)
class SearchByTag(forms.Form):
    """Single-field form to search courses by tag."""
    tag = forms.CharField(max_length = 200)
# Address and port suffix of the blockchain node to talk to
blockchain_address = '172.26.71.249'
port = ':5000'
# Certificate-authority node addresses
CA_addresses = ["18.191.21.38"]
# On-disk storage locations for chain and pending transactions
chain_file = "data/chain"
transactions_file = "data/transactions"
# Mining interval — presumably seconds; TODO confirm against the miner loop
mine_time = 10
import unittest
import my_lib
import random
import statistics
class TestLib(unittest.TestCase):
    """Unit tests for my_lib.list_avg and my_lib.load_sample."""

    @classmethod
    def setUpClass(cls):
        print("\n Running class setUp...")

    @classmethod
    def tearDownClass(cls):
        print("\n Running class tearDown...")

    def setUp(self):
        print("\nRunning setUp...")

    def tearDown(self):
        print("Running tearDown...")

    def test_list_avg_null(self):
        """None input yields None."""
        res = my_lib.list_avg(None)
        self.assertEqual(res, None)
        self.assertIsNone(res)

    def test_list_avg_empty(self):
        """Empty list yields None."""
        self.assertIsNone(my_lib.list_avg([]))

    def test_mylib_load(self):
        """load_sample returns nothing."""
        self.assertIsNone(my_lib.load_sample())

    def test_list_avg_const(self):
        """Average of identical values is that value."""
        print("Running test_list_avg_const...")
        for values, expected in (
            ([5, 5, 5, 5, 5, 5], 5),
            ([-10, -10, -10], -10),
            ([23], 23),
        ):
            self.assertEqual(my_lib.list_avg(values), expected)

    def test_list_avg_floats(self):
        """Random float lists must match statistics.mean to 10 places."""
        for _ in range(10):
            size = random.randint(1, 100)
            vals = [random.uniform(-200, 1000) for _ in range(size)]
            self.assertAlmostEqual(
                my_lib.list_avg(vals), statistics.mean(vals), places=10
            )

    def test_list_avg_nonlist(self):
        """Non-list inputs raise TypeError."""
        self.assertRaises(TypeError, my_lib.list_avg, {'a': 1, 'b': 23.0})
        # dictionary input
        self.assertRaises(TypeError, my_lib.list_avg, {'a': 1, 'b': 2, 'c': 3})
        # set input
        self.assertRaises(TypeError, my_lib.list_avg, {4.4, 55, -0.4, 18})
class OtherTest(unittest.TestCase):
    """Placeholder suite demonstrating a second, independent test class."""
    @classmethod
    def setUpClass(cls):
        print("\nOther class setup...")
    @classmethod
    def tearDownClass(cls):
        print("\nOther class tear down...")
    def test_other_func_or_lib(self):
        # No assertions yet; exists to show per-class setup/teardown ordering
        print("Running our test for other stuff...")
# Run all test suites when this file is executed directly
if __name__ == "__main__":
    unittest.main()
from django.contrib import admin
from .models import CertContent
from .models import ProductContent
from .models import UsesContent
from .models import ProductButton
from home.admin import ContentAdmin
# Register your models here.
# All content models share the common ContentAdmin options from home.admin
admin.site.register(CertContent,ContentAdmin)
admin.site.register(ProductContent,ContentAdmin)
admin.site.register(UsesContent,ContentAdmin)
admin.site.register(ProductButton,ContentAdmin)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import json
import socket
from airflow.hooks.http_hook import HttpHook
from airflow.exceptions import AirflowException
class SymphonyWebHook(HttpHook):
    """
    This hook allows you to post messages to Symphony using webhooks.
    Takes both the Symphony webhook or a conn id. If the http conn id is provided then
    the hook will use the webhook token provided in the extra.
    You can pass the token of the chat room directly either.
    :param http_conn_id: connection that has the Symphony webhook on its extra field
    :type http_conn_id: str
    :param webhook_token: Symphony webhook token
    :type webhook_token: str
    :param symph_type: default to 'com.symphony.integration.test.event'
    :type symph_type: str
    :param symph_version: default to '2.0'
    :type symph_version: str
    """
    def __init__(self,
                 http_conn_id,
                 webhook_token=None,
                 symph_type='com.symphony.integration.test.event',
                 symph_version='2.0',
                 *args, **kwargs
                 ):
        super(SymphonyWebHook, self).__init__(method='POST',
                                              http_conn_id=http_conn_id,
                                              *args,
                                              **kwargs)
        self.webhook_token = self._get_token(http_conn_id, webhook_token)
        self.symph_type = symph_type
        self.symph_version = symph_version

    def _get_token(self, http_conn_id, token=None):
        """
        Return either the complete route to the chat room if provided or
        search for the webhook token stored in the extra of the connection.
        :param http_conn_id: connection that has the Symphony webhook on its extra field
        :type http_conn_id: str
        :param token: token to the Symphony chat room
        :type token: str
        """
        if token:
            return token
        elif http_conn_id:
            conn = self.get_connection(http_conn_id)
            extra = conn.extra_dejson
            try:
                return extra['webhook_token']
            except KeyError:
                raise AirflowException('You indicated to use the webhook in the '
                                       'connection extras but passed none')
        else:
            raise AirflowException('Cannot get token: no Symphony '
                                   'webhook token nor conn_id provided')

    @property
    def _msg_pattern(self):
        # MessageML template evaluated by Symphony; renders the payload body
        return """<messageML>
        <span>${entity['Message'].message.body}</span>
        </messageML>"""

    def _build_task_message(self,
                            emoji,
                            task_status,
                            task_id,
                            dag_id,
                            execution_date,
                            log_url):
        """
        Build a standard log message for a task instance
        :param emoji: emoji to display
        :type emoji: str
        :param task_status: the status of the task
        :type task_status: str
        :param task_id: id of the task instance
        :type task_id: str
        :param dag_id: id of the dag
        :type dag_id: str
        :param execution_date: date of task instance execution
        :type execution_date: str
        :param log_url: url to access the task instance log
        :type log_url: str
        :return: json
        """
        msg_body = """<emoji shortcode="{}"></emoji>
        <b>Task Status: </b>{}<br/>
        <b>Task Id: </b>{}<br/>
        <b>Dag Id: </b>{}<br/>
        <b>Execution Date: </b>{}<br/>
        <b>Log Url: </b><a href="{}"/>
        <br/>
        """.format(emoji, task_status, task_id, dag_id, execution_date, log_url)
        payload = {'Message':
                   {'type': self.symph_type,
                    'version': self.symph_version,
                    'message': {'type': "com.symphony.integration.test.message",
                                'version': self.symph_version,
                                'header': 'Airflow',
                                'body': msg_body}
                    }
                   }
        return json.dumps(payload)

    def _build_symphony_message(self,
                                msg,
                                header=None,
                                emoji=None,
                                username=None,
                                date=False,
                                time=False,
                                hostname=None,
                                date_fmt='%Y-%m-%d %H:%M:%S.%f',
                                user=None):
        """
        Build a simple symphony message payload
        :param msg: message to be sent
        :type msg: str
        :param header: header to be added to the message
        :type header: str
        :param emoji: emoji to be added to the message
        :type emoji: str
        :param username: name that will be used to identify who sent the
            message (prefixed to the body)
        :type username: str
        :param date: if True, will add the current date in the message
        :type date: bool
        :param time: if True, will add the current date and time in the
            message
        :type time: bool
        :param hostname: if True, will add the name of the machine sending the
            message
        :type hostname: bool
        :param date_fmt: used to format date and time information
        :type date_fmt: str
        :param user: if passed, appended to the body as ' - by <user>'
            (new keyword, added last to stay backward compatible)
        :type user: str
        :return: json
        """
        msg_body = msg
        # BUG FIX: the original body referenced the undefined names
        # ``sender`` and ``user`` (NameError on every call). ``username``
        # is the prefix; ``user`` is now an explicit optional parameter.
        if username:
            msg_body = f'{username}: {msg_body}'
        if emoji:
            msg_body = f"<emoji shortcode='{emoji}'></emoji> " + msg_body
        if user:
            msg_body += f' - by {user}'
        if all((date, time)):
            msg_body += f' @ {datetime.now().strftime(date_fmt)}'
        elif date and not time:
            msg_body += f' @ {datetime.now().date().strftime(date_fmt)}'
        if hostname:
            msg_body += ' on {}'.format(socket.gethostname())
        payload = {'Message':
                   {'type': self.symph_type,
                    'version': self.symph_version,
                    'message': {'type': "com.symphony.integration.test.message",
                                'version': self.symph_version,
                                'header': header,
                                'body': msg_body}
                    }
                   }
        return json.dumps(payload)

    def send_simple(self,
                    msg,
                    header=None,
                    emoji=None,
                    username=None,
                    user=None,
                    date=True,
                    time=True,
                    hostname=None,
                    **kwargs):
        """
        Send a simple message to the chat room
        :param msg: message content, simple text
        :type msg: str
        :param header: if passed will add a header to the message
        :type header: str
        :param emoji: emoji shortcode prepended to the message
        :type emoji: str
        :param username: if passed, will add a username to the message
            sent
        :type username: str
        :param user: if passed, appended to the body as ' - by <user>'
        :type user: str
        :param date: if True, adds the current date to the message
        :type date: bool
        :param time: if True (with date), adds date and time to the message
        :type time: bool
        :param hostname: if True, adds the sending machine's hostname
        :type hostname: bool
        """
        # BUG FIX: the original forwarded shifted keyword arguments
        # (username=sender, date=user, time=date); each parameter is now
        # forwarded under its own name.
        symph_msg = self._build_symphony_message(msg,
                                                 header=header,
                                                 emoji=emoji,
                                                 username=username,
                                                 user=user,
                                                 date=date,
                                                 time=time,
                                                 hostname=hostname,
                                                 **kwargs)
        self.run(endpoint=self.webhook_token,
                 files=dict(data=symph_msg,
                            message=self._msg_pattern))

    def send_task_log(self,
                      emoji,
                      task_status,
                      task_id,
                      dag_id,
                      execution_date,
                      log_url):
        """
        Send a task log pre-formatted message to the chat room
        :param emoji: emoji to display
        :type emoji: str
        :param task_status: the status of the task
        :type task_status: str
        :param task_id: id of the airflow task
        :type task_id: str
        :param dag_id: id of the airflow dag
        :type dag_id: str
        :param execution_date: date of task instance execution
        :type execution_date: str
        :param log_url: url to access the task instance log
        :type log_url: str
        """
        task_msg = self._build_task_message(emoji,
                                            task_status,
                                            task_id,
                                            dag_id,
                                            execution_date,
                                            log_url)
        self.run(endpoint=self.webhook_token,
                 files=dict(data=task_msg, message=self._msg_pattern))
|
#!/usr/bin/env python3
import pytest
from pathlib import Path
from bin import telescope_status
import signal
class TestTelStatus():
    """Tests for the telescope_status CLI entry point."""

    @staticmethod
    def handler(signum, frame):
        # SIGALRM handler: turn the alarm into a TimeoutError so a
        # long-running telescope_status.main() can be interrupted cleanly.
        print('Exiting call')
        raise TimeoutError('The function reached timeout without other errors')

    # This is problematic, it needs to be switched to a multiprocessing style
    # def test_print(self):
    #     signal.signal(signal.SIGALRM, self.handler)
    #     signal.alarm(10)
    #     telescope_status.main()
# Allow running this test file directly via pytest
if __name__ == '__main__':
    pytest.main()
|
import rpyc
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore import core_utils
from .ssh_tunnel_pool import sshTunnelPool
# logger
_logger = core_utils.setup_logger('rpc_herder')
# RPC herder
# RPC herder
class RpcHerder(PluginBase):
    """Proxy plugin that forwards every harvester plugin call to a remote
    rpyc service reached through a pooled SSH tunnel.

    Every public method mirrors the same-named method on the remote
    service, passing ``self.original_config`` as the first argument. On
    any failure the error is dumped to the log and None is returned.
    """

    # constructor
    def __init__(self, **kwarg):
        PluginBase.__init__(self, **kwarg)
        # Optional configuration attributes; absent ones fall back to
        # defaults. (Originally a series of try/except blocks — getattr
        # with a default is equivalent for plain attributes.)
        sshUserName = getattr(self, 'sshUserName', None)
        sshPassword = getattr(self, 'sshPassword', None)
        privateKey = getattr(self, 'privateKey', None)
        passPhrase = getattr(self, 'passPhrase', None)
        jumpHost = getattr(self, 'jumpHost', None)
        jumpPort = getattr(self, 'jumpPort', 22)
        remotePort = getattr(self, 'remotePort', 22)
        # Ensure the tunnel pool has servers for the remote host, then
        # connect rpyc through one of the pooled tunnels
        sshTunnelPool.make_tunnel_server(self.remoteHost, remotePort, self.remoteBindPort, self.numTunnels,
                                         ssh_username=sshUserName, ssh_password=sshPassword,
                                         private_key=privateKey, pass_phrase=passPhrase,
                                         jump_host=jumpHost, jump_port=jumpPort)
        tunnelHost, tunnelPort, tunnelCore = sshTunnelPool.get_tunnel(self.remoteHost, remotePort)
        self.conn = rpyc.connect(tunnelHost, tunnelPort, config={"allow_all_attrs": True,
                                                                 "allow_setattr": True,
                                                                 "allow_delattr": True}
                                 )

    # generic forwarding helper
    def _remote_call(self, method_name, *args):
        """Invoke *method_name* on the remote rpyc service with *args*.

        Factors out the logging + error-handling pattern every public
        method below repeats. Returns the remote result, or None when the
        call raises (the error is dumped to the log).
        """
        tmpLog = core_utils.make_logger(_logger, method_name=method_name)
        try:
            ret = getattr(self.conn.root, method_name)(self.original_config, *args)
        except Exception:
            core_utils.dump_error_message(tmpLog)
            ret = None
        return ret

    # submit workers
    def submit_workers(self, workspec_list):
        return self._remote_call('submit_workers', workspec_list)

    # check workers
    def check_workers(self, workspec_list):
        return self._remote_call('check_workers', workspec_list)

    # setup access points
    def setup_access_points(self, workspec_list):
        return self._remote_call('setup_access_points', workspec_list)

    # feed jobs
    def feed_jobs(self, workspec, jobspec_list):
        return self._remote_call('feed_jobs', workspec, jobspec_list)

    # request job
    def job_requested(self, workspec):
        return self._remote_call('job_requested', workspec)

    # request kill
    def kill_requested(self, workspec):
        return self._remote_call('kill_requested', workspec)

    # is alive
    def is_alive(self, workspec, worker_heartbeat_limit):
        return self._remote_call('is_alive', workspec, worker_heartbeat_limit)

    # get work attributes
    def get_work_attributes(self, workspec):
        return self._remote_call('get_work_attributes', workspec)

    # get output files
    def get_files_to_stage_out(self, workspec):
        return self._remote_call('get_files_to_stage_out', workspec)

    # get events
    def events_to_update(self, workspec):
        return self._remote_call('events_to_update', workspec)

    # request events
    def events_requested(self, workspec):
        return self._remote_call('events_requested', workspec)

    # get PandaIDs
    def get_panda_ids(self, workspec):
        return self._remote_call('get_panda_ids', workspec)

    # post processing
    def post_processing(self, workspec, jobspec_list, map_type):
        return self._remote_call('post_processing', workspec, jobspec_list, map_type)

    # send ACK
    def acknowledge_events_files(self, workspec):
        return self._remote_call('acknowledge_events_files', workspec)
|
"""
Implementation of k-nearest-neighbor classifier
"""
from numpy import *
from pylab import *
from binary import *
class KNN(BinaryClassifier):
    """
    This class defines a nearest neighbor classifier, that support
    _both_ K-nearest neighbors _and_ epsilon ball neighbors.
    """

    def __init__(self, opts):
        """
        Initialize the classifier. There's actually basically nothing
        to do here since nearest neighbors do not really train.
        """
        # remember the options
        self.opts = opts
        # just call reset
        self.reset()

    def reset(self):
        self.trX = zeros((0,0))    # where we will store the training examples
        self.trY = zeros((0))      # where we will store the training labels

    def online(self):
        """
        We're not online
        """
        return False

    def __repr__(self):
        """
        Return a string representation of the classifier.
        """
        # BUG FIX: this used to return "w=" + repr(self.weights), but a
        # nearest-neighbor model never defines 'weights', so repr() raised
        # AttributeError.  Show the stored training data instead.
        return "KNN(trX=" + repr(self.trX) + ", trY=" + repr(self.trY) + ")"

    def predict(self, X):
        """
        X is a vector that we're supposed to make a prediction about.
        Our return value should be the 'vote' in favor of a positive
        or negative label.  In particular, if, in our neighbor set,
        there are 5 positive training examples and 2 negative
        examples, we return 5-2=3.

        Everything should be in terms of _Euclidean distance_, NOT
        squared Euclidean distance or anything more exotic.
        """
        isKNN = self.opts['isKNN']    # true for KNN, false for epsilon balls
        if self.trY.size == 0:
            return 0    # if we haven't trained yet, return 0
        # Euclidean distance from X to every stored training example
        dist = sqrt(((X - self.trX) ** 2).sum(axis=1))
        if isKNN:
            # K nearest neighbor model: vote over the K closest examples.
            K = self.opts['K']    # how many NN to use
            nearest = argsort(dist)[0:K]
            # BUG FIX: the contract above (and the old inline comments) says
            # to return #pos - #neg, but the old code returned the majority
            # label via util.mode (a name never imported in this file).  For
            # +1/-1 labels the sum of the neighbor labels IS #pos - #neg and
            # its sign agrees with the majority label.
            # NOTE(review): assumes labels are +1/-1 as usual for this
            # binary-classification codebase — TODO confirm.
            return self.trY[nearest].sum()
        else:
            # epsilon ball model: vote over every example within distance eps.
            eps = self.opts['eps']    # how big is our epsilon ball
            return self.trY[dist <= eps].sum()

    def getRepresentation(self):
        """
        Return the stored training data (the NN model's 'representation').
        """
        return (self.trX, self.trY)

    def train(self, X, Y):
        """
        Just store the data.
        """
        self.trX = X
        self.trY = Y
|
import graphene
from uuid import uuid4
from flask_graphql_auth import mutation_jwt_required, get_jwt_identity, AuthInfoField
from app.models import User, ToDo, Milestone, Type
from app.schema.unions import ResponseUnion
from app.schema.fields import ResponseMessageField
from app.schema.utils import todo_activity_logger
# Expose the project's Type enum (from app.models) as a Graphene enum so it
# can be used as a GraphQL argument type below.
TypeEnum = graphene.Enum.from_enum(Type)
class NewToDoMutation(graphene.Mutation):
    """GraphQL mutation that creates a new ToDo for the authenticated user,
    credits the user 30 points, and logs the activity."""

    class Arguments(object):
        # 'token' is consumed by @mutation_jwt_required, not by mutate() itself
        token = graphene.String()
        title = graphene.String()
        type = TypeEnum()
        milestones = graphene.List(graphene.String)
        expiration = graphene.Date()

    # union of success/failure message payloads returned to the client
    result = graphene.Field(ResponseUnion)

    @classmethod
    @mutation_jwt_required
    def mutate(cls, _, info, title, milestones, type, expiration):
        """Create and persist the ToDo; returns a NewToDoMutation whose
        result field reports success or failure."""
        # look up the caller by the e-mail stored in the JWT identity
        user = User.objects(email=get_jwt_identity()).first()
        if not user:
            return NewToDoMutation(ResponseMessageField(is_success=False, message="User not found"))
        # map the enum's integer value back to its string name
        # NOTE(review): an unknown value falls through to None and is then
        # stored as the literal string "None" by str(type) below — confirm
        # this is intended.
        type_enum = {1: "INFINITY", 2: "STANDARD", 3: "HARD"}
        type = type_enum.get(type, None)
        # one Milestone per name, each with a fresh uuid hex id
        new_todo = ToDo(title=title,
                        type=str(type),
                        milestones=[Milestone(id=uuid4().hex, name=m) for m in milestones],
                        expiration=expiration)
        new_todo.save()
        # NOTE(review): update_one() is a QuerySet method in MongoEngine;
        # calling it on a Document instance looks suspect — verify against
        # the User model definition in app.models.
        user.update_one(inc__point=30)
        user.update_one(push__todo=new_todo)
        todo_activity_logger(user=user, type="new")
        return NewToDoMutation(ResponseMessageField(is_success=True,
                                                    message="Todo upload success"))
|
import torch
class MatrixFactorization(torch.nn.Module):
def __init__(self, n_users, n_items, n_factors=20):
super().__init__()
self.user_factors = torch.nn.Embedding(n_users, n_factors, sparse=True)
self.item_factors = torch.nn.Embedding(n_items, n_factors, sparse=True)
def forward(self, user, item):
return (self.user_factors(user) * self.item_factors(item)).sum(1)
class BiasedMatrixFactorization(torch.nn.Module):
    """Matrix factorization with per-user and per-item scalar biases.

    score(u, i) = b_u + b_i + <user_factors[u], item_factors[i]>
    """

    def __init__(self, n_users, n_items, n_factors=20):
        super().__init__()
        # sparse embeddings so optimizers only touch the rows actually used
        self.user_factors = torch.nn.Embedding(n_users, n_factors, sparse=True)
        self.item_factors = torch.nn.Embedding(n_items, n_factors, sparse=True)
        self.user_biases = torch.nn.Embedding(n_users, 1, sparse=True)
        self.item_biases = torch.nn.Embedding(n_items, 1, sparse=True)

    def forward(self, user, item):
        # BUG FIX: the bias embeddings come out with shape (batch, 1) while
        # the dot product below is (batch,); adding them broadcast the
        # prediction to a (batch, batch) matrix.  Squeeze the trailing
        # bias dimension so forward() returns one score per pair.
        pred = self.user_biases(user).squeeze(-1) + self.item_biases(item).squeeze(-1)
        pred += (self.user_factors(user) * self.item_factors(item)).sum(1)
        return pred
|
import tkinter as tk
import tkinter.font as tkFont
from tkinter import ttk
from config import Config
import ui.main_menu
class SettingPage(tk.Frame):
    """Settings screen: edit game options, then save (OK) or discard (Cancel)."""

    def __init__(self, parent, controller):
        super().__init__(parent)
        self.controller = controller
        # small top row for the title, tall second row for the settings frame
        self.rowconfigure(0, weight=1)
        self.rowconfigure(1, weight=20)
        self.columnconfigure(0, weight=1)
        self.create_title()
        self.create_label_frame()
        self.create_setting_widgets()
        self.create_buttons()

    def create_title(self):
        """Place the page title across the top of the frame."""
        title_font = tkFont.Font(**Config.TITLE_FONT)
        tk.Label(self, text="Settings", font=title_font).grid(
            row=0, column=0, sticky="nsew")

    def create_label_frame(self):
        """Create the container frame that holds every setting widget."""
        self.label_frame = ttk.LabelFrame(self, padding=10)
        self.label_frame.grid(row=1, column=0)

    def create_setting_widgets(self):
        """Build one label + spinbox pair per configurable setting."""
        setting_font = tkFont.Font(**Config.SETTING_FONT)
        # variables backing the spinboxes, seeded from the current config
        self.player_qty = tk.IntVar()
        self.player_qty.set(Config.PLAYER_QTY)
        self.winning_points = tk.IntVar()
        self.winning_points.set(Config.WINNING_POINTS)
        spinbox_specs = {
            "Player Quantity": {"increment": 1,
                                "from_": 2,
                                "to": 4,
                                "state": "readonly",
                                "wrap": True,
                                "textvariable": self.player_qty,
                                "width": 5},
            "Winning Points": {"increment": 1,
                               "from_": 20,
                               "to": 500,
                               "wrap": True,
                               "textvariable": self.winning_points,
                               "width": 5},
        }
        for row, (name, options) in enumerate(spinbox_specs.items()):
            tk.Label(self.label_frame, text=name, font=setting_font).grid(
                row=row, column=0, sticky="e", padx=20)
            tk.Spinbox(self.label_frame, **options, font=setting_font).grid(
                row=row, column=1, sticky="w", padx=20)

    def create_buttons(self):
        """Add the OK / Cancel buttons beside the settings."""
        actions = [("OK", self.save_settings),
                   ("Cancel", self.undo_changes)]
        for row, (text, command) in enumerate(actions):
            ttk.Button(self.label_frame, text=text, command=command,
                       width=30).grid(row=row, column=2)

    def undo_changes(self):
        """Cancel all the changes and go back to main screen."""
        self.player_qty.set(Config.PLAYER_QTY)
        self.winning_points.set(Config.WINNING_POINTS)
        self.controller.show_frame(ui.main_menu.MainScreen)

    def save_settings(self):
        """Save changes and go back to main screen."""
        Config.PLAYER_QTY = self.player_qty.get()
        Config.WINNING_POINTS = self.winning_points.get()
        self.controller.update_frames()
        self.controller.show_frame(ui.main_menu.MainScreen)
|
# Maps the VM arithmetic/logic commands to their Hack assembly translations.
# Binary ops pop two operands, combine them, push the result, and advance SP;
# unary ops ('not', 'neg') rewrite the top of the stack in place.
commandTable = {
    #Add: Get Top, Decrement SP, Get Top, Add, Store
    'add' : '@SP\nM=M-1\nA=M\nD=M \n@SP\nM=M-1\nA=M \nD=D+M \n@SP\nA=M\nM=D \n@SP\nM=M+1\n',
    # sub: second-from-top minus top (D=M-D keeps operand order correct)
    'sub' : '@SP\nM=M-1\nA=M\nD=M \n@SP\nM=M-1\nA=M \nD=M-D \n@SP\nA=M\nM=D \n@SP\nM=M+1\n',
    # bitwise AND / OR of the two topmost values
    'and' : '@SP\nM=M-1\nA=M\nD=M \n@SP\nM=M-1\nA=M \nD=D&M \n@SP\nA=M\nM=D \n@SP\nM=M+1\n',
    'or' : '@SP\nM=M-1\nA=M\nD=M \n@SP\nM=M-1\nA=M \nD=D|M \n@SP\nA=M\nM=D \n@SP\nM=M+1\n',
    # unary ops: bitwise NOT and arithmetic negation of the stack top
    'not' : '@SP\nM=M-1\nA=M \nM=!M\n@SP\nM=M+1\n',
    'neg' : '@SP\nM=M-1\nA=M \nM=-M\n@SP\nM=M+1\n',
}
# VM segment name -> Hack symbol (or numeric base for static/temp).
segment_symbol = {
    "local": "LCL",
    "argument": "ARG",
    "this": "THIS",
    "that": "THAT",
    "static": 16,
    "temp": 5
}

def get_base_address(segment, index="0"):
    """Resolve a VM segment (plus index) to its Hack assembly address token.

    - constant:     the index itself is the value/address
    - pointer:      index 0 -> THIS, index 1 -> THAT
    - static/temp:  numeric base + index, as a string
    - otherwise:    the segment's symbolic base register (LCL/ARG/THIS/THAT)
    """
    if segment == 'constant':
        return index
    if segment == 'pointer':
        return segment_symbol['this'] if index == "0" else segment_symbol['that']
    if segment in ('static', 'temp'):
        base = int(segment_symbol[segment])
        return str(base + int(index))
    return segment_symbol[segment]
# Push Abstraction for Stack Arithmetic
def push(segment, value, filename=''):
    """Translate a VM 'push segment value' command into Hack assembly."""
    if segment == 'constant':
        # constants: the A-register holds the value itself
        load_value = "D=A\n"
    elif segment in ['pointer', 'static', 'temp']:
        # direct-mapped segments: read the word at the resolved address
        load_value = "D=M\n"
    else:
        # base-pointer segments: dereference base + index
        load_value = "D=M\n"
        load_value += "@%s\n" % value
        load_value += "A=D+A\nD=M\n"
    # static variables live under a per-file symbol, e.g. Foo.3
    if segment != 'static':
        address = get_base_address(segment, value)
    else:
        address = filename + "." + value
    asm = "@%s\n" % address
    asm += load_value
    # push D onto the stack and bump SP
    asm += "@SP\nM=M+1\nA=M-1\nM=D\n"
    return asm
# Pop Abstraction
def pop(segment, value, filename=''):
    """Translate a VM 'pop segment value' command into Hack assembly.

    Returns a (code, error) tuple: error is True only for the illegal
    'pop constant' case, for which no code is produced.
    """
    error = True
    # Constant pop error: you cannot pop into a constant
    if segment == 'constant':
        return '', error
    elif segment in ['pointer', 'temp', 'static']:
        asm = '@SP\nAM=M-1\nD=M\n'
        # BUG FIX: the old conditional expression bound the '%' tighter than
        # the if/else, so the address was emitted without the leading '@'
        # (and the static symbol also lost its newline), producing invalid
        # assembly.  Resolve the target first, then format a proper
        # A-instruction.
        target = get_base_address(segment, value) if segment != 'static' else filename + "." + value
        asm += '@%s\n' % target
        asm += 'M=D\n'
        return asm, not error
    else:
        # base-pointer segments: compute base + index into R15, then store
        asm = '@%s\n' % get_base_address(segment)
        asm += 'D=M\n'
        asm += '@%s\n' % value
        asm += 'D=D+A\n@15\nM=D\n@SP\nAM=M-1\nD=M\n@15\nA=M\nM=D\n'
        return asm, not error
|
# Character classes a UID may contain (ASCII only, matching the spec).
_UPPERCASE = set('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
_LOWERCASE = set('abcdefghijklmnopqrstuvwxyz')
_DIGITS = set('0123456789')


def is_valid_uid(uid):
    """Return True iff uid is a valid 10-character UID.

    Rules: exactly 10 characters, all ASCII alphanumeric, at least 2
    uppercase letters, at least 3 digits, and no repeated characters.
    """
    upper = sum(1 for ch in uid if ch in _UPPERCASE)
    lower = sum(1 for ch in uid if ch in _LOWERCASE)
    digits = sum(1 for ch in uid if ch in _DIGITS)
    return (len(uid) == 10
            and upper + lower + digits == 10   # every char is ASCII alnum
            and upper >= 2
            and digits >= 3
            and len(set(uid)) == 10)           # no repeated characters


def main():
    """Read a count then that many UIDs; print Valid/Invalid for each."""
    n = int(input())
    for _ in range(n):
        print('Valid' if is_valid_uid(input()) else 'Invalid')


# Guarding the script body means importing this module no longer blocks on
# stdin; running it as a script behaves exactly as before.
if __name__ == '__main__':
    main()
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import platform
import argparse
try:
from geopy.geocoders import Nominatim
except ImportError:
sys.exit("\033[31m[!] Error Geopy Not Found !")
def get_address(latlong):
    """Reverse-geocode a 'lat,long' string into a human-readable address."""
    geocoder = Nominatim(user_agent="get_address_by_latlong")
    return geocoder.reverse(latlong).address
if __name__ == '__main__':
    # Parse the coordinates from the command line and print the address.
    parser = argparse.ArgumentParser()
    parser.add_argument('latitude', type=str, help='Latitude')
    parser.add_argument('longitude', type=str, help='Longitude')
    args = parser.parse_args()
    coords = "%s,%s" % (args.latitude, args.longitude)
    print(get_address(coords))
"""
Typing for All Injector Dependencies
:license: MIT
"""
class Boto3SFN():
    '''
    Boto3 SFN Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class Boto3SQS():
    '''
    Boto3 SQS Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class Boto3SNS():
    '''
    Boto3 SNS Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBCheckIn():
    '''
    PynamoDB Checkin Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBConsultant():
    '''
    PynamoDB Consultant Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBOnlineStatuses():
    '''
    PynamoDB OnlineStatuses Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBCustomers():
    '''
    PynamoDB Customers Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class Requests():
    '''
    Requests Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBContract():
    '''
    PynamoDB Contract Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
class PynamoDBWeights():
    '''
    PynamoDB Weights Type
    '''
    def __init__(self):
        # was a bare `super()`, which only builds a proxy object and
        # discards it; actually invoke the base initializer
        super().__init__()
|
# Test fixtures for the ticker-extraction pipeline.  Each case supplies an
# 'input' (an unprocessed ticker string and/or a news summary and link) and
# the 'output' the extractor is expected to produce for it.
test_list = [
    ############################################
    # ONLY UNPROCESSED TICKER AS INPUT SECTION #
    ############################################
    {
        'input': {
            'u_ticker': 'FUN.N',
            'summary': None,
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': ['FUN.N'],
            'ticker_list': ['FUN'],
            'exchange_list': ['N'],
            'ticker_normal_list': ['FUN US'],
            'found_ticker_with_trit_api': False
        }
    },
    {
        'input': {
            'u_ticker': 'ULBI.OQ',
            'summary': None,
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': ['ULBI.OQ'],
            'ticker_list': ['ULBI'],
            'exchange_list': ['OQ'],
            'ticker_normal_list': ['ULBI US'],
            'found_ticker_with_trit_api': False
        }
    },
    {
        'input': {
            'u_ticker': 'TSX-V:CIT',
            'summary': None,
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': ['TSX-V:CIT'],
            'ticker_list': ['CIT'],
            'exchange_list': ['TSX-V'],
            'ticker_normal_list': ['CIT V'],
            'found_ticker_with_trit_api': False
        }
    },
    # a bare ticker with no exchange suffix cannot be processed
    {
        'input': {
            'u_ticker': 'IONX',
            'summary': None,
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': [None],
            'ticker_list': [None],
            'exchange_list': [None],
            'ticker_normal_list': [None],
            'found_ticker_with_trit_api': False
        }
    },
    {
        'input': {
            'u_ticker': 'CNSX:LION.CN',
            'summary': None,
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': ['CNSX:LION.CN'],
            'ticker_list': ['LION.CN'],
            'exchange_list': ['CNSX'],
            'ticker_normal_list': ['LION.CN X'],
            'found_ticker_with_trit_api': False
        }
    },
    #######################################################
    # UNPROCESSED TICKER WITH SUMMARY AND/OR LINK SECTION #
    #######################################################
    {
        'input': {
            'u_ticker': 'PINS.N',
            'summary': 'Two-Way Marketplace Unlocks $10B Market for Diamond Investors Seeking Asset Diversification, Dramatically Lowers Bid/Ask Spread Two-Way Marketplace Unlocks $10B Market for Diamond Investors Seeking Asset Diversification, Dramatically Lowers Bid/Ask Spread',
            'link': 'http://www.globenewswire.com/news-release/2020/07/15/2062780/0/en/Icecap-Leverages-Tokenization-to-Launch-First-Global-Investment-Grade-Diamond-Marketplace.html'
        },
        'output': {
            'unprocessed_ticker_list': ['PINS.N'],
            'ticker_list': ['PINS'],
            'exchange_list': ['N'],
            'ticker_normal_list': ['PINS US'],
            'found_ticker_with_trit_api': False
        }
    },
    {
        'input': {
            'u_ticker': 'TSX-V:LLG',
            'summary': 'Mason Graphite commente les divulgations aux actionnaires triées sur le volet par le dissident Mason Graphite commente les divulgations aux actionnaires triées sur le volet par le dissident',
            'link': None
        },
        'output': {
            'unprocessed_ticker_list': ['TSX-V:LLG'],
            'ticker_list': ['LLG'],
            'exchange_list': ['TSX-V'],
            'ticker_normal_list': ['LLG V'],
            'found_ticker_with_trit_api': False
        }
    },
    {
        'input': {
            'u_ticker': 'Irish:IRSH',
            'summary': '',
            'link': 'http://www.globenewswire.com/news-release/2020/07/20/2064131/0/en/Notice-of-ARYZTA-Extraordinary-General-Meeting.html'
        },
        'output': {
            'unprocessed_ticker_list': ['Irish:IRSH'],
            'ticker_list': ['IRSH'],
            'exchange_list': ['Irish'],
            'ticker_normal_list': ['IRSH IR'],
            'found_ticker_with_trit_api': False
        }
    },
    #########################################################################
    # FINDING TICKER INFORMATION WITH TRIT API FROM SUMMARY OR LINK SECTION #
    #########################################################################
    {
        'input': {
            'u_ticker': None,
            'summary': '''Eye Supplements Market Key Players Studied In this Report are Nature's Bounty Co., Amway Corp., Butterflies Healthcare Ltd, Valeant Pharmaceuticals International Inc., Novartis AG, Bausch & Lomb Incorporated, Akorn Incorporated, Alliance Pharma, Pfizer Inc., Vitabiotics Ltd, and others. Eye Supplements Market Key Players Studied In this Report are Nature's Bounty Co., Amway Corp., Butterflies Healthcare Ltd, Valeant Pharmaceuticals International Inc., Novartis AG, Bausch & Lomb Incorporated, Akorn Incorporated, Alliance Pharma, Pfizer Inc., Vitabiotics Ltd, and others.''',
            'link': 'http://www.globenewswire.com/news-release/2021/02/10/2173010/0/en/Eye-Supplements-Market-2021-Size-Share-Growth-Trends-Value-Analysis-Market-Dynamics-Forecast-Report-till-2026.html'
        },
        'output': {
            'unprocessed_ticker_list': ['BHC.TO', 'PFE.N', 'ALAPH.L', 'NOVN.S'],
            'ticker_list': ['BHC', 'PFE', 'ALAPH', 'NOVN'],
            'exchange_list': ['TO', 'N', 'L', 'S'],
            'ticker_normal_list': [None, 'PFE US', 'ALAPH LSE', None],
            'found_ticker_with_trit_api': True
        }
    },
    {
        'input': {
            'u_ticker': None,
            'summary': '',
            'link': 'http://www.globenewswire.com/news-release/2021/02/16/2175743/0/en/Three-Nexen-Tire-OE-tires-approved-for-new-Audi-A3-family.html'
        },
        'output': {
            'unprocessed_ticker_list': ['AUDVF.PK', '002350.KS', 'VOWG_p.DE'],
            'ticker_list': ['AUDVF', '002350', 'VOWG_p'],
            'exchange_list': ['PK', 'KS', 'DE'],
            'ticker_normal_list': ['AUDVF US', None, None],
            'found_ticker_with_trit_api': True
        }
    },
]
# Summaries paired (by position) with the ticker lists expected to be
# extracted from each; an entry of [None] means no ticker should be found.
test_text = {
    'summaries': [
        'GEORGE TOWN, Grand Cayman, May 26, 2020 (GLOBE NEWSWIRE) -- StoneCo Ltd. (Nasdaq: STNE) (“Stone” or the “Company”), a leading provider of financial technology solutions that empower merchants to conduct commerce seamlessly across multiple channels, today reports its financial results for its first quarter ended March 31, 2020.',
        'GEO Life Extension and LEO De-Orbiting Drive Revenue, while Active Debris Removal and Space Situational Awareness Expand Market',
        'GENEVA, May 06, 2020 (GLOBE NEWSWIRE) -- Etrion Corporation (“Etrion” or the “Company”) (TSX: ETX) (OMX: ETX), a solar independent power producer, will release its first quarter 2020 results before the market opens on Friday, May 8, 2020. ',
        'General Motors and Ford are struggling to keep workers on the job as coronavirus cases increase, forcing the companies to cut shifts, hire new workers and transfer others to fill vacant roles.',
        'Nestlé is launching what it says is the first pet food to reduce allergens on cat hair, seeking an edge in the booming but increasingly competitive pet-food market.',
        '''On behalf of Sbanken ASA, DNB Markets has on 22 July 2020 purchased 17 100 shares for use in Sbanken's share purchase programmes for executive managers and board members.''',
        ''
    ],
    'outputs': [
        ['STNE.OQ'],
        [None],
        ['ETX.TO'],
        ['F.N', 'GM.N'],
        ['NESN.S'],
        ['SBANK.OL'],
        [None]
    ]
}
test_urls = {
'urls': [
'http://www.globenewswire.com/news-release/2020/10/27/2115372/0/en/CarMax-is-Hiring-for-More-Than-3-500-Positions-by-End-of-2020.html',
'http://www.globenewswire.com/news-release/2021/01/08/2155451/0/en/Chimerix-Acquires-Oncoceutics-to-Expand-Pipeline-with-Late-Stage-Oncology-Program.html',
'http://www.globenewswire.com/news-release/2021/02/02/2168274/0/en/Verizon-Media-joins-championship-winning-DS-TECHEETAH.html',
'https://www.newswire.com/news/remove-unnecessary-data-from-any-windows-pc-ascomp-releases-cleaning-21175570',
'http://www.businesswire.com/news/home/20210216005634/en/Advance-Auto-Parts-Reports-Fourth-Quarter-and-Full-Year-2020-Results/?feedref=JjAwJuNHiystnCoBq_hl-WepL6sRhX6ZA9uIKkLyMC2Ka1ah7uC6RdZY8DBvigxR7fxFuNFTHSunhvli30RlBNXya2izy9YOgHlBiZQk2LO4aHursdTgjq-KNSSWsKUa97ZTKjNldK1STeKrUjqbvg==',
'http://www.businesswire.com/news/home/20210209005874/en/More-Black-Americans-Report-Permanently-Changing-Their-Spending-and-Saving-Habits-as-a-Direct-Result-of-the-Pandemic/?feedref=JjAwJuNHiystnCoBq_hl-bsjWlVyeNLyq_m2tvaHJJaD1w08bW43U_zsPK9s38B4rCOi9QzgjCezTS3Nw_X6kJUrpSBm-Hav1w-UkdSlG3miu0ZZ-LtXjCwD3Ec3ldN_zZCGORvG0LE20YOvo49uqw==',
],
'bodies': [
'''Richmond, Virginia, Oct. 27, 2020 (GLOBE NEWSWIRE) -- CarMax, the nation’s largest retailer of used cars, announced plans to hire for more than 3,500 positions companywide by the end of the year. At a time when many retailers are hiring for temporary seasonal positions, CarMax is hiring for long-term careers. Candidates can apply now for open positions at the CarMax careers website.
CarMax has more than 25,000 associates nationwide and is hiring for a variety of positions among its customer experience centers, corporate locations, and 220 stores nationwide.
Positions in highest demand include the following:
More than 1,300 Auto Technicians, Detailers, Painters and Inventory Associates: Help the company increase its production of vehicles for retail to customers to support the company’s continued growth. CarMax’s highly trained associates will primarily work on reconditioning vehicles and preparing them for sale. Automotive technicians find value in the company’s award-winning training program, strong opportunities to grow long-term careers, reimbursement programs for ASE certification, and free or discounted tools. Sign-on bonus of up to $2,500 available for some positions. Open positions are available at CarMax stores nationwide.
More than 900 Store Sales and Business Office Associates: Store associates are the face of the company and serve customers in-person throughout their car buying journey. Sales consultants work directly with customers to answer questions and help them find the best vehicle option to fit their needs. Business Office associates guide customers through the administrative process associated with vehicle sales and support the functions of all store departments. Open positions are available among CarMax’s 220 store locations nationwide.
More than 600 Customer Experience Consultants: Support customers over the phone or online with shopping and financing until the customer is ready to pick up their vehicle at an area store or receive the vehicle through home delivery. o Average pay of $22.50 an hour with the opportunity to earn $30+ an hour. Sign-on bonuses of $500 - $5,000 depending on location. Open positions are available at CarMax Customer Experience Centers in Atlanta, Ga., Kansas City, Kan., Raleigh, N.C., Richmond, Va., and Phoenix, Ariz.
More than 100 Digital Technology, Product and Data Science: Leverage technology and agile methodologies to deliver exceptional customer and associate experiences that push the automotive retail industry forward. Whether you’re analyzing big data and driving insights; delivering automated, scalable solutions; designing innovative, new products; or articulating the CarMax brand; your work will ensure CarMax stays at the forefront of our field. Most associates at CarMax’s corporate locations are working from home at least through the end of the year due to the COVID-19 pandemic. Going forward, CarMax anticipates that many corporate positions will offer a hybrid work environment with flexibility to work a combination of onsite and remotely during the work week, as well as an option to work 100% remotely in some roles. Open positions are available at CarMax’s Home Office and Digital Innovation Center in Richmond, VA.
"We're looking for high integrity, customer-focused associates to join our team and help us continue to transform the way people buy and sell cars,” said Diane Cafritz, chief human resources officer and senior vice president at CarMax, "Investing in our associates is a top priority for the company and you will be given award-winning training and development opportunities to continue to learn, grow your skills, and build a great career at CarMax."
Why Work at CarMax?
A commitment to taking care of our associates through competitive pay and a comprehensive benefits package, including full and part-time benefits, a retirement savings plan, tuition reimbursement, and discounts on car purchases and services.
A strong focus on the health and safety of our associates, customers, and communities. We’ve put significant measures in place to reduce the risk of exposure and further spread of COVID-19, including requiring associates to wear masks while working closely with others, implementing enhanced cleaning measures at all locations, and practicing social distancing guidelines in all locations.
A growing business fueled by operating with integrity and transparency; and a focus on giving our customers an experience they've never had before.
An ability to make an impact in the communities where our associates live and work through company sponsored events and volunteer team builders.
An award-winning workplace, including FORTUNE magazine’s 100 Best Companies to work For, Best Workplaces in Retail and Best Workplaces for Diversity; Training Magazine’s "Training Top 125" companies in America; and recognition by G.I. Jobs as a Military Friendly Employer. CarMax was also recognized as one of PEOPLE Magazine’s 50 Companies that Care: Employers Who Have Gone Above and Beyond During the Pandemic.
How Can Job Seekers Apply?
If you’re ready to redefine your career journey, we’d love to hear from you. Apply today at https://careers.carmax.com/us/en
# # #
About CarMax
CarMax, the nation’s largest retailer of used cars, revolutionized the automotive retail industry by driving integrity, honesty and transparency in every interaction. The company offers a truly personalized experience with the option for customers to do as much, or as little, online and in-store as they want. CarMax also provides a variety of vehicle delivery methods, including home delivery, contactless curbside pickup and appointments in its stores. During the fiscal year ending February 29, 2020, CarMax sold more than 830,000 used cars and more than 465,000 wholesale vehicles at its in-store auctions. CarMax has 220 stores, over 25,000 Associates, and is proud to have been recognized for 16 consecutive years as one of the Fortune 100 Best Companies to Work For®. For more information, visit www.carmax.com.
Attachments
CarMax Customer Experience Consultant
CarMax Auto Tech
Lindsey Duke
CarMax
(855) 887-2915
PR@carmax.com
''',
'''
ONC201 Registrational Trial for Recurrent H3 K27M-mutant Glioma
Compelling Response Rates to Date; Defined Regulatory Path to Registration
Pivotal Data Anticipated in 2021 to Support Potential Registration, Addressing an Estimated Market Opportunity of Greater than $500 Million
Management to Host Conference Call at 8:30 a.m. ET Today
DURHAM, N.C., Jan. 08, 2021 (GLOBE NEWSWIRE) -- Chimerix (NASDAQ:CMRX), a biopharmaceutical company focused on accelerating the development of medicines to treat cancer and other serious diseases, today announced that the Company has acquired Oncoceutics, Inc., a privately-held, clinical-stage biotechnology company developing imipridones, a novel class of compounds. Oncoceutics’ lead product candidate, ONC201, has been shown in clinical testing to selectively induce cell death in multiple cancer types. ONC201 is currently in a registrational clinical trial for recurrent H3 K27M-mutant glioma and a confirmatory response rate assessment is expected in 2021.
ONC201 is an orally administered small molecule dopamine receptor D2 (DRD2) antagonist and caseinolytic protease (ClpP) agonist in late-stage clinical development for recurrent gliomas that harbor the H3 K27M mutation. Recurrent glioma is a form of brain cancer with a particularly poor prognosis having a median overall survival of approximately eight months. Recurrent pediatric patients, with cancer that carries the H3 K27M mutation, have an even worse prognosis with median overall survival of approximately four months. Compelling responses at this stage of disease are rare and lack durability. Patients with this mutation are considered grade IV by the World Health Organization, regardless of underlying histology or age. Initial evaluation of data from the full 50-subject registration cohort, which remains subject to full maturation and confirmation by Blinded Independent Central Review (BICR), indicate a compelling and particularly durable single agent Overall Response Rate (ORR) of at least 20% as assessed by Response Assessment in Neuro-Oncology-High Grade Glioma (RANO-HGG). The final confirmatory data analysis is expected in 2021.
“Patients with H3 K27M-mutant glioma are in desperate need of better therapeutic alternatives,” said Dr. Patrick Wen, Director, Center for Neuro-Oncology at the Dana-Farber Cancer Institute and professor of Neurology at Harvard Medical School. “The tumor responses and safety profile we have observed with ONC201 in this devastating disease are compelling and I look forward to the possibility of accelerating its delivery to patients.”
“Glioma remains one of the highest areas of unmet need in oncology where even first-line radiation therapy, as well as temozolomide in eligible patients, is not meaningfully effective and subsequent therapies are considered palliative. Further, there are no molecularly-targeted therapies for patients which harbor the H3 K27M mutation in this life-limiting disease. Given the urgent need and based on discussions with the FDA, there is a potential accelerated path to approval based on overall response. With a registration cohort of patients fully enrolled, treated, and preliminary data in hand, ONC201 offers an exciting near-term opportunity to quickly bring a potentially life-saving therapy and hope to patients with limited or no options,” said Mike Sherman, Chief Executive Officer of Chimerix. “Our team is uniquely positioned to advance ONC201 given our considerable experience bringing targeted oncology products through the regulatory process.”
“Oncoceutics represents a transformative acquisition for Chimerix, positioning the company with five assets across all stages of development and delivering on our goal to focus on oncology opportunities, complementing our Phase 3 study in acute myeloid leukemia with DSTAT.
With the upcoming Prescription Drug User Fee Act (PDUFA) date of April 7, 2021 for brincidofovir in smallpox and the confirmatory response rate assessment of ONC201 in 2021, we expect these near-term milestones to accelerate delivery of two new therapies in areas of particularly high unmet need,” concluded Mr. Sherman.
“We are thrilled to join the Chimerix team to help accelerate ONC201 to glioma patients in urgent need of effective treatments. Chimerix has the leadership and resources to bring this program successfully through to approval and to further develop other promising assets in the Oncoceutics pipeline,” said Lee Schalop, M.D., Chief Executive Officer of Oncoceutics. “This acquisition builds upon the vision of my co-founder Wolfgang Oster, M.D., Ph.D., scientific founder Wafik El-Deiry, M.D., Ph.D., FACP and all the employees at Oncoceutics in developing a therapy for patients for which there is no available treatment.”
Clinical Development Plan for ONC201 in H3 K27M-mutant Glioma
The current Phase 2 clinical program for ONC201 includes a 50 subject registration cohort comprised of patients greater than 2 years of age with recurrent diffuse midline glioma that harbor the H3 K27M mutation, that have measurable disease, received radiation at least 90 days prior to enrollment and displayed evidence of progressive disease, and certain other criteria. This registration cohort is comprised of patients from multiple clinical trials and has completed enrollment. A BICR analysis is expected to take place in 2021 which, if favorable, may form the basis for regulatory approval of ONC201 in the United States. A BICR of the first 30 patients was completed and presented at the Society of Neuro-Oncology meeting held in November 2020. ONC201 has demonstrated a favorable safety profile with a database of over 350 treated patients. ONC201 has been generally well tolerated during extended periods of administration and the most commonly reported adverse events (AEs) were nausea/vomiting, fatigue and decreased lymphocyte counts.
The FDA has granted ONC201 Fast Track Designation for the treatment of adult recurrent H3 K27M-mutant high-grade glioma, Rare Pediatric Disease Designation for treatment of H3 K27M-mutant glioma, and Orphan Drug Designations for the treatment of glioblastoma and for the treatment of malignant glioma.
Over 300 subjects with recurrent high-grade gliomas, including gliomas with H3 K27M mutations, have been treated with ONC201 across three company-sponsored studies and an expanded access program.
Transaction Terms
Under the terms of the acquisition, Chimerix will pay Oncoceutics shareholders $78 million, of which $39 million is payable in Chimerix stock and $39 million is payable in cash, subject to certain customary adjustments. The payment of $39 million in cash is split $25 million at closing and $14 million on the first anniversary of closing. Oncoceutics shareholders will also potentially earn development, regulatory and sales milestones totaling up to $360 million across three development programs and royalties on combined sales of ONC201 and ONC206 of 15% up to $750 million in annual revenue and 20% above $750 million in annual revenue.
The Boards of Directors of both companies have approved the transaction and the transaction closed simultaneously with execution of definitive agreements on January 7, 2021.
Cooley LLP served as legal advisor to Chimerix. Evercore and Morgan Lewis served as exclusive financial advisor and legal advisor, respectively, to Oncoceutics. Spring Mountain Capital is the lead Oncoceutics investor.
Conference Call and Webcast
Chimerix will host a conference call and live audio webcast today at 8:30 a.m. ET. Slides that support the conference call are available in the Investors section of the Chimerix website, www.chimerix.com. To access the live conference call, please dial 877-354-4056 (domestic) or 678-809-1043 (international) at least five minutes prior to the start time and refer to conference ID 1877809.
A live audio webcast of the call will also be available on the Investors section of Chimerix’s website, www.chimerix.com. An archived webcast will be available on the Chimerix website approximately two hours after the event.
About Oncoceutics
Oncoceutics, Inc. is a clinical-stage drug discovery and development company with a novel class of compounds called imipridones that selectively induce cell death in cancer cells. ONC201 is an orally active small molecule DRD2 antagonist and ClpP agonist in late-stage clinical development for H3 K27M-mutant glioma with additional indications under clinical investigation. ONC206 is the second clinical-stage imipridone that is under clinical investigation for central nervous system tumors. The company has received grant support from NCI, FDA, The Musella Foundation, Michael Mosier Defeat DIPG Foundation, Dragon Master Foundation, The ChadTough Foundation, the National Brain Tumor Society, and a series of private and public partnerships.
About Chimerix
Chimerix is a development-stage biopharmaceutical company dedicated to accelerating the advancement of innovative medicines that make a meaningful impact in the lives of patients living with cancer and other serious diseases. Its two clinical-stage development programs are dociparstat sodium (DSTAT) and brincidofovir (BCV).
DSTAT is a potential first-in-class glycosaminoglycan compound derived from porcine heparin that, compared to commercially available forms of heparin, may be dosed at higher levels without associated bleeding-related complications. DSTAT is being studied in a Phase 2/3 trial to assess safety and efficacy in adults with acute lung injury with underlying COVID-19. A Phase 3 trial protocol to study DSTAT in acute myeloid leukemia has been developed in alignment with the US Food and Drug Administration (FDA) and the first patient visit is expected in early 2021. BCV is an antiviral drug candidate developed as a potential medical countermeasure for smallpox and is currently under review for regulatory approval in the United States. For further information, please visit the Chimerix website, www.chimerix.com.
Forward Looking Statements
This press release contains forward-looking statements within the meaning of the Private Securities Litigation Reform Act of 1995 that are subject to risks and uncertainties that could cause actual results to differ materially from those projected. Forward-looking statements include those relating to, among other things, the timing of the confirmatory response rate assessment for ONC201; the sufficiency of the data from the current Phase 2 clinical trial of ONC201 to support accelerated regulatory approval; the anticipated benefits of Chimerix’s acquisition of Oncoceutics; the completion of a Phase 3 study in acute myeloid leukemia with DSTAT and Chimerix’s ability to obtain regulatory approval for its clinical candidates, including ONC201 and BCV. Among the factors and risks that could cause actual results to differ materially from those indicated in the forward-looking statements are risks that the current Phase 2 clinical trial data for ONC201 will not support accelerated, or any, regulatory approval; the anticipated benefits of the acquisition of Oncoceutics may not be realized; BCV may not obtain regulatory approval from the FDA or such approval may be delayed or conditioned; risks that Chimerix will not obtain a procurement contract for BCV in smallpox in a timely manner or at all; Chimerix’s reliance on a sole source third-party manufacturer for drug supply; risks that ongoing or future trials may not be successful or replicate previous trial results, or may not be predictive of real-world results or of results in subsequent trials; risks and uncertainties relating to competitive products and technological changes that may limit demand for our drugs; risks that our drugs may be precluded from commercialization by the proprietary rights of third parties; and additional risks set forth in the Company's filings with the Securities and Exchange Commission. These forward-looking statements represent the Company's judgment as of the date of this release. 
The Company disclaims, however, any intent or obligation to update these forward-looking statements.
CONTACT:
Investor Relations:
Michelle LaSpaluto
919 972-7115
ir@chimerix.com
Will O’Connor
Stern Investor Relations
212-362-1200
will@sternir.com
Media:
David Schull
Russo Partners
858-717-2310
David.Schull@russopartnersllc.com
''',
'''
LONDON, Feb. 02, 2021 (GLOBE NEWSWIRE) -- Reigning double Formula E Champions, DS TECHEETAH, and Verizon Media are delighted to announce a new multi-year strategic partnership. This partnership sees Verizon Media join the team with its premium, global Yahoo brand featuring on the livery of the DS E-TENSE FE20 and across the team ecosystem.
DS TECHEETAH’s sustainable platform, on track success and the incredible growth of Formula E will position the Yahoo brand at the forefront of the exciting world of electric motorsport, aligning with the company’s commitment to innovative, sustainable technology that will drive the world forward. Verizon Media is home to media, technology, and communication brands that (with its global partnership with Microsoft) reach nearly 900 million unique viewers globally per month, as well as industry-leading media streaming and ad platforms that connect consumers and brands all around the world through creative and next-generation, extended reality (XR) content experiences that will be supercharged through the roll-out and adoption of 5G technology.
Keith Smout, Chief Commercial Officer, DS TECHEETAH, said:
“We are delighted to announce our new partnership with Verizon Media. This partnership represents the clear value of being involved with Formula E and the desire of global business leaders to align themselves with our winning team and at the same time be involved in a truly sustainable sport. There has been an incredible amount of hard work done in bringing this opportunity to fruition and I want to personally thank Jon Wilde, our Head of Business Development, and our partners at DS Automobiles, who have all worked tirelessly with everyone at Verizon Media to bring this partnership together.”
Kristiana Carlet, VP International Sales at Verizon Media said:
“Our business is at the forefront of building next-generation, sustainable technology as well as innovative content experiences for our customers and consumers. We create networks that move the world forward by connecting people to their passions. Supporting DS TECHEETAH in this innovative sport not only highlights our commitment to this as Verizon Media but is an exciting environment to reach people who might want to take a new and fresh look at our Yahoo brand and products as we create new experiences in 2021. We’re excited to be cheering the team and sharing content from the sport across our properties to our audiences around the world and would like to thank DS Automobiles, everyone at DS TECHEETAH, as well as our international partnerships team, for this opportunity.”
Verizon Communications Inc. (NYSE, Nasdaq: VZ) was formed on June 30, 2000 and is one of the world’s leading providers of technology, communications, information and entertainment products and services. Headquartered in New York City and with a presence around the world, Verizon generated revenues of $128.3 billion in 2020. The company offers data, video and voice services and solutions on its award-winning networks and platforms, delivering on customers’ demand for mobility, reliable network connectivity, security and control.
VERIZON’S ONLINE MEDIA CENTER: News releases, stories, media contacts and other resources are available at https://www.verizon.com/about/media-center. News releases are also available through an RSS feed. To subscribe, visit www.verizon.com/about/rss-feeds/.
Media contact:
Gareth Jordan
gareth.jordan@verizonmedia.com
+44 7980 942883
''',
'''
LEONBERG, Germany, July 13, 2020 (Newswire.com) - In earlier times, it was one of the annoying duties of every Windows user to reinstall the system at irregular intervals. Braked by programs that have settled into the autostart areas or left behind junk data during the de-installation, a complete system reset was inevitable. Even today, many programs interfere with the loading process of Windows. ASCOMP's Cleaning Suite puts an end to this and now supports cleaning Chrome, Edge and Firefox.
As one of the few programs of its kind, Cleaning Suite can remove startup entries that are in Windows Task Planning and are difficult to access. "If you are not sure, you can only temporarily disable the start of a program," explains Andreas Stroebel, Managing Director of ASCOMP.
In the new version, functions for cleaning browser histories have been integrated. This eliminates browsing traces, cookies, and temporary files from Google Chrome, Microsoft Edge, and Mozilla Firefox. The user interface has also been improved and the program optimized for use on Windows 10.
Cleaning Suite not only cleans internet browsers, Windows itself can also be freed from temporary data, orphaned program shortcuts, and empty folders. An uninstaller provides the ability to uninstall programs or delete outdated program entries.
Too aggressive registry optimization algorithms of some competing products caused malfunctions on many systems in the past. That's why Cleaning Suite optimizes the Windows Registry very carefully and offers the possibility of a reset. Various optimization areas can be selected or de-selected at any time.
For private users, the software is free of charge; occasionally, product information about other products made by the manufacturer is displayed. The paid version for $19.90 removes this information and provides technical product support.
Learn more about Cleaning Suite and download it at http://www.cleaningsuite.com
''',
'''
Advance Auto Parts Reports Fourth Quarter and Full Year 2020 Results
Fourth Quarter Net Sales Increased 12.0% to $2.4B; Comparable Store Sales Increased 4.7%
Diluted EPS Increased 19.6% to $1.65; Adjusted Diluted EPS Increased 14.0% to $1.87 Including $0.22 Impact from COVID-19
Full Year Net Sales Increased 4.1% to $10.1B; Comparable Store Sales Increased 2.4%
Diluted EPS Increased 4.4% to $7.14; Adjusted Diluted EPS increased 3.9% to $8.51 Including $0.66 Impact from COVID-19 Operating Cash Flow Increased 11.9% to $969.7M; Free Cash Flow Increased 17.7% to $702.1M
February 16, 2021 06:30 AM Eastern Standard Time
RALEIGH, N.C.--(BUSINESS WIRE)--Advance Auto Parts, Inc. (NYSE: AAP), a leading automotive aftermarket parts provider in North America that serves both professional installer and do-it-yourself customers, today announced its financial results for the fourth quarter and full year ended January 2, 2021.
"Since the onset of the pandemic, we have prioritized the health, safety and wellbeing of our team members and customers. We are incredibly grateful to our team members and independent partners for their dedication and perseverance. They were an inspiration to all of us as they cared for each other and our customers while balancing numerous obstacles both at work and at home. This enabled us to do our part to keep America moving," said Tom Greco, president and chief executive officer.
"As a result, Advance delivered another quarter of growth in comp sales, margin expansion and free cash flow as we crossed $10B in annual net sales for the first time ever. We believe our DIY omnichannel net sales continued to benefit from the impact COVID-19 had on the economy and resulting consumer behaviors. Meanwhile, we leveraged our scale to differentiate Advance and gain market share in the quarter. This was highlighted by the successful launch of the DieHard® brand, the expansion of our Carquest® brand and continued success from our Advance Same Day™ suite of fulfillment options. We also ramped up execution on our primary initiatives to expand gross margin in the quarter including strategic pricing, owned brand expansion and the streamlining of our supply chain. We believe our actions in the fourth quarter position us well to drive additional top-line growth and further margin expansion in 2021.
"Through the first four weeks of 2021, we are growing comparable store sales low double digits with strength across both DIY omnichannel and Professional. We are also encouraged by improving trends in the Northeast and Mid Atlantic Regions, which are still lagging the country, but closing the gap. In addition, we remain laser focused on the execution of our long term plan to drive growth at or above industry growth rates, deliver meaningful margin expansion, and return excess cash to shareholders. We look forward to sharing more details in our March release of our third annual Sustainability and Social Responsibility Report, as well as an update on our strategic business plan, which we will share with investors on April 20th."
Fourth Quarter 2020 Highlights (a)
Net sales increased 12.0% to $2.4B; Comparable store sales (b) increased 4.7%
Operating income increased 20.4% to $151.8M; Operating income margin expanded 45 bps to 6.4%
Including approximately $19 million in COVID-19 related expenses, Adjusted operating income (b) increased 14.6% to $171.8M; Adjusted operating income margin (b) expanded 17 bps to 7.3%
Including the impact of approximately $0.22 as a result of COVID-19 expenses, Diluted EPS increased 19.6% to $1.65 and Adjusted diluted EPS (b) increased 14.0% to $1.87
Returned $319.9M to shareholders through the Company's share repurchase program
Full Year 2020 Highlights (a)
Net sales increased 4.1% to $10.1B; Comparable store sales (b) increased 2.4%
Operating income increased 10.7% to $749.9M; Operating income margin expanded 45 bps to 7.4%
Including approximately $60M in COVID-19 related expenses, Adjusted operating income (b) increased 4.1% to $827.3M; Adjusted operating income margin (b) was in-line with prior year at 8.2%
Including the impact of approximately $0.66 as a result of COVID-19 expenses, Diluted EPS increased 4.4% to $7.14 and Adjusted diluted EPS (b) increased 3.9% to $8.51
Operating cash flow increased 11.9% to $969.7M; Free cash flow (b) increased 17.7% to $702.1M
Returned $514.9M to shareholders through the combination of share repurchases and the Company's quarterly cash dividends
(a) The fourth quarter and full year 2020 included 13 weeks and 53 weeks, while the fourth quarter and full year 2019 included 12 weeks and 52 weeks.
(b) Comparable store sales exclude sales to independently owned Carquest locations, as well as the impact of the additional week in 2020. For a better understanding of the Company's adjusted results, refer to the reconciliation of non-GAAP adjustments in the accompanying financial tables included herein.
Fourth Quarter and Full Year 2020 Operating Results
Fourth quarter 2020 Net sales totaled $2.4 billion, a 12.0% increase compared to the fourth quarter of the prior year. Comparable store sales growth for the fourth quarter 2020 was 4.7%. For the full year 2020, Net sales were $10.1 billion, an increase of 4.1% from full year 2019 results. Full year 2020 Comparable store sales growth was 2.4%. The fourth quarter and full year 2020 included 13 weeks and 53 weeks compared to 12 weeks and 52 weeks for the fourth quarter and full year 2019. The additional week in 2020 added $158.5 million to fourth quarter and full year Net sales.
Adjusted gross profit margin was 45.9% of Net sales in the fourth quarter of 2020, a 192 basis point increase from the fourth quarter of 2019. This improvement was primarily driven by price improvements, inventory management, including a reduction in inventory shrink, and supply chain leverage. The Company's GAAP Gross profit margin increased to 45.8% from 44.0% in the fourth quarter of the prior year. Adjusted gross profit margin for the full year 2020 was 44.4%, a 38 basis points improvement from prior year, while full year 2020 GAAP Gross profit margin increased 52 basis points to 44.3%.
Increased costs associated with COVID-19, as well as the additional week in the fourth quarter of 2020, resulted in higher SG&A expense compared to the fourth quarter of 2019. Adjusted SG&A as a percent of Net sales increased to 38.6% in the fourth quarter 2020, compared to 36.9% in the prior year. In addition to the COVID-19 related expenses and additional week, the increase in adjusted SG&A as a percent of Net sales was driven by lease termination costs related to the ongoing optimization of our real estate footprint, higher medical claim expenses and investment in marketing in the fourth quarter of 2020. The Company's GAAP SG&A for the fourth quarter 2020 was 39.4% of Net sales compared to 38.0% in the same quarter of the prior year. For the full year 2020, Adjusted SG&A was 36.2%, a 39-basis point increase compared to the full year 2019. The Company's full year 2020 GAAP SG&A was 36.9% of Net sales compared to 36.8% for the full year 2019. The additional week in 2020 contributed $53.5 million to fourth quarter and full year SG&A.
The Company generated Adjusted operating income of $171.8 million in the fourth quarter 2020, an increase of 14.6% from prior year results. Fourth quarter 2020 Adjusted operating income margin increased to 7.3% of Net sales, an improvement of 17 basis points from the prior year. On a GAAP basis, the Company's Operating income was $151.8 million, an increase of 20.4% compared to the fourth quarter of the prior year and Operating income margin was 6.4% of Net sales, which was 45 basis points improved from the prior year. For full year 2020, Adjusted operating income was $827.3 million, an increase of 4.1% from the full year 2019. Full year 2020 Adjusted operating income margin was unchanged from prior year results at 8.2% of Net sales. The Company's full year 2020 GAAP Operating income totaled $749.9 million, 7.4% of Net sales, an increase of 45 basis points compared to the full year 2019. The additional week in 2020 contributed $20.1 million to fourth quarter and full year Operating income.
The Company's effective tax rate in the fourth quarter 2020 was 20.4%. The Company's Adjusted diluted EPS was $1.87 for the fourth quarter 2020, an increase of 14.0% compared to the same quarter in the prior year. On a GAAP basis, the Company's Diluted EPS increased 19.6% to $1.65. The effective tax rate for the full year 2020 was 24.3%. Full year 2020 Adjusted diluted EPS was $8.51, an increase of 3.9% from full year 2019 results. The Company's diluted EPS on a GAAP basis increased 4.4% to $7.14 year over year. The additional week in 2020 contributed $0.23 to the fourth quarter and full year Diluted EPS.
Operating cash flow was $969.7 million for the full year 2020 versus $866.9 million for the full year 2019, an increase of 11.9%. Free cash flow for the full year 2020 was $702.1 million, an increase of 17.7% compared to the full year 2019.
Capital Allocation
During 2020, the Company repurchased a total of 3.0 million shares of its common stock for an aggregate amount of $458.5 million, or an average price of $150.65 per share. At the end of the fourth quarter of 2020, the Company had $432.2 million remaining under the share repurchase program.
On February 10, 2021, the Company's Board of Directors declared a quarterly cash dividend of $0.25 per share to be paid on April 2, 2021 to all common shareholders of record as of March 19, 2021.
Full Year 2021 Guidance
"Given our belief that our economy is beginning to see signs of stabilization and progress is underway with COVID-19 vaccinations, we are optimistic regarding a continued recovery in 2021. While uncertainty remains, we are providing financial guidance for the full year 2021 based on the factors we know today. In addition to our 2021 outlook, we are highlighting key assumptions impacting our current financial models," said Jeff Shepherd, executive vice president and chief financial officer.
The Company provided the following assumptions based on projections for the U.S. and guidance ranges related to its 2021 outlook:
An increase in total vehicle miles driven in the U.S. from 2020 levels, but remaining below 2019 levels
Consistent year-over-year federal tax rate
No material increases in the federal minimum wage
A reduction in COVID-19 related expenses
For a better understanding of the Company's adjusted results, refer to the reconciliation of non-GAAP adjustments in the accompanying financial tables included herein. Because of the forward-looking nature of the 2021 non-GAAP financial measures, specific quantification of the amounts that would be required to reconcile these non-GAAP financial measures to their most directly comparable GAAP financial measures are not available at this time.
Beginning in first quarter 2021, the impact of last in, first out ("LIFO") on the Company's results of operations will be a reconciling item to arrive at its non-GAAP financial measures, as applicable. The Company believes this measure will assist in comparing the Company's operating results with the operational performance of other companies in its industry. For a better understanding of the Company's adjusted results, refer to the reconciliation of non-GAAP adjustments in the accompanying financial tables included herein.
Investor Conference Call
The Company will detail its results for the fourth quarter and full year 2020 via a webcast scheduled to begin at 8 a.m. Eastern Time on Tuesday, February 16, 2021. The webcast will be accessible via the Investor Relations page of the Company's website (ir.AdvanceAutoParts.com).
To join by phone, please pre-register online for dial-in and passcode information. Upon registering, participants will receive a confirmation with call details and a registrant ID. While registration is open through the live call, the company suggests registering a day in advance or at minimum 10 minutes before the start of the call. A replay of the conference call will be available on the Advance website for one year.
About Advance Auto Parts
Advance Auto Parts, Inc. is a leading automotive aftermarket parts provider that serves both professional installer and do-it-yourself customers. As of January 2, 2021, Advance operated 4,806 stores and 170 Worldpac branches in the United States, Canada, Puerto Rico and the U.S. Virgin Islands. The Company also serves 1,277 independently owned Carquest branded stores across these locations in addition to Mexico, Grand Cayman, the Bahamas, Turks and Caicos and British Virgin Islands. Additional information about Advance, including employment opportunities, customer services, and online shopping for parts, accessories and other offerings can be found at www.AdvanceAutoParts.com.
Forward-Looking Statements
Certain statements herein are “forward-looking statements” within the meaning of the Private Securities Litigation Reform Act of 1995. Forward-looking statements are usually identifiable by words such as “anticipate,” “believe,” “could,” “estimate,” “expect,” “forecast,” "guidance," “intend,” “likely,” “may,” “plan,” “position,” “possible,” “potential,” “probable,” “project,” “should,” “strategy,” “will,” or similar language. All statements other than statements of historical fact are forward-looking statements, including, but not limited to, statements about the Company's strategic initiatives, operational plans and objectives, expectations for economic recovery and future business and financial performance, as well as statements regarding underlying assumptions related thereto. Forward-looking statements reflect the Company's views based on historical results, current information and assumptions related to future developments. Except as may be required by law, the Company undertakes no obligation to update any forward-looking statements made herein. Forward-looking statements are subject to a number of risks and uncertainties that could cause actual results to differ materially from those projected or implied by the forward-looking statements. They include, among others, factors related to the timing and implementation of strategic initiatives, the highly competitive nature of the Company's industry, demand for the Company's products and services, complexities in its inventory and supply chain, challenges with transforming and growing its business and factors related to the current global pandemic. Please refer to “Item 1A. 
Risk Factors.” of the Company's most recent Annual Report on Form 10-K, as updated by its Quarterly Reports on Form 10-Q and other filings made by the Company with the Securities and Exchange Commission for a description of these and other risks and uncertainties that could cause actual results to differ materially from those projected or implied by the forward-looking statements.
Advance Auto Parts, Inc. and Subsidiaries
Condensed Consolidated Balance Sheets
The condensed consolidated statement of cash flows for the year ended December 28, 2019 has been derived from the audited consolidated financial statements at that date, but does not include the footnotes required by GAAP.
Reconciliation of Non-GAAP Financial Measures
The Company's financial results include certain financial measures not derived in accordance with accounting principles generally accepted in the United States of America. Non-GAAP financial measures should not be used as a substitute for GAAP financial measures, or considered in isolation, for the purpose of analyzing the Company's operating performance, financial position or cash flows. The Company has presented these non-GAAP financial measures as it believes that the presentation of its financial results that exclude transformation expenses under the Company's strategic business plan and non-cash amortization related to the acquired General Parts International, Inc. (“GPI”) intangible assets and other non-recurring adjustments is useful and indicative of the Company's base operations because the expenses vary from period to period in terms of size, nature and significance and/or relate to store closure and consolidation activity in excess of historical levels. These measures assist in comparing the Company's current operating results with past periods and with the operational performance of other companies in its industry. The disclosure of these measures allows investors to evaluate the Company's performance using the same measures management uses in developing internal budgets and forecasts and in evaluating management’s compensation. Included below is a description of the expenses that the Company has determined are not normal, recurring cash operating expenses necessary to operate its business and the rationale for why providing these measures is useful to investors as a supplement to the GAAP measures.
Transformation Expenses — Costs incurred in connection with our business plan that focuses on specific transformative activities that relate to the integration and streamlining of our operating structure across the enterprise, that we do not view to be normal cash operating expenses. These expenses will include, but not be limited to the following:
Restructuring costs - Costs primarily relating to the early termination of lease obligations, asset impairment charges, other facility closure costs and Team Member severance in connection with our 2018 Store Rationalization plan and 2017 Store and Supply Chain Rationalization plan.
Third-party professional services - Costs primarily relating to services rendered by vendors for assisting us with the development of various information technology and supply chain projects in connection with our enterprise integration initiatives.
Other significant costs - Costs primarily relating to accelerated depreciation of various legacy information technology and supply chain systems in connection with our enterprise integration initiatives and temporary off-site workspace for project teams who are primarily working on the development of specific transformative activities that relate to the integration and streamlining of our operating structure across the enterprise.
GPI Amortization of Acquired Intangible Assets — As part of our acquisition of GPI, we obtained various intangible assets, including customer relationships, non-compete contracts and favorable leases agreements, which we expect to be subject to amortization through 2025.
Reconciliation of Adjusted Net Income and Adjusted EPS:
NOTE: Management uses Free cash flow as a measure of its liquidity and believes it is a useful indicator to investors or potential investors of the Company's ability to implement growth strategies and service debt. Free cash flow is a non-GAAP measure and should be considered in addition to, but not as a substitute for, information contained in the Company's condensed consolidated statement of cash flows as a measure of liquidity.
2021 Update to Non-GAAP Measures
Beginning Q1 2021, the impact of LIFO on the Company's results of operations will be a reconciling item to arrive at its non-GAAP financial measures, as applicable. The following table summarizes the quarterly and full year LIFO adjustments that were recorded in Cost of sales for 2020 and 2019.
The adjustments to the four quarters ended January 2, 2021 represent charges incurred resulting from the early redemption of the Company's 2022 and 2023 senior unsecured notes. The adjustments to the four quarters ended December 28, 2019 represent an out-of-period correction related to received not invoiced inventory and charges incurred resulting from the early redemption of the Company's 2020 senior unsecured notes.
NOTE: Management believes its Adjusted Debt to Adjusted EBITDAR ratio (“leverage ratio”) is a key financial metric for debt securities, as reviewed by rating agencies, and believes its debt levels are best analyzed using this measure. The Company’s goal is to maintain a 2.5 times leverage ratio and investment grade rating. The Company's credit rating directly impacts the interest rates on borrowings under its existing credit facility and could impact the Company's ability to obtain additional funding. If the Company was unable to maintain its investment grade rating this could negatively impact future performance and limit growth opportunities. Similar measures are utilized in the calculation of the financial covenants and ratios contained in the Company's financing arrangements. The leverage ratio calculated by the Company is a non-GAAP measure and should not be considered a substitute for debt to net earnings, net earnings or debt as determined in accordance with GAAP. The Company adjusts the calculation to remove rent expense and to add back the Company’s existing operating lease liabilities related to their right-of-use assets to provide a more meaningful comparison with the Company’s peers and to account for differences in debt structures and leasing arrangements. The Company’s calculation of its leverage ratio might not be calculated in the same manner as, and thus might not be comparable to, similarly titled measures by other companies.
Store Information:
During the fifty-three weeks ended January 2, 2021, 13 stores and branches were opened and 74 were closed or consolidated, resulting in a total of 4,976 stores and branches as of January 2, 2021, compared to a total of 5,037 stores and branches as of December 28, 2019.
''',
'''
More Black Americans Report Permanently Changing Their Spending and Saving Habits as a Direct Result of the Pandemic
COVID-19 Crisis Has 75% of Black Consumers Thinking Differently About Their Future, Prompting a Greater Interest in Financial Planning Resources
Eric D. Bailey CFP®, founder of Bailey Wealth Advisors in Silver Spring, Md. and a registered representative of Lincoln Financial Advisors (Photo: Business Wire)
Eric D. Bailey CFP®, founder of Bailey Wealth Advisors in Silver Spring, Md. and a registered representative of Lincoln Financial Advisors (Photo: Business Wire)
Eric D. Bailey CFP®, founder of Bailey Wealth Advisors in Silver Spring, Md. and a registered representative of Lincoln Financial Advisors (Photo: Business Wire)
February 09, 2021 10:37 AM Eastern Standard Time
RADNOR, Pa.--(BUSINESS WIRE)--The pandemic continues to create financial challenges for all Americans, but research from Lincoln Financial Group (NYSE: LNC) shows that minorities are among those hit hardest. The company’s October 2020 Consumer Sentiment Tracker found Black consumers (32%) are most likely to have experienced job loss as a result of the pandemic – a situation that inevitably contributes to their top financial concerns of not having enough emergency savings (42%) and not being able to cover day-to-day expenses (41%).
“Our goal is to help Black Americans and all consumers understand the importance of saving for retirement and creating generational wealth, as well as educate on how to take those first steps toward making it reality”
Tweet this
The Crisis Drives Change
Black Americans went on to express their current financial mindset is most impacted by the events of recent months. According to the research, 74% are planning to make permanent changes to the way they spend and save due to the COVID-19 crisis. Furthermore, 75% are planning for their financial future differently as a result of the pandemic, prompting a growing appetite for financial planning resources. Black consumers (67%) are most likely to say they are reading and learning about financial markets and investing, as well as thinking about whether they have the right insurance (61%). This is a solid foundation to build upon in order to create positive financial outcomes.
“Our goal is to help Black Americans and all consumers understand the importance of saving for retirement and creating generational wealth, as well as educate on how to take those first steps toward making it reality,” said Eric D. Bailey CFP®, founder of Bailey Wealth Advisors in Silver Spring, Md. and a registered representative of Lincoln Financial Advisors. “By tapping into online budgeting tools, calculators and other resources, people can make small changes that really add up in the long run. A financial plan doesn’t have to be complicated—I like to think of it as a roadmap to ensure you’re on track to achieving the life you envision for the future.”
Three Tips to Build Wealth
Bailey offers three recommendations to help both Black consumers and all Americans build wealth and achieve the retirement they envision for themselves and their families:
Focus on education and financial literacy – from a young age. In a consumer-driven economy, it is important to recognize the benefits, as well as the challenges, that money presents. For example, first-time credit card users may not understand compound interest rates or the consequences of bad credit until it is much too late. Learning the true value of proper budgeting, credit worthiness and smart money management early is the foundation for a lifetime of good financial habits.
Make longevity planning a priority. Building and sustaining wealth is a process, one in which consumers should match lifelong financial goals to life expectancy. This requires strategic planning and a true desire to create a legacy for oneself, family and community. Consumers should address any unique financial needs early to help ensure that every aspect of their wealth picture is understood and incorporated into their long-term plan. The sooner the process starts, the stronger the outcome.
Meet with a financial professional. A financial professional can provide valued expertise that fits a consumer’s specific situation and goals. Certified Financial Professionals (CFPs) in particular have special training and expertise in interpreting today’s complicated tax strategies, insurance options and economic forecasts in a way that strategically meets consumers’ uniquely personal needs.
Translating Optimism into Outcomes
The study went on to show that Black consumers express the most positive feelings—hopeful (28%), interested (22%) and opportunistic (17%)—when it comes to financial planning. They prefer to learn about financial products by seeking out advice from a financial professional (39%) followed by financial services companies (28%).
“While it’s good news that more Black Americans are feeling optimistic, the research also indicates there is still work to be done,” said Bailey. “Unfortunately, the wealth gap for African Americans remains significant. In addition, when insurance and retirement solutions fall lower on the priority list due to a crisis like job loss, it then affects long-term financial outcomes for people and their families. For that reason, we are committed to educating the community about the importance of planning for their financial future.”
Bailey and his practice are not alone in their commitment to the Black community. In September 2020, Lincoln Financial Group announced its plan to amplify the company’s ongoing commitment to diversity and inclusion and drive meaningful, measurable change. As part of that long-term plan, Lincoln will continue to grow its professional network of Black financial advisors and planners to support advisor recruiting and development efforts, and to help define new strategies for supporting Black clients.
Visit www.lincolnfinancial.com for more tools, resources and other tips on how to protect the ones you love the most.
About Lincoln Financial Group
Lincoln Financial Group provides advice and solutions that help people take charge of their financial lives with confidence and optimism. Today, more than 17 million customers trust our retirement, insurance and wealth protection expertise to help address their lifestyle, savings and income goals, and guard against long-term care expenses. Headquartered in Radnor, Pennsylvania, Lincoln Financial Group is the marketing name for Lincoln National Corporation (NYSE:LNC) and its affiliates. The company had $303 billion in end-of-period account values as of December 31, 2020. Lincoln Financial Group is a committed corporate citizen included on major sustainability indices including the Dow Jones Sustainability Index North America and FTSE4Good. Dedicated to diversity and inclusion, we earned perfect 100 percent scores on the Corporate Equality Index and the Disability Equality Index, and rank among Forbes’ World’s Best Employers, Best Large Employers, Best Employers for Diversity, and Best Employers for Women, and Newsweek’s Most Responsible Companies. Learn more at: www.LincolnFinancial.com. Follow us on Facebook, Twitter, LinkedIn, and Instagram. Sign up for email alerts at http://newsroom.lfg.com.
Eric Bailey is a registered representative of, and Bailey Wealth Advisors is a marketing name for registered representatives of, Lincoln Financial Advisors. Securities and investment advisory services offered through Lincoln Financial Advisors Corp., a broker-dealer and registered investment advisor, member SIPC.
LCN-3441926-020821 '''
]
}
news_items = [
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html'}],
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm'},
'summary': '<p align="justify">NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:<br /></p>'},
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'dc_identifier': '2179148',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Halper Sadeh LLP'}], 'dc_modified': 'Sat, 20 Feb 2021 15:54 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Company Announcement', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179147/0/en/LifeSave-Transport-Announces-Hiring-Push-for-Flight-Nurses-and-Medics.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179147/0/en/LifeSave-Transport-Announces-Hiring-Push-for-Flight-Nurses-and-Medics.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179147/0/en/LifeSave-Transport-Announces-Hiring-Push-for-Flight-Nurses-and-Medics.html'}],
'title': 'LifeSave Transport Announces Hiring Push for Flight Nurses and Medics',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'LifeSave Transport Announces Hiring Push for Flight Nurses and Medics'},
'summary': 'Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska <pre>Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska</pre>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska <pre>Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska</pre>'},
'published': 'Sat, 20 Feb 2021 05:59 GMT',
'dc_identifier': '2179147',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Air Methods'}], 'dc_modified': 'Sat, 20 Feb 2021 05:59 GMT',
'tags': [{'term': 'Company Announcement', 'scheme': None, 'label': None}]},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179145/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-PEN-QS-SWI-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179145/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-PEN-QS-SWI-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179145/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-PEN-QS-SWI-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html'}],
'title': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS PEN, QS, SWI INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS PEN, QS, SWI INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits'},
'summary': '<p align="justify">NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:<br /></p>'},
'published': 'Sat, 20 Feb 2021 03:50 GMT',
'dc_identifier': '2179145',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Kahn Swick & Foti, LLC'}], 'dc_modified': 'Sat, 20 Feb 2021 03:50 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Law & Legal Issues', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179146/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-CLOV-IRTC-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179146/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-CLOV-IRTC-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179146/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-CLOV-IRTC-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html'}],
'title': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS CLOV, IRTC INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS CLOV, IRTC INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits'},
'summary': '<p align="justify">NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:<br /></p>'},
'published': 'Sat, 20 Feb 2021 03:50 GMT',
'dc_identifier': '2179146',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Kahn Swick & Foti, LLC'}], 'dc_modified': 'Sat, 20 Feb 2021 03:50 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Law & Legal Issues', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179142/0/en/Rail-Shippers-Defeat-BNSF-CSX-NS-and-UP-s-Attempts-to-Insulate-Anticompetitive-Conduct-from-Liability.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179142/0/en/Rail-Shippers-Defeat-BNSF-CSX-NS-and-UP-s-Attempts-to-Insulate-Anticompetitive-Conduct-from-Liability.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179142/0/en/Rail-Shippers-Defeat-BNSF-CSX-NS-and-UP-s-Attempts-to-Insulate-Anticompetitive-Conduct-from-Liability.html'}],
'title': 'Rail Shippers Defeat BNSF, CSX, NS, and UP’s Attempts to Insulate Anticompetitive Conduct from Liability',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Rail Shippers Defeat BNSF, CSX, NS, and UP’s Attempts to Insulate Anticompetitive Conduct from Liability'},
'summary': '<p align="left">WASHINGTON, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Today, in the United States District Court for the District of Columbia, Judge Paul Friedman denied a motion by the defendant railroads BNSF, CSX, NS, and UP in <em>In re Rail Freight Fuel Surcharge Antitrust Litigation</em> (Case No. 07-489) to exclude certain evidence from future antitrust trials. The plaintiffs in this multidistrict litigation, which began as a class action and now comprises more than 200 of the country’s largest rail shippers, allege that the railroads unlawfully fixed prices through collusive fuel-surcharge programs and policies, beginning in 2003.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">WASHINGTON, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Today, in the United States District Court for the District of Columbia, Judge Paul Friedman denied a motion by the defendant railroads BNSF, CSX, NS, and UP in <em>In re Rail Freight Fuel Surcharge Antitrust Litigation</em> (Case No. 07-489) to exclude certain evidence from future antitrust trials. The plaintiffs in this multidistrict litigation, which began as a class action and now comprises more than 200 of the country’s largest rail shippers, allege that the railroads unlawfully fixed prices through collusive fuel-surcharge programs and policies, beginning in 2003.<br /></p>'},
'published': 'Sat, 20 Feb 2021 02:44 GMT',
'dc_identifier': '2179142',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Hausfeld'}], 'dc_modified': 'Sat, 20 Feb 2021 02:45 GMT',
'tags': [{'term': 'Law & Legal Issues', 'scheme': None, 'label': None}], 'dc_keyword': 'antitrust'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/20/2179136/0/en/Ebix-Shares-Strong-Business-Outlook-and-Discusses-Recent-Events.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179136/0/en/Ebix-Shares-Strong-Business-Outlook-and-Discusses-Recent-Events.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/20/2179136/0/en/Ebix-Shares-Strong-Business-Outlook-and-Discusses-Recent-Events.html'}],
'tags': [{'term': 'Nasdaq:EBIX', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US2787152063', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Calendar of Events', 'scheme': None, 'label': None}],
'title': 'Ebix Shares Strong Business Outlook and Discusses Recent Events',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Ebix Shares Strong Business Outlook and Discusses Recent Events'},
'summary': '<p align="justify">JOHNS CREEK, Ga., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Ebix, Inc. (NASDAQ: EBIX), a leading international supplier of On-Demand software and E-commerce services to the insurance, financial, healthcare and e-learning industries, today issued a press release to emphasize a strong current business outlook while discussing the auditor resignation, the income materiality of the issues highlighted, and the various related steps being taken by the Company.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">JOHNS CREEK, Ga., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Ebix, Inc. (NASDAQ: EBIX), a leading international supplier of On-Demand software and E-commerce services to the insurance, financial, healthcare and e-learning industries, today issued a press release to emphasize a strong current business outlook while discussing the auditor resignation, the income materiality of the issues highlighted, and the various related steps being taken by the Company.<br /></p>'},
'published': 'Sat, 20 Feb 2021 00:05 GMT',
'dc_identifier': '2179136',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Ebix, Inc.'}], 'dc_modified': 'Sat, 20 Feb 2021 00:06 GMT', 'dc_keyword': 'India'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179134/0/en/FSIS-Recall-Release-005-2021-Without-Inspection.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179134/0/en/FSIS-Recall-Release-005-2021-Without-Inspection.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179134/0/en/FSIS-Recall-Release-005-2021-Without-Inspection.html'}],
'title': 'FSIS Recall Release 005-2021 - Without Inspection',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'FSIS Recall Release 005-2021 - Without Inspection'},
'summary': '<p>WASHINGTON, D.C., Feb. 19, 2021 (GLOBE NEWSWIRE) -- </p> <p>\xa0</p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p>WASHINGTON, D.C., Feb. 19, 2021 (GLOBE NEWSWIRE) -- </p> <p>\xa0</p>'},
'published': 'Fri, 19 Feb 2021 23:31 GMT',
'dc_identifier': '2179134',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'USDA Food Safety and Inspection Service'}],
'dc_modified': 'Fri, 19 Feb 2021 23:32 GMT',
'tags': [{'term': 'Company Announcement', 'scheme': None, 'label': None}]},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179133/0/en/Naropa-University-Celebrates-1st-Black-Futures-Month.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179133/0/en/Naropa-University-Celebrates-1st-Black-Futures-Month.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179133/0/en/Naropa-University-Celebrates-1st-Black-Futures-Month.html'}],
'title': 'Naropa University Celebrates 1st Black Futures Month',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Naropa University Celebrates 1st Black Futures Month'},
'summary': '<p>Boulder, CO, Feb. 19, 2021 (GLOBE NEWSWIRE) -- <em>“Let’s give ourselves the freedom and permission to follow our radical imaginations and visualize the world we deserve because in order to realize a society in which we have healthcare for all, a meaningful wage, self-determination, and true freedom, we have to first imagine it!” ~ Movement For Black Lives (m4bl.org)</em></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p>Boulder, CO, Feb. 19, 2021 (GLOBE NEWSWIRE) -- <em>“Let’s give ourselves the freedom and permission to follow our radical imaginations and visualize the world we deserve because in order to realize a society in which we have healthcare for all, a meaningful wage, self-determination, and true freedom, we have to first imagine it!” ~ Movement For Black Lives (m4bl.org)</em></p>'},
'published': 'Fri, 19 Feb 2021 23:17 GMT',
'dc_identifier': '2179133',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Naropa University'}], 'dc_modified': 'Fri, 19 Feb 2021 23:17 GMT', 'media_content': [
{'medium': 'image', 'type': 'image/jpeg', 'width': '150',
'url': 'https://ml.globenewswire.com/Resource/Download/31b2dc60-cc09-441d-8390-80711fec5767'}],
'media_text': {'type': 'html'},
'media_credit': [{'role': 'publishing company', 'content': 'GlobeNewswire Inc.'}],
'credit': 'GlobeNewswire Inc.', 'tags': [{'term': 'Religion', 'scheme': None, 'label': None},
{'term': 'Press releases', 'scheme': None, 'label': None}]},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179132/0/en/Access-Power-Co-Inc-is-pleased-to-announce-the-Company-has-hired-Ben-Borgers-as-its-PCAOB-CPA-Auditor.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179132/0/en/Access-Power-Co-Inc-is-pleased-to-announce-the-Company-has-hired-Ben-Borgers-as-its-PCAOB-CPA-Auditor.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179132/0/en/Access-Power-Co-Inc-is-pleased-to-announce-the-Company-has-hired-Ben-Borgers-as-its-PCAOB-CPA-Auditor.html'}],
'tags': [{'term': 'Other OTC:ACCR', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US00431N1081', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Company Announcement', 'scheme': None, 'label': None}],
'title': 'Access-Power & Co., Inc. is pleased to announce the Company has hired Ben Borgers as its PCAOB/CPA Auditor',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Access-Power & Co., Inc. is pleased to announce the Company has hired Ben Borgers as its PCAOB/CPA Auditor'},
'summary': '<p>GRAND HAVEN, Mich., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Access-Power & Co., Inc., (“ACCR or the Company”), a Grand Haven based diversified Company that is now also a soon to be International Marijuana/Hemp Company, is pleased to announce today that the Company has hired Ben Borgers as our Company PCAOB/AUDITOR<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p>GRAND HAVEN, Mich., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Access-Power & Co., Inc., (“ACCR or the Company”), a Grand Haven based diversified Company that is now also a soon to be International Marijuana/Hemp Company, is pleased to announce today that the Company has hired Ben Borgers as our Company PCAOB/AUDITOR<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:57 GMT',
'dc_identifier': '2179132',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Access-Power, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:57 GMT',
'dc_keyword': 'marijuana'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179131/0/en/Photo-Release-Huntington-Ingalls-Industries-Awarded-2-9-Billion-Contract-To-Execute-USS-John-C-Stennis-CVN-74-Refueling-and-Complex-Overhaul.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179131/0/en/Photo-Release-Huntington-Ingalls-Industries-Awarded-2-9-Billion-Contract-To-Execute-USS-John-C-Stennis-CVN-74-Refueling-and-Complex-Overhaul.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179131/0/en/Photo-Release-Huntington-Ingalls-Industries-Awarded-2-9-Billion-Contract-To-Execute-USS-John-C-Stennis-CVN-74-Refueling-and-Complex-Overhaul.html'}],
'tags': [{'term': 'NYSE:HII', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US4464131063', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Business Contracts', 'scheme': None, 'label': None}],
'title': 'Photo Release — Huntington Ingalls Industries Awarded $2.9 Billion Contract To Execute USS John C. Stennis (CVN 74) Refueling and Complex Overhaul',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Photo Release — Huntington Ingalls Industries Awarded $2.9 Billion Contract To Execute USS John C. Stennis (CVN 74) Refueling and Complex Overhaul'},
'summary': '<p align="left">NEWPORT NEWS, Va., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Huntington Ingalls Industries (NYSE:HII) announced today that its Newport News Shipbuilding division has been awarded a $2.9 billion contract for the refueling and complex overhaul (RCOH) of the nuclear-powered aircraft carrier USS <em>John C. Stennis</em> (CVN 74).<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">NEWPORT NEWS, Va., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Huntington Ingalls Industries (NYSE:HII) announced today that its Newport News Shipbuilding division has been awarded a $2.9 billion contract for the refueling and complex overhaul (RCOH) of the nuclear-powered aircraft carrier USS <em>John C. Stennis</em> (CVN 74).<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:55 GMT',
'dc_identifier': '2179131',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Huntington Ingalls Industries, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:55 GMT',
'dc_keyword': 'SHIPBUILDING'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179128/0/en/CPS-to-Host-Conference-Call-on-Fourth-Quarter-2020-Earnings.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179128/0/en/CPS-to-Host-Conference-Call-on-Fourth-Quarter-2020-Earnings.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179128/0/en/CPS-to-Host-Conference-Call-on-Fourth-Quarter-2020-Earnings.html'}],
'tags': [{'term': 'Nasdaq:CPSS', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US2105021008', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Calendar of Events', 'scheme': None, 'label': None}],
'title': 'CPS to Host Conference Call on Fourth Quarter 2020 Earnings',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'CPS to Host Conference Call on Fourth Quarter 2020 Earnings'},
'summary': '<p align="justify">LAS VEGAS, Nevada, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Consumer Portfolio Services, Inc. (Nasdaq: CPSS) (“CPS” or the “Company”) today announced that it will hold a conference call on Wednesday, February 24, 2021 at 1:00 p.m. ET to discuss its fourth quarter 2020 operating results. Those wishing to participate by telephone may dial-in at 877 312-5502, or 253 237-1131 for international participants, approximately 10 minutes prior to the scheduled time. The conference identification number is 3998868.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">LAS VEGAS, Nevada, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Consumer Portfolio Services, Inc. (Nasdaq: CPSS) (“CPS” or the “Company”) today announced that it will hold a conference call on Wednesday, February 24, 2021 at 1:00 p.m. ET to discuss its fourth quarter 2020 operating results. Those wishing to participate by telephone may dial-in at 877 312-5502, or 253 237-1131 for international participants, approximately 10 minutes prior to the scheduled time. The conference identification number is 3998868.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:31 GMT',
'dc_identifier': '2179128',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Consumer Portfolio Services, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:32 GMT'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179126/0/en/DCP-Midstream-Files-Form-10-K-for-Fiscal-Year-2020.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179126/0/en/DCP-Midstream-Files-Form-10-K-for-Fiscal-Year-2020.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179126/0/en/DCP-Midstream-Files-Form-10-K-for-Fiscal-Year-2020.html'}],
'tags': [{'term': 'NYSE:DCP', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US23311P1003', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Company Regulatory Filings', 'scheme': None, 'label': None}],
'title': 'DCP Midstream Files Form 10-K for Fiscal Year 2020',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'DCP Midstream Files Form 10-K for Fiscal Year 2020'},
'summary': '<p align="justify">DENVER, Feb. 19, 2021 (GLOBE NEWSWIRE) -- DCP Midstream, LP (NYSE: DCP) has filed its Form 10-K for the fiscal year ended December 31, 2020 with the Securities and Exchange Commission. A copy of the Form 10-K, which contains our audited financial statements, is available on the investor section of our website at www.dcpmidstream.com. Investors may request a hardcopy of the Form 10-K free of charge by sending a request to the office of the Corporate Secretary of DCP Midstream at 370 17th Street, Suite 2500, Denver, Colorado 80202.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">DENVER, Feb. 19, 2021 (GLOBE NEWSWIRE) -- DCP Midstream, LP (NYSE: DCP) has filed its Form 10-K for the fiscal year ended December 31, 2020 with the Securities and Exchange Commission. A copy of the Form 10-K, which contains our audited financial statements, is available on the investor section of our website at www.dcpmidstream.com. Investors may request a hardcopy of the Form 10-K free of charge by sending a request to the office of the Corporate Secretary of DCP Midstream at 370 17th Street, Suite 2500, Denver, Colorado 80202.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:30 GMT',
'dc_identifier': '2179126',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'DCP Midstream LP'}], 'dc_modified': 'Fri, 19 Feb 2021 22:30 GMT',
'dc_keyword': 'DCP'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179124/0/en/IMVT-SHAREHOLDER-ALERT-Class-Action-Filed-On-Behalf-Of-Immunovant-Inc-Investors-IMVT-Investors-Who-Have-Suffered-Losses-Greater-Than-100-000-Encouraged-To-Contact-Kehoe-Law-Firm-P-.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179124/0/en/IMVT-SHAREHOLDER-ALERT-Class-Action-Filed-On-Behalf-Of-Immunovant-Inc-Investors-IMVT-Investors-Who-Have-Suffered-Losses-Greater-Than-100-000-Encouraged-To-Contact-Kehoe-Law-Firm-P-.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179124/0/en/IMVT-SHAREHOLDER-ALERT-Class-Action-Filed-On-Behalf-Of-Immunovant-Inc-Investors-IMVT-Investors-Who-Have-Suffered-Losses-Greater-Than-100-000-Encouraged-To-Contact-Kehoe-Law-Firm-P-.html'}],
'title': 'IMVT SHAREHOLDER ALERT - Class Action Filed On Behalf Of Immunovant, Inc. Investors – IMVT Investors Who Have Suffered Losses Greater Than $100,000 Encouraged To Contact Kehoe Law Firm, P.C.',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'IMVT SHAREHOLDER ALERT - Class Action Filed On Behalf Of Immunovant, Inc. Investors – IMVT Investors Who Have Suffered Losses Greater Than $100,000 Encouraged To Contact Kehoe Law Firm, P.C.'},
'summary': '<p>PHILADELPHIA, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kehoe Law Firm, P.C. is <a href="https://www.globenewswire.com/Tracker?data=NA_ynwzgd31tMf82SFwLhYRzsqc6AywxWUD1oB6y9fnzBRcDFrseBWpUbmyd-QOkTNyZrEww1D_bGG5U5H1pr-FeX0sKnmrMxWmDcCUh7qXFatQobNrYQZYG_4sO60HRyb8aWMbwK1OIZNNhXi_p1KJmsfpOsL1QD9-5X38zy9WNZHOu6cgBQb1zQaoU8ovn" rel="nofollow" target="_blank" title="investigating potential securities claims">investigating potential securities claims</a> on behalf of investors of <strong>Immunovant, Inc., f/k/a Health Sciences Acquisitions Corporation, (“Immunovant” or the “Company”) (</strong><a href="https://www.globenewswire.com/Tracker?data=i5Efr8S7-yAz3qi9QXG04gxvDUvjGz-5hqOxmR7DTrA_FZdTSpoHDkt2Upgq2Oa8oUAE_TZ4S9uyy5zok1Pt4Sr0N12_6ecpTSycbbx_0LEcD6MlONga9wV0-ANsvKDx" rel="nofollow" target="_blank" title="">NASDAQ: IMVT</a><strong>)\xa0</strong><strong>who purchased, or otherwise acquired, IMVT securities between October 2, 2019 and February 1, 2021, both dates inclusive (the “Class Period).\xa0\xa0</strong><br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p>PHILADELPHIA, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kehoe Law Firm, P.C. is <a href="https://www.globenewswire.com/Tracker?data=NA_ynwzgd31tMf82SFwLhYRzsqc6AywxWUD1oB6y9fnzBRcDFrseBWpUbmyd-QOkTNyZrEww1D_bGG5U5H1pr-FeX0sKnmrMxWmDcCUh7qXFatQobNrYQZYG_4sO60HRyb8aWMbwK1OIZNNhXi_p1KJmsfpOsL1QD9-5X38zy9WNZHOu6cgBQb1zQaoU8ovn" rel="nofollow" target="_blank" title="investigating potential securities claims">investigating potential securities claims</a> on behalf of investors of <strong>Immunovant, Inc., f/k/a Health Sciences Acquisitions Corporation, (“Immunovant” or the “Company”) (</strong><a href="https://www.globenewswire.com/Tracker?data=i5Efr8S7-yAz3qi9QXG04gxvDUvjGz-5hqOxmR7DTrA_FZdTSpoHDkt2Upgq2Oa8oUAE_TZ4S9uyy5zok1Pt4Sr0N12_6ecpTSycbbx_0LEcD6MlONga9wV0-ANsvKDx" rel="nofollow" target="_blank" title="">NASDAQ: IMVT</a><strong>)\xa0</strong><strong>who purchased, or otherwise acquired, IMVT securities between October 2, 2019 and February 1, 2021, both dates inclusive (the “Class Period).\xa0\xa0</strong><br /></p>'},
'published': 'Fri, 19 Feb 2021 22:23 GMT',
'dc_identifier': '2179124',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Kehoe Law Firm'}], 'dc_modified': 'Fri, 19 Feb 2021 22:23 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Company Announcement', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179123/0/en/Gainey-McKenna-Egleston-Announces-A-Class-Action-Lawsuit-Has-Been-Filed-Against-fuboTV-Inc-FUBO.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179123/0/en/Gainey-McKenna-Egleston-Announces-A-Class-Action-Lawsuit-Has-Been-Filed-Against-fuboTV-Inc-FUBO.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179123/0/en/Gainey-McKenna-Egleston-Announces-A-Class-Action-Lawsuit-Has-Been-Filed-Against-fuboTV-Inc-FUBO.html'}],
'title': 'Gainey McKenna & Egleston Announces A Class Action Lawsuit Has Been Filed Against fuboTV Inc. (FUBO)',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Gainey McKenna & Egleston Announces A Class Action Lawsuit Has Been Filed Against fuboTV Inc. (FUBO)'},
'summary': '<p align="justify">NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Gainey McKenna & Egleston announces that a class action lawsuit has been filed against fuboTV Inc. (“fuboTV” or the “Company”) (NYSE: FUBO) in the United States District Court for the Southern District of New York on behalf of those who purchased or acquired the securities of fuboTV between March 23, 2020 and January 4, 2021, inclusive (the “Class Period”). The lawsuit seeks to recover damages for investors under the federal securities laws.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Gainey McKenna & Egleston announces that a class action lawsuit has been filed against fuboTV Inc. (“fuboTV” or the “Company”) (NYSE: FUBO) in the United States District Court for the Southern District of New York on behalf of those who purchased or acquired the securities of fuboTV between March 23, 2020 and January 4, 2021, inclusive (the “Class Period”). The lawsuit seeks to recover damages for investors under the federal securities laws.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:14 GMT',
'dc_identifier': '2179123',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Gainey McKenna & Egleston'}], 'dc_modified': 'Fri, 19 Feb 2021 22:14 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Law & Legal Issues', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179122/0/en/Mammoth-Energy-Announces-Timing-of-4Q-and-Full-Year-2020-Earnings-Release.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179122/0/en/Mammoth-Energy-Announces-Timing-of-4Q-and-Full-Year-2020-Earnings-Release.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179122/0/en/Mammoth-Energy-Announces-Timing-of-4Q-and-Full-Year-2020-Earnings-Release.html'}],
'tags': [{'term': 'Nasdaq:TUSK', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US56155L1089', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Calendar of Events', 'scheme': None, 'label': None}],
'title': 'Mammoth Energy Announces Timing of 4Q and Full Year 2020 Earnings Release',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Mammoth Energy Announces Timing of 4Q and Full Year 2020 Earnings Release'},
'summary': '<p align="left">OKLAHOMA CITY, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Mammoth Energy Services, Inc. (“Mammoth”) (NASDAQ:TUSK) today announced that it intends to release financial results for the fourth quarter and full year of 2020 after the market close on February 25, 2021.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">OKLAHOMA CITY, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Mammoth Energy Services, Inc. (“Mammoth”) (NASDAQ:TUSK) today announced that it intends to release financial results for the fourth quarter and full year of 2020 after the market close on February 25, 2021.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:05 GMT',
'dc_identifier': '2179122',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Mammoth Energy Services, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:05 GMT'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179120/0/en/ReWalk-Robotics-Announces-40-0-Million-Private-Placement-Priced-At-the-Market.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179120/0/en/ReWalk-Robotics-Announces-40-0-Million-Private-Placement-Priced-At-the-Market.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179120/0/en/ReWalk-Robotics-Announces-40-0-Million-Private-Placement-Priced-At-the-Market.html'}],
'tags': [{'term': 'Nasdaq:RWLK', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'IL0011331076', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Press releases', 'scheme': None, 'label': None}],
'title': 'ReWalk Robotics Announces $40.0 Million Private Placement Priced At-the-Market',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'ReWalk Robotics Announces $40.0 Million Private Placement Priced At-the-Market'},
'summary': '<p align="left">MARLBOROUGH, Mass. and BERLIN and YOKNEAM ILIT, Israel, Feb. 19, 2021 (GLOBE NEWSWIRE) -- ReWalk Robotics Ltd. (Nasdaq: RWLK) (“ReWalk” or the “Company”) today announced that it has entered into securities purchase agreements with certain institutional and other accredited investors to raise $40.0 million through the issuance of 10,921,502 ordinary shares and warrants to purchase up to 5,460,751 ordinary shares, at a purchase price of $3.6625 per share and associated warrant, in a private placement priced “at-the-market” under Nasdaq rules. The warrants will have a term of five and one-half years, be exercisable immediately following the issuance date and have an exercise price of $3.60 per ordinary share.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">MARLBOROUGH, Mass. and BERLIN and YOKNEAM ILIT, Israel, Feb. 19, 2021 (GLOBE NEWSWIRE) -- ReWalk Robotics Ltd. (Nasdaq: RWLK) (“ReWalk” or the “Company”) today announced that it has entered into securities purchase agreements with certain institutional and other accredited investors to raise $40.0 million through the issuance of 10,921,502 ordinary shares and warrants to purchase up to 5,460,751 ordinary shares, at a purchase price of $3.6625 per share and associated warrant, in a private placement priced “at-the-market” under Nasdaq rules. The warrants will have a term of five and one-half years, be exercisable immediately following the issuance date and have an exercise price of $3.60 per ordinary share.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:02 GMT',
'dc_identifier': '2179120',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'ReWalk Robotics Ltd.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:03 GMT'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179113/0/en/ROSEN-RESPECTED-INVESTOR-COUNSEL-Continues-its-Investigation-of-Breaches-of-Fiduciary-Duties-by-Management-of-JELD-WEN-Holding-Inc-JELD.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179113/0/en/ROSEN-RESPECTED-INVESTOR-COUNSEL-Continues-its-Investigation-of-Breaches-of-Fiduciary-Duties-by-Management-of-JELD-WEN-Holding-Inc-JELD.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179113/0/en/ROSEN-RESPECTED-INVESTOR-COUNSEL-Continues-its-Investigation-of-Breaches-of-Fiduciary-Duties-by-Management-of-JELD-WEN-Holding-Inc-JELD.html'}],
'title': 'ROSEN, RESPECTED INVESTOR COUNSEL, Continues its Investigation of Breaches of Fiduciary Duties by Management of JELD-WEN Holding, Inc. – JELD',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'ROSEN, RESPECTED INVESTOR COUNSEL, Continues its Investigation of Breaches of Fiduciary Duties by Management of JELD-WEN Holding, Inc. – JELD'},
'summary': '<p align="justify">NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- <strong>WHY:\xa0</strong>Rosen Law Firm, a global investor rights law firm, continues to investigate potential breaches of fiduciary duties by management of JELD-WEN Holding, Inc. (NYSE: JELD) resulting from allegations that management may have issued materially misleading business information to the investing public.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="justify">NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- <strong>WHY:\xa0</strong>Rosen Law Firm, a global investor rights law firm, continues to investigate potential breaches of fiduciary duties by management of JELD-WEN Holding, Inc. (NYSE: JELD) resulting from allegations that management may have issued materially misleading business information to the investing public.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:00 GMT',
'dc_identifier': '2179113',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'The Rosen Law Firm PA'}], 'dc_modified': 'Fri, 19 Feb 2021 22:00 GMT',
'tags': [{'term': 'Class Action', 'scheme': None, 'label': None},
{'term': 'Law & Legal Issues', 'scheme': None, 'label': None}], 'dc_keyword': 'Class Action'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179117/0/en/Array-Technologies-Inc-Announces-Fourth-Quarter-Full-Year-2020-Earnings-Release-Date-and-Conference-Call.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179117/0/en/Array-Technologies-Inc-Announces-Fourth-Quarter-Full-Year-2020-Earnings-Release-Date-and-Conference-Call.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179117/0/en/Array-Technologies-Inc-Announces-Fourth-Quarter-Full-Year-2020-Earnings-Release-Date-and-Conference-Call.html'}],
'tags': [{'term': 'Nasdaq:ARRY', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US04271T1007', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Calendar of Events', 'scheme': None, 'label': None}],
'title': 'Array Technologies, Inc. Announces Fourth Quarter & Full-Year 2020 Earnings Release Date and Conference Call',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Array Technologies, Inc. Announces Fourth Quarter & Full-Year 2020 Earnings Release Date and Conference Call'},
'summary': '<p align="left">ALBUQUERQUE, N.M., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Array Technologies, Inc. (the “Company” or “Array”) (Nasdaq: ARRY) today announced that the company will release its fourth quarter and full-year 2020 results after the market close on Tuesday, March 9<sup>th</sup>, 2021, to be followed by a conference call at 5:00 p.m. (Eastern Time) on the same day.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">ALBUQUERQUE, N.M., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Array Technologies, Inc. (the “Company” or “Array”) (Nasdaq: ARRY) today announced that the company will release its fourth quarter and full-year 2020 results after the market close on Tuesday, March 9<sup>th</sup>, 2021, to be followed by a conference call at 5:00 p.m. (Eastern Time) on the same day.<br /></p>'},
'published': 'Fri, 19 Feb 2021 22:00 GMT',
'dc_identifier': '2179117',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Array Technologies, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 22:00 GMT'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179112/0/en/HV-Bancorp-Inc-Reports-Record-Results-for-the-Quarter-and-Year-Ended-December-31-2020.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179112/0/en/HV-Bancorp-Inc-Reports-Record-Results-for-the-Quarter-and-Year-Ended-December-31-2020.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179112/0/en/HV-Bancorp-Inc-Reports-Record-Results-for-the-Quarter-and-Year-Ended-December-31-2020.html'}],
'tags': [{'term': 'Nasdaq:HVBC', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US40441H1059', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Earnings Releases and Operating Results', 'scheme': None, 'label': None}],
'title': 'HV Bancorp, Inc. Reports Record Results for the Quarter and Year Ended December 31, 2020',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'HV Bancorp, Inc. Reports Record Results for the Quarter and Year Ended December 31, 2020'},
'summary': '<p align="left">DOYLESTOWN, Pa., Feb. 19, 2021 (GLOBE NEWSWIRE) -- HV Bancorp, Inc. (the “Company”) (Nasdaq Capital Market: HVBC), the holding company of Huntingdon Valley Bank (the “Bank”), reported results for the Company for the quarter ended December 31, 2020. \xa0At quarter end, the Company held total assets of $861.6 million (143.0% over prior year), total deposits of $730.8 million (157.5% increase over prior year) and total equity of $38.9 million (15.8% increase over prior year). \xa0Highlights in the quarter include a record 895% growth in net earnings over the same period in 2019 of $2.1 million, or $1.02 per basic and diluted share, vs. net earnings of $207,000, or $0.10 per basic and diluted share in 2019. \xa0For the year ended December 31, 2020, net earnings increased 471% over the same period in 2019 of $5.8 million, or $2.84 per basic and diluted share vs. net income of $1.0 million, or $0.49 per basic and diluted share. \xa0For the quarter end December 31, 2020, ROA and ROE totaled 1.54% and 23.74%, respectively. \xa0Shareholders’ equity increased 15.8% from $33.6 million at December 31, 2019, to $38.9 million at December 31, 2020, which increased book value for the Company from $14.81 per share to $17.78 per share over the same period.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p align="left">DOYLESTOWN, Pa., Feb. 19, 2021 (GLOBE NEWSWIRE) -- HV Bancorp, Inc. (the “Company”) (Nasdaq Capital Market: HVBC), the holding company of Huntingdon Valley Bank (the “Bank”), reported results for the Company for the quarter ended December 31, 2020. \xa0At quarter end, the Company held total assets of $861.6 million (143.0% over prior year), total deposits of $730.8 million (157.5% increase over prior year) and total equity of $38.9 million (15.8% increase over prior year). \xa0Highlights in the quarter include a record 895% growth in net earnings over the same period in 2019 of $2.1 million, or $1.02 per basic and diluted share, vs. net earnings of $207,000, or $0.10 per basic and diluted share in 2019. \xa0For the year ended December 31, 2020, net earnings increased 471% over the same period in 2019 of $5.8 million, or $2.84 per basic and diluted share vs. net income of $1.0 million, or $0.49 per basic and diluted share. \xa0For the quarter end December 31, 2020, ROA and ROE totaled 1.54% and 23.74%, respectively. \xa0Shareholders’ equity increased 15.8% from $33.6 million at December 31, 2019, to $38.9 million at December 31, 2020, which increased book value for the Company from $14.81 per share to $17.78 per share over the same period.<br /></p>'},
'published': 'Fri, 19 Feb 2021 21:45 GMT',
'dc_identifier': '2179112',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'HV Bancorp, Inc.'}], 'dc_modified': 'Fri, 19 Feb 2021 21:45 GMT',
'dc_keyword': 'finance'},
{
'id': 'http://www.globenewswire.com/news-release/2021/02/19/2179108/0/en/Exterran-Corporation-Reschedules-Timing-of-Fourth-Quarter-2020-Earnings-Release-and-Conference-Call-Due-to-Inclement-Weather-and-Power-Outages-across-Southeast-Texas.html',
'guidislink': True,
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179108/0/en/Exterran-Corporation-Reschedules-Timing-of-Fourth-Quarter-2020-Earnings-Release-and-Conference-Call-Due-to-Inclement-Weather-and-Power-Outages-across-Southeast-Texas.html',
'links': [{'rel': 'alternate', 'type': 'text/html',
'href': 'http://www.globenewswire.com/news-release/2021/02/19/2179108/0/en/Exterran-Corporation-Reschedules-Timing-of-Fourth-Quarter-2020-Earnings-Release-and-Conference-Call-Due-to-Inclement-Weather-and-Power-Outages-across-Southeast-Texas.html'}],
'tags': [{'term': 'NYSE:EXTN', 'scheme': 'http://www.globenewswire.com/rss/stock', 'label': None},
{'term': 'US30227H1068', 'scheme': 'http://www.globenewswire.com/rss/ISIN', 'label': None},
{'term': 'Calendar of Events', 'scheme': None, 'label': None}],
'title': 'Exterran Corporation Reschedules Timing of Fourth Quarter 2020 Earnings Release and Conference Call Due to Inclement Weather and Power Outages across Southeast Texas',
'title_detail': {'type': 'text/plain', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': 'Exterran Corporation Reschedules Timing of Fourth Quarter 2020 Earnings Release and Conference Call Due to Inclement Weather and Power Outages across Southeast Texas'},
'summary': '<p>HOUSTON, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Exterran Corporation (NYSE: EXTN) (“Exterran” or the “Company”) today announced that it has rescheduled the timing of the release of its fourth quarter 2020 results due to the severe inclement weather causing power outages across Southeast Texas. The Company will now release earnings on Tuesday, March 2<sup>nd</sup>, 2021 before the market opens. The Company has rescheduled its conference call for Tuesday, March 2<sup>nd</sup>, 2021 at 10 a.m. Central Time to discuss the results. The call will be broadcast live over the Internet. Investors may participate either by phone or audio webcast.<br /></p>',
'summary_detail': {'type': 'text/html', 'language': None,
'base': 'http://www.globenewswire.com/RssFeed/country/United%20States/feedTitle/GlobeNewswire%20-%20News%20from%20United%20States',
'value': '<p>HOUSTON, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Exterran Corporation (NYSE: EXTN) (“Exterran” or the “Company”) today announced that it has rescheduled the timing of the release of its fourth quarter 2020 results due to the severe inclement weather causing power outages across Southeast Texas. The Company will now release earnings on Tuesday, March 2<sup>nd</sup>, 2021 before the market opens. The Company has rescheduled its conference call for Tuesday, March 2<sup>nd</sup>, 2021 at 10 a.m. Central Time to discuss the results. The call will be broadcast live over the Internet. Investors may participate either by phone or audio webcast.<br /></p>'},
'published': 'Fri, 19 Feb 2021 21:30 GMT',
'dc_identifier': '2179108',
'language': 'en', 'publisher': 'GlobeNewswire Inc.', 'publisher_detail': {'name': 'GlobeNewswire Inc.'},
'contributors': [{'name': 'Exterran Corporation'}], 'dc_modified': 'Fri, 19 Feb 2021 21:30 GMT'},
]
news_items_outputs = [
[{
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'UNH.N', 'ticker_source': 'TRIT', 'yticker': 'UNH', 'ticker_normal': 'UNH US', 'exchange': 'N',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}, {
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'CATM.OQ', 'ticker_source': 'TRIT', 'yticker': 'CATM', 'ticker_normal': 'CATM US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}, {
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'CHNG.OQ', 'ticker_source': 'TRIT', 'yticker': 'CHNG', 'ticker_normal': 'CHNG US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}, {
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'CUB.N', 'ticker_source': 'TRIT', 'yticker': 'CUB', 'ticker_normal': 'CUB US', 'exchange': 'N',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}, {
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'NCR.N', 'ticker_source': 'TRIT', 'yticker': 'NCR', 'ticker_normal': 'NCR US', 'exchange': 'N',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}, {
'title': 'INVESTIGATION ALERT: Halper Sadeh LLP Investigates RP, CUB, CATM, CHNG; Shareholders Are Encouraged to Contact the Firm',
'summary': 'NEW YORK, Feb. 20, 2021 (GLOBE NEWSWIRE) -- Halper Sadeh LLP, a global investor rights law firm, continues to investigate the following companies:',
'published': 'Sat, 20 Feb 2021 15:53 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179148/0/en/INVESTIGATION-ALERT-Halper-Sadeh-LLP-Investigates-RP-CUB-CATM-CHNG-Shareholders-Are-Encouraged-to-Contact-the-Firm.html',
'keyword': 'Class Action', 'contributor': 'Halper Sadeh LLP', 'company': 'Halper Sadeh LLP', 'language': 'en',
'ticker': 'RP.OQ', 'ticker_source': 'TRIT', 'yticker': 'RP', 'ticker_normal': 'RP US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.007954545454545457,
'provider': 'GlobeNewswire Inc.'}],
[{'title': 'LifeSave Transport Announces Hiring Push for Flight Nurses and Medics',
'summary': 'Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska Emergency air medical services company announces new career opportunities for fight nurses and medics in Kansas and Nebraska',
'published': 'Sat, 20 Feb 2021 05:59 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179147/0/en/LifeSave-Transport-Announces-Hiring-Push-for-Flight-Nurses-and-Medics.html',
'keyword': None, 'contributor': 'Air Methods', 'company': 'Air Methods', 'language': 'en', 'ticker': 'GCCO.PK',
'ticker_source': 'TRIT', 'yticker': 'GCCO', 'ticker_normal': 'GCCO US', 'exchange': 'PK',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.28435909090909095,
'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS PEN, QS, SWI INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits',
'summary': 'NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:',
'published': 'Sat, 20 Feb 2021 03:50 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179145/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-PEN-QS-SWI-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'keyword': 'Class Action', 'contributor': 'Kahn Swick & Foti, LLC', 'company': 'Kahn Swick & Foti, LLC',
'language': 'en', 'ticker': 'FMS.V', 'ticker_source': 'TRIT', 'yticker': 'FMS', 'ticker_normal': None,
'exchange': 'V', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader',
'senti_score': 0.10518636363636365, 'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'SHAREHOLDER ALERT BY FORMER LOUISIANA ATTORNEY GENERAL: KSF REMINDS CLOV, IRTC INVESTORS of Lead Plaintiff Deadline in Class Action Lawsuits',
'summary': 'NEW ORLEANS, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kahn Swick & Foti, LLC (“KSF”) and KSF partner, former Attorney General of Louisiana, Charles C. Foti, Jr., remind investors of pending deadlines in the following securities class action lawsuits:',
'published': 'Sat, 20 Feb 2021 03:50 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179146/0/en/SHAREHOLDER-ALERT-BY-FORMER-LOUISIANA-ATTORNEY-GENERAL-KSF-REMINDS-CLOV-IRTC-INVESTORS-of-Lead-Plaintiff-Deadline-in-Class-Action-Lawsuits.html',
'keyword': 'Class Action', 'contributor': 'Kahn Swick & Foti, LLC', 'company': 'Kahn Swick & Foti, LLC',
'language': 'en', 'ticker': 'CLOV.OQ', 'ticker_source': 'TRIT', 'yticker': 'CLOV', 'ticker_normal': 'CLOV US',
'exchange': 'OQ', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader',
'senti_score': 0.10518636363636365, 'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'Rail Shippers Defeat BNSF, CSX, NS, and UP’s Attempts to Insulate Anticompetitive Conduct from Liability',
'summary': 'WASHINGTON, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Today, in the United States District Court for the District of Columbia, Judge Paul Friedman denied a motion by the defendant railroads BNSF, CSX, NS, and UP in In re Rail Freight Fuel Surcharge Antitrust Litigation (Case No. 07-489) to exclude certain evidence from future antitrust trials. The plaintiffs in this multidistrict litigation, which began as a class action and now comprises more than 200 of the country’s largest rail shippers, allege that the railroads unlawfully fixed prices through collusive fuel-surcharge programs and policies, beginning in 2003.',
'published': 'Sat, 20 Feb 2021 02:44 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179142/0/en/Rail-Shippers-Defeat-BNSF-CSX-NS-and-UP-s-Attempts-to-Insulate-Anticompetitive-Conduct-from-Liability.html',
'keyword': 'antitrust', 'contributor': 'Hausfeld', 'company': 'Hausfeld', 'language': 'en', 'ticker': 'CSX.OQ',
'ticker_source': 'TRIT', 'yticker': 'CSX', 'ticker_normal': 'CSX US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': -0.08917142857142858,
'provider': 'GlobeNewswire Inc.'}],
[{'title': 'Ebix Shares Strong Business Outlook and Discusses Recent Events',
'summary': 'JOHNS CREEK, Ga., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Ebix, Inc. (NASDAQ: EBIX), a leading international supplier of On-Demand software and E-commerce services to the insurance, financial, healthcare and e-learning industries, today issued a press release to emphasize a strong current business outlook while discussing the auditor resignation, the income materiality of the issues highlighted, and the various related steps being taken by the Company.',
'published': 'Sat, 20 Feb 2021 00:05 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/20/2179136/0/en/Ebix-Shares-Strong-Business-Outlook-and-Discusses-Recent-Events.html',
'keyword': 'India', 'contributor': 'Ebix, Inc.', 'company': 'Ebix, Inc.', 'language': 'en', 'ticker': 'EBIX.OQ',
'ticker_source': 'TRIT', 'yticker': 'EBIX', 'ticker_normal': 'EBIX US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.1727111111111111,
'provider': 'GlobeNewswire Inc.'}],
[{'title': 'FSIS Recall Release 005-2021 - Without Inspection',
'summary': 'WASHINGTON, D.C., Feb. 19, 2021 (GLOBE NEWSWIRE) -- \xa0',
'published': 'Fri, 19 Feb 2021 23:31 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179134/0/en/FSIS-Recall-Release-005-2021-Without-Inspection.html',
'keyword': None, 'contributor': 'USDA Food Safety and Inspection Service',
'company': 'USDA Food Safety and Inspection Service', 'language': 'en', 'ticker': None, 'ticker_source': 'NA',
'yticker': None, 'ticker_normal': None, 'exchange': None, 'trading_session': 'pre-market',
'senti_method': 'txtblob_vader', 'senti_score': 0.0, 'provider': 'GlobeNewswire Inc.'}],
[{'title': 'Naropa University Celebrates 1st Black Futures Month',
'summary': 'Boulder, CO, Feb. 19, 2021 (GLOBE NEWSWIRE) -- “Let’s give ourselves the freedom and permission to follow our radical imaginations and visualize the world we deserve because in order to realize a society in which we have healthcare for all, a meaningful wage, self-determination, and true freedom, we have to first imagine it!” ~ Movement For Black Lives (m4bl.org)',
'published': 'Fri, 19 Feb 2021 23:17 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179133/0/en/Naropa-University-Celebrates-1st-Black-Futures-Month.html',
'keyword': None, 'contributor': 'Naropa University', 'company': 'Naropa University', 'language': 'en',
'ticker': None, 'ticker_source': 'NA', 'yticker': None, 'ticker_normal': None, 'exchange': None,
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.5894291666666667,
'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'Access-Power & Co., Inc. is pleased to announce the Company has hired Ben Borgers as its PCAOB/CPA Auditor',
'summary': 'GRAND HAVEN, Mich., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Access-Power & Co., Inc., (“ACCR or the Company”), a Grand Haven based diversified Company that is now also a soon to be International Marijuana/Hemp Company, is pleased to announce today that the Company has hired Ben Borgers as our Company PCAOB/AUDITOR',
'published': 'Fri, 19 Feb 2021 22:57 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179132/0/en/Access-Power-Co-Inc-is-pleased-to-announce-the-Company-has-hired-Ben-Borgers-as-its-PCAOB-CPA-Auditor.html',
'keyword': 'marijuana', 'contributor': 'Access-Power, Inc.', 'company': 'Access-Power, Inc.', 'language': 'en',
'ticker': 'ACCR.PK', 'ticker_source': 'TRIT', 'yticker': 'ACCR', 'ticker_normal': 'ACCR US', 'exchange': 'PK',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.6193,
'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'Photo Release — Huntington Ingalls Industries Awarded $2.9 Billion Contract To Execute USS John C. Stennis (CVN 74) Refueling and Complex Overhaul',
'summary': 'NEWPORT NEWS, Va., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Huntington Ingalls Industries (NYSE:HII) announced today that its Newport News Shipbuilding division has been awarded a $2.9 billion contract for the refueling and complex overhaul (RCOH) of the nuclear-powered aircraft carrier USS John C. Stennis (CVN 74).',
'published': 'Fri, 19 Feb 2021 22:55 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179131/0/en/Photo-Release-Huntington-Ingalls-Industries-Awarded-2-9-Billion-Contract-To-Execute-USS-John-C-Stennis-CVN-74-Refueling-and-Complex-Overhaul.html',
'keyword': 'SHIPBUILDING', 'contributor': 'Huntington Ingalls Industries, Inc.',
'company': 'Huntington Ingalls Industries, Inc.', 'language': 'en', 'ticker': 'HII.N', 'ticker_source': 'TRIT',
'yticker': 'HII', 'ticker_normal': 'HII US', 'exchange': 'N', 'trading_session': 'pre-market',
'senti_method': 'txtblob_vader', 'senti_score': 0.050949999999999995, 'provider': 'GlobeNewswire Inc.'}],
[{'title': 'CPS to Host Conference Call on Fourth Quarter 2020 Earnings',
'summary': 'LAS VEGAS, Nevada, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Consumer Portfolio Services, Inc. (Nasdaq: CPSS) (“CPS” or the “Company”) today announced that it will hold a conference call on Wednesday, February 24, 2021 at 1:00 p.m. ET to discuss its fourth quarter 2020 operating results. Those wishing to participate by telephone may dial-in at 877 312-5502, or 253 237-1131 for international participants, approximately 10 minutes prior to the scheduled time. The conference identification number is 3998868.',
'published': 'Fri, 19 Feb 2021 22:31 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179128/0/en/CPS-to-Host-Conference-Call-on-Fourth-Quarter-2020-Earnings.html',
'keyword': None, 'contributor': 'Consumer Portfolio Services, Inc.',
'company': 'Consumer Portfolio Services, Inc.', 'language': 'en', 'ticker': 'CPSS.OQ', 'ticker_source': 'TRIT',
'yticker': 'CPSS', 'ticker_normal': 'CPSS US', 'exchange': 'OQ', 'trading_session': 'pre-market',
'senti_method': 'txtblob_vader', 'senti_score': 0.09799999999999999, 'provider': 'GlobeNewswire Inc.'}],
[{'title': 'DCP Midstream Files Form 10-K for Fiscal Year 2020',
'summary': 'DENVER, Feb. 19, 2021 (GLOBE NEWSWIRE) -- DCP Midstream, LP (NYSE: DCP) has filed its Form 10-K for the fiscal year ended December 31, 2020 with the Securities and Exchange Commission. A copy of the Form 10-K, which contains our audited financial statements, is available on the investor section of our website at www.dcpmidstream.com. Investors may request a hardcopy of the Form 10-K free of charge by sending a request to the office of the Corporate Secretary of DCP Midstream at 370 17th Street, Suite 2500, Denver, Colorado 80202.',
'published': 'Fri, 19 Feb 2021 22:30 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179126/0/en/DCP-Midstream-Files-Form-10-K-for-Fiscal-Year-2020.html',
'keyword': 'DCP', 'contributor': 'DCP Midstream LP', 'company': 'DCP Midstream LP', 'language': 'en',
'ticker': 'DCP.N', 'ticker_source': 'TRIT', 'yticker': 'DCP', 'ticker_normal': 'DCP US', 'exchange': 'N',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.43525,
'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'IMVT SHAREHOLDER ALERT - Class Action Filed On Behalf Of Immunovant, Inc. Investors – IMVT Investors Who Have Suffered Losses Greater Than $100,000 Encouraged To Contact Kehoe Law Firm, P.C.',
'summary': 'PHILADELPHIA, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Kehoe Law Firm, P.C. is investigating potential securities claims on behalf of investors of Immunovant, Inc., f/k/a Health Sciences Acquisitions Corporation, (“Immunovant” or the “Company”) (NASDAQ: IMVT)\xa0who purchased, or otherwise acquired, IMVT securities between October 2, 2019 and February 1, 2021, both dates inclusive (the “Class Period).\xa0\xa0',
'published': 'Fri, 19 Feb 2021 22:23 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179124/0/en/IMVT-SHAREHOLDER-ALERT-Class-Action-Filed-On-Behalf-Of-Immunovant-Inc-Investors-IMVT-Investors-Who-Have-Suffered-Losses-Greater-Than-100-000-Encouraged-To-Contact-Kehoe-Law-Firm-P-.html',
'keyword': 'Class Action', 'contributor': 'Kehoe Law Firm', 'company': 'Kehoe Law Firm', 'language': 'en',
'ticker': 'IMVT.OQ', 'ticker_source': 'TRIT', 'yticker': 'IMVT', 'ticker_normal': 'IMVT US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.21334999999999998,
'provider': 'GlobeNewswire Inc.'}],
[{'title': 'Gainey McKenna & Egleston Announces A Class Action Lawsuit Has Been Filed Against fuboTV Inc. (FUBO)',
'summary': 'NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Gainey McKenna & Egleston announces that a class action lawsuit has been filed against fuboTV Inc. (“fuboTV” or the “Company”) (NYSE: FUBO) in the United States District Court for the Southern District of New York on behalf of those who purchased or acquired the securities of fuboTV between March 23, 2020 and January 4, 2021, inclusive (the “Class Period”). The lawsuit seeks to recover damages for investors under the federal securities laws.',
'published': 'Fri, 19 Feb 2021 22:14 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179123/0/en/Gainey-McKenna-Egleston-Announces-A-Class-Action-Lawsuit-Has-Been-Filed-Against-fuboTV-Inc-FUBO.html',
'keyword': 'Class Action', 'contributor': 'Gainey McKenna & Egleston', 'company': 'Gainey McKenna & Egleston',
'language': 'en', 'ticker': 'FUBO.N', 'ticker_source': 'TRIT', 'yticker': 'FUBO', 'ticker_normal': 'FUBO US',
'exchange': 'N', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader',
'senti_score': 0.11059090909090909, 'provider': 'GlobeNewswire Inc.'}],
[{'title': 'Mammoth Energy Announces Timing of 4Q and Full Year 2020 Earnings Release',
'summary': 'OKLAHOMA CITY, Feb. 19, 2021 (GLOBE NEWSWIRE) -- Mammoth Energy Services, Inc. (“Mammoth”) (NASDAQ:TUSK) today announced that it intends to release financial results for the fourth quarter and full year of 2020 after the market close on February 25, 2021.',
'published': 'Fri, 19 Feb 2021 22:05 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179122/0/en/Mammoth-Energy-Announces-Timing-of-4Q-and-Full-Year-2020-Earnings-Release.html',
'keyword': None, 'contributor': 'Mammoth Energy Services, Inc.', 'company': 'Mammoth Energy Services, Inc.',
'language': 'en', 'ticker': 'TUSK.OQ', 'ticker_source': 'TRIT', 'yticker': 'TUSK', 'ticker_normal': 'TUSK US',
'exchange': 'OQ', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader',
'senti_score': 0.19493333333333332, 'provider': 'GlobeNewswire Inc.'}],
[{'title': 'ReWalk Robotics Announces $40.0 Million Private Placement Priced At-the-Market',
'summary': 'MARLBOROUGH, Mass. and BERLIN and YOKNEAM ILIT, Israel, Feb. 19, 2021 (GLOBE NEWSWIRE) -- ReWalk Robotics Ltd. (Nasdaq: RWLK) (“ReWalk” or the “Company”) today announced that it has entered into securities purchase agreements with certain institutional and other accredited investors to raise $40.0 million through the issuance of 10,921,502 ordinary shares and warrants to purchase up to 5,460,751 ordinary shares, at a purchase price of $3.6625 per share and associated warrant, in a private placement priced “at-the-market” under Nasdaq rules. The warrants will have a term of five and one-half years, be exercisable immediately following the issuance date and have an exercise price of $3.60 per ordinary share.',
'published': 'Fri, 19 Feb 2021 22:02 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179120/0/en/ReWalk-Robotics-Announces-40-0-Million-Private-Placement-Priced-At-the-Market.html',
'keyword': None, 'contributor': 'ReWalk Robotics Ltd.', 'company': 'ReWalk Robotics Ltd.', 'language': 'en',
'ticker': 'RWLK.OQ', 'ticker_source': 'TRIT', 'yticker': 'RWLK', 'ticker_normal': 'RWLK US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.4049061224489796,
'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'ROSEN, RESPECTED INVESTOR COUNSEL, Continues its Investigation of Breaches of Fiduciary Duties by Management of JELD-WEN Holding, Inc. – JELD',
'summary': 'NEW YORK, Feb. 19, 2021 (GLOBE NEWSWIRE) -- WHY:\xa0Rosen Law Firm, a global investor rights law firm, continues to investigate potential breaches of fiduciary duties by management of JELD-WEN Holding, Inc. (NYSE: JELD) resulting from allegations that management may have issued materially misleading business information to the investing public.',
'published': 'Fri, 19 Feb 2021 22:00 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179113/0/en/ROSEN-RESPECTED-INVESTOR-COUNSEL-Continues-its-Investigation-of-Breaches-of-Fiduciary-Duties-by-Management-of-JELD-WEN-Holding-Inc-JELD.html',
'keyword': 'Class Action', 'contributor': 'The Rosen Law Firm PA', 'company': 'The Rosen Law Firm PA',
'language': 'en', 'ticker': 'JELD.N', 'ticker_source': 'TRIT', 'yticker': 'JELD', 'ticker_normal': 'JELD US',
'exchange': 'N', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader',
'senti_score': -0.22291969696969696, 'provider': 'GlobeNewswire Inc.'}],
[{
'title': 'Array Technologies, Inc. Announces Fourth Quarter & Full-Year 2020 Earnings Release Date and Conference Call',
'summary': 'ALBUQUERQUE, N.M., Feb. 19, 2021 (GLOBE NEWSWIRE) -- Array Technologies, Inc. (the “Company” or “Array”) (Nasdaq: ARRY) today announced that the company will release its fourth quarter and full-year 2020 results after the market close on Tuesday, March 9th, 2021, to be followed by a conference call at 5:00 p.m. (Eastern Time) on the same day.',
'published': 'Fri, 19 Feb 2021 22:00 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179117/0/en/Array-Technologies-Inc-Announces-Fourth-Quarter-Full-Year-2020-Earnings-Release-Date-and-Conference-Call.html',
'keyword': None, 'contributor': 'Array Technologies, Inc.', 'company': 'Array Technologies, Inc.',
'language': 'en', 'ticker': 'ARRY.OQ', 'ticker_source': 'TRIT', 'yticker': 'ARRY', 'ticker_normal': 'ARRY US',
'exchange': 'OQ', 'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.0,
'provider': 'GlobeNewswire Inc.'}],
[{'title': 'HV Bancorp, Inc. Reports Record Results for the Quarter and Year Ended December 31, 2020',
'summary': 'DOYLESTOWN, Pa., Feb. 19, 2021 (GLOBE NEWSWIRE) -- HV Bancorp, Inc. (the “Company”) (Nasdaq Capital Market: HVBC), the holding company of Huntingdon Valley Bank (the “Bank”), reported results for the Company for the quarter ended December 31, 2020. \xa0At quarter end, the Company held total assets of $861.6 million (143.0% over prior year), total deposits of $730.8 million (157.5% increase over prior year) and total equity of $38.9 million (15.8% increase over prior year). \xa0Highlights in the quarter include a record 895% growth in net earnings over the same period in 2019 of $2.1 million, or $1.02 per basic and diluted share, vs. net earnings of $207,000, or $0.10 per basic and diluted share in 2019. \xa0For the year ended December 31, 2020, net earnings increased 471% over the same period in 2019 of $5.8 million, or $2.84 per basic and diluted share vs. net income of $1.0 million, or $0.49 per basic and diluted share. \xa0For the quarter end December 31, 2020, ROA and ROE totaled 1.54% and 23.74%, respectively. \xa0Shareholders’ equity increased 15.8% from $33.6 million at December 31, 2019, to $38.9 million at December 31, 2020, which increased book value for the Company from $14.81 per share to $17.78 per share over the same period.',
'published': 'Fri, 19 Feb 2021 21:45 GMT',
'link': 'http://www.globenewswire.com/news-release/2021/02/19/2179112/0/en/HV-Bancorp-Inc-Reports-Record-Results-for-the-Quarter-and-Year-Ended-December-31-2020.html',
'keyword': 'finance', 'contributor': 'HV Bancorp, Inc.', 'company': 'HV Bancorp, Inc.', 'language': 'en',
'ticker': 'HVBC.OQ', 'ticker_source': 'TRIT', 'yticker': 'HVBC', 'ticker_normal': 'HVBC US', 'exchange': 'OQ',
'trading_session': 'pre-market', 'senti_method': 'txtblob_vader', 'senti_score': 0.4894,
'provider': 'GlobeNewswire Inc.'}],
]
|
"""Tests DataProcessor object serialization during dataset publication."""
import os
import shutil
from typing import Dict
import numpy as np
from mlops.dataset.data_processor import DataProcessor
from mlops.dataset.versioned_dataset_builder import VersionedDatasetBuilder
from mlops.dataset.versioned_dataset import VersionedDataset
TEST_DATASET_PATH = "sample_data/pokemon/trainvaltest"
TEST_PUBLICATION_PATH = "/tmp/test_serialization/datasets"
class DataProcessorThatWillChange(DataProcessor):
    """Initial definition of a processor whose class will later be
    redefined, emulating a user editing how data enters the pipeline."""

    def get_raw_features_and_labels(
        self, dataset_path: str
    ) -> (Dict[str, np.ndarray], Dict[str, np.ndarray]):
        """Returns fixed dummy feature and label dictionaries.

        :param dataset_path: Unused.
        :return: A 2-tuple of the features dictionary and labels dictionary,
            with matching keys and ordered tensors.
        """
        features = {"X": np.array([1, 2, 3])}
        labels = {"y": np.array([1, 2, 3])}
        return features, labels

    def get_raw_features(self, dataset_path: str) -> Dict[str, np.ndarray]:
        """Returns a fixed dummy feature dictionary.

        :param dataset_path: Unused.
        :return: A dictionary mapping tensor names (e.g. 'X_train') to the
            feature tensors they reference.
        """
        return {"X": np.array([1, 2, 3])}

    def preprocess_features(
        self, raw_feature_tensor: np.ndarray
    ) -> np.ndarray:
        """Doubles the raw features.

        :param raw_feature_tensor: The raw features to be preprocessed.
        :return: The preprocessed feature tensor, ready for downstream model
            consumption.
        """
        return raw_feature_tensor * 2

    def preprocess_labels(self, raw_label_tensor: np.ndarray) -> np.ndarray:
        """Doubles the raw labels.

        :param raw_label_tensor: The raw labels to be preprocessed.
        :return: The preprocessed label tensor, ready for downstream model
            consumption.
        """
        return raw_label_tensor * 2
def _redefine_class() -> None:
    """Redefines DataProcessorThatWillChange at module scope.

    The replacement class keeps the same interface, but every method raises
    ValueError, so any code path that accidentally uses the *new* class
    definition (instead of a previously serialized processor) fails loudly.
    """
    # pylint: disable=redefined-outer-name
    # pylint: disable=global-statement
    # pylint: disable=global-variable-undefined
    # pylint: disable=global-variable-not-assigned
    # pylint: disable=invalid-name
    # pylint: disable=unused-variable
    # Rebind the module-level name so that later instantiations pick up the
    # new (always-raising) definition.
    global DataProcessorThatWillChange
    class DataProcessorThatWillChange(DataProcessor):
        """A DataProcessor subclass that will change, simulating a user
        redefining how data should enter the model pipeline."""
        def get_raw_features_and_labels(
            self, dataset_path: str
        ) -> (Dict[str, np.ndarray], Dict[str, np.ndarray]):
            """Returns dummy features and labels
            :param dataset_path: Unused.
            :return: A 2-tuple of the features dictionary and labels
                dictionary, with matching keys and ordered tensors.
            """
            raise ValueError("The new implementation is different.")
        def get_raw_features(self, dataset_path: str) -> Dict[str, np.ndarray]:
            """Returns dummy features.
            :param dataset_path: Unused.
            :return: A dictionary whose values are feature tensors and whose
                corresponding keys are the names by which those tensors should
                be referenced. For example, the training features (value) may
                be called 'X_train' (key).
            """
            raise ValueError("The new implementation is different.")
        def preprocess_features(
            self, raw_feature_tensor: np.ndarray
        ) -> np.ndarray:
            """Returns features multiplied by 2.
            :param raw_feature_tensor: The raw features to be preprocessed.
            :return: The preprocessed feature tensor. This tensor is ready for
                downstream model consumption.
            """
            raise ValueError("The new implementation is different.")
        def preprocess_labels(
            self, raw_label_tensor: np.ndarray
        ) -> np.ndarray:
            """Returns labels multiplied by 2.
            :param raw_label_tensor: The raw labels to be preprocessed.
            :return: The preprocessed label tensor. This tensor is ready for
                downstream model consumption.
            """
            raise ValueError("The new implementation is different.")
def _assert_v1_uses_original_processor() -> None:
    """Loads the published 'v1' dataset and asserts that its deserialized
    data processor still returns the original dummy features/labels."""
    dataset = VersionedDataset(os.path.join(TEST_PUBLICATION_PATH, "v1"))
    features, labels = dataset.data_processor.get_raw_features_and_labels(
        "dne"
    )
    assert np.array_equal(features["X"], [1, 2, 3])
    assert np.array_equal(labels["y"], [1, 2, 3])


def test_serialized_data_processor_uses_original_methods() -> None:
    """Tests that the serialized data processor object obtained during dataset
    publication uses the original methods, not those of the class definition at
    the time of deserialization, which may be different."""
    # Start from a clean publication directory; absence is fine.
    try:
        shutil.rmtree(TEST_PUBLICATION_PATH)
    except FileNotFoundError:
        pass
    data_processor = DataProcessorThatWillChange()
    builder = VersionedDatasetBuilder(TEST_DATASET_PATH, data_processor)
    builder.publish(TEST_PUBLICATION_PATH, version="v1")
    # Sanity check: the freshly published processor behaves as defined.
    _assert_v1_uses_original_processor()
    # Redefine the class so new instances would raise; the serialized
    # processor must keep the behavior captured at publication time.
    _redefine_class()
    _assert_v1_uses_original_processor()
|
class Solution:
    def buddyStrings(self, A: str, B: str) -> bool:
        """Return True iff swapping exactly two characters of A yields B.

        :param A: First string.
        :param B: Second string.
        :return: Whether one swap of two letters in A makes it equal to B.
        """
        if len(A) != len(B):
            return False
        # Positions where the strings disagree; more than two can never be
        # repaired by a single swap, so bail out early.
        mismatches = []
        # A set is enough to detect a repeated character (the original code
        # built a full count dict only to set a boolean flag).
        seen = set()
        has_duplicate = False
        for idx, ch in enumerate(A):
            if ch in seen:
                has_duplicate = True
            seen.add(ch)
            if ch != B[idx]:
                mismatches.append(idx)
                if len(mismatches) > 2:
                    return False
        if len(mismatches) == 1:
            return False
        if len(mismatches) == 2:
            i, j = mismatches
            # The two differing positions must swap cleanly.
            return A[i] == B[j] and A[j] == B[i]
        # A == B: a swap still changes two positions, so it can only leave
        # the string intact if some character appears at least twice.
        return has_duplicate
|
import numpy
import os
import bz2
import gzip
import multiprocessing
import time
import zipfile
class scio:
    """Incremental writer for the simple 'scio' binary array format.

    The file starts with an int32 header (rank, each dimension size, dtype
    code) followed by raw frames written by append().  A negative rank in
    the header marks diff-encoded data: each frame after the first is
    stored as the difference from the previous frame.
    """
    def __init__(self,fname,arr=None,status='w',compress=None,diff=False):
        # Treat an empty compress string the same as "no compression".
        if not(compress is None):
            if len(compress)==0:
                compress=None
        # NOTE(review): default status='w' opens the file in text mode;
        # numpy's tofile writes raw bytes through the descriptor — confirm
        # callers pass a binary mode ('wb') where it matters.
        self.fid=open(fname,status)
        self.fname=fname
        self.diff=diff
        # Last frame written; kept only for diff encoding.
        self.last=None
        # Shell command (e.g. 'bzip2') run on the file after close.
        self.compress=compress
        self.closed=False
        if arr is None:
            # Header is deferred until the first append().
            self.dtype=None
            self.shape=None
            self.initialized=False
        else:
            self.dtype=arr.dtype
            self.shape=arr.shape
            self.initialized=True
            self.write_header(arr)
            self.append(arr)
    def __del__(self):
        # Flush/close exactly once, then optionally compress the file by
        # shelling out to the configured external tool.
        if self.closed==False:
            print('closing scio file ' + self.fname)
            self.fid.flush()
            self.fid.close()
            self.closed=True
            if not(self.compress is None):
                # NOTE(review): command built by string concatenation and run
                # via os.system — unsafe if fname ever comes from untrusted
                # input.
                to_exec=self.compress + ' ' + self.fname
                os.system(to_exec)
    def close(self):
        # Explicit close; delegates to __del__ so compression also runs.
        self.__del__()
    def write_header(self,arr):
        """Write the int32 header describing arr's rank, shape and dtype."""
        sz=arr.shape
        myvec=numpy.zeros(len(sz)+2,dtype='int32')
        myvec[0]=len(sz)
        if self.diff:
            # Negative rank flags diff-encoded data to readers.
            myvec[0]=-1*myvec[0]
        for i in range(len(sz)):
            myvec[i+1]=sz[i]
        # Last slot holds the dtype code (dtype2int is defined elsewhere in
        # this file).
        myvec[-1]=dtype2int(arr)
        myvec.tofile(self.fid)
    def append(self,arr):
        """Append one frame, writing the header first if not yet initialized.

        A shape mismatch is only reported (the frame is still written); a
        dtype mismatch skips the write entirely.
        """
        if self.initialized==False:
            self.dtype=arr.dtype
            self.shape=arr.shape
            self.write_header(arr)
            self.initialized=True
        if (arr.shape==self.shape):
            pass
        else:
            # NOTE(review): warns only — the mismatched frame is still
            # written below; confirm this is intentional.
            print("shape mismatch in scio.append")
        if (arr.dtype==self.dtype):
            if (self.diff):
                if self.last is None:
                    # First frame is stored verbatim.
                    arr_use=arr
                else:
                    # Subsequent frames store the delta from the last frame.
                    arr_use=arr-self.last
                self.last=arr.copy()
            else:
                arr_use=arr
            arr_use.tofile(self.fid)
            self.fid.flush()
        else:
            print('dtype mismatch in scio.append on file ' + self.fname)
#def append(arr,fname,overwrite=False):
# asdf='abc'
# assert(type(fname)==type(asdf))
# asdf=numpy.zeros(2)
# assert(type(arr)==type(asdf))
# if overwrite:
# os.system('rm ' + fname)
#
# if (os.path.isfile(fname)):
# f=open(fname,'a')
# arr.tofile(f)
# f.close()
# else:
# print 'creating ' + fname
# f=open(fname,'w')
# sz=arr.shape
# myvec=numpy.zeros(len(sz)+2,dtype='int32')
# myvec[0]=len(sz)
# for i in range(len(sz)):
# myvec[i+1]=sz[i]
# myvec[-1]=dtype2int(arr)
# #print myvec
# #print sz
# #print type(myvec)
# myvec.tofile(f)
# arr.tofile(f)
# f.close()
def _read_from_string(mystr):
    """Parse a raw scio byte string (header + frames) into a numpy array.

    Header layout (all int32): rank (negative => diff-encoded frames), the
    rank dimension sizes, then a dtype code understood by int2dtype.  The
    remaining bytes are concatenated frames; a trailing partial frame is
    truncated with a warning.

    :param mystr: Raw bytes of an entire scio file.
    :return: Array of shape (nframes, *frame_shape).  Diff-encoded data is
        cumulatively summed back to absolute values.
    """
    icur = 0
    # numpy.frombuffer replaces numpy.fromstring, which was deprecated and
    # later removed for binary data.
    ndim = numpy.frombuffer(mystr[icur:icur + 4], dtype='int32')[0]
    icur = icur + 4
    if ndim < 0:
        # Negative rank flags diff-encoded frames.
        diff = True
        ndim = -1 * ndim
    else:
        diff = False
    sz = numpy.frombuffer(mystr[icur:icur + 4 * ndim], 'int32')
    icur = icur + 4 * ndim
    mytype = numpy.frombuffer(mystr[icur:icur + 4], 'int32')[0]
    icur = icur + 4
    # Check for file size sanity: drop any trailing partial frame.
    # numpy.prod replaces numpy.product (removed in NumPy 2.0); int() keeps
    # the warning message stable across numpy scalar repr changes.
    bytes_per_frame = int2nbyte(mytype) * numpy.prod(sz)
    cur_bytes = len(mystr) - icur
    n_to_cut = int(numpy.remainder(cur_bytes, bytes_per_frame))
    if n_to_cut > 0:
        print('We have a byte mismatch in reading scio file. Truncating ' + repr(n_to_cut) + ' bytes.')
        mystr = mystr[:-n_to_cut]
    # .copy() keeps the returned array writable, matching the old
    # fromstring behavior (frombuffer views over bytes are read-only).
    vec = numpy.frombuffer(mystr[icur:], dtype=int2dtype(mytype)).copy()
    # Integer division: after truncation this is an exact frame count
    # (the old float '/' relied on an implicit cast into int32).
    nmat = vec.size // numpy.prod(sz)
    new_sz = numpy.zeros(sz.size + 1, dtype='int32')
    new_sz[0] = nmat
    new_sz[1:] = sz
    mat = numpy.reshape(vec, new_sz)
    if diff:
        # Undo the diff encoding by cumulative summing along frames.
        mat = numpy.cumsum(mat, 0)
    return mat
def _read_file_as_string(fname):
if fname[-4:]=='.bz2':
f=bz2.BZ2File(fname,'r')
mystr=f.read()
f.close()
return mystr
if fname[-3:]=='.gz':
f=gzip.GzipFile(fname,'r')
mystr=f.read()
f.close()
return mystr
#if we get here, assume it's raw binary
f=open(fname,'rb')
mystr=f.read()
f.close()
return mystr
def read_from_archive(fname, arcname, strict=False):
    """Read a scio file stored inside a zip archive.

    fname: member name (tried raw, then with '.bz2' and '.gz' suffixes).
    arcname: path to a zip file, or an already-open ZipFile object; in the
        latter case member names are rooted at the archive's first entry.
    Returns the parsed ndarray, or None if the member is not found.
    """
    if isinstance(arcname, str):
        f = zipfile.ZipFile(arcname)
    else:
        f = arcname
        fname = f.namelist()[0] + fname
    mystr = None
    if fname in f.namelist():
        mystr = f.read(fname)
    elif fname + '.bz2' in f.namelist():
        tmp = f.read(fname + '.bz2')
        mystr = bz2.decompress(tmp)
    elif fname + '.gz' in f.namelist():
        # BUG FIX: this branch previously read the '.bz2' member, which does
        # not exist here, so gzip members always raised KeyError.
        tmp = f.read(fname + '.gz')
        mystr = gzip.decompress(tmp)
    if mystr is None:
        print(fname, ' not found in ', arcname)
        return None
    return _read_from_string(mystr)
def read(fname, strict=False):
    """Read a scio file and return its frames as an ndarray.

    strict=True reads exactly the given path; otherwise sibling names with
    the '.bz2'/'.gz' suffix added or removed are tried as well.
    Returns None when nothing readable/parsable is found.

    (The original body was wrapped in a dead ``if True:`` and followed by
    ~20 lines of unreachable legacy code after the return; both removed.)
    """
    if strict:
        # only read the filename passed in
        mystr = _read_file_as_string(fname)
        return _read_from_string(mystr)
    # try some guesses about what other sane filenames might be based on
    # the input filename
    candidates = [fname]
    if fname[-4:] == '.bz2':
        candidates.append(fname[:-4])
    if fname[-3:] == '.gz':
        candidates.append(fname[:-3])
    candidates.append(fname + '.bz2')
    candidates.append(fname + '.gz')
    for candidate in candidates:
        try:
            mystr = _read_file_as_string(candidate)
        except Exception:
            # unreadable candidate (e.g. missing file) -- try the next one
            continue
        if len(mystr) == 0:
            return None
        try:
            # catches cases where the string length is unexpected/garbled
            return _read_from_string(mystr)
        except Exception:
            print('File ', candidate, ' appears to be garbled when parsing string of length ', len(mystr))
            return None
    return None
def read_files(fnames, ncpu=0):
    """Read many scio files in parallel.

    fnames: iterable of file paths handed to read().
    ncpu: worker count; 0 means one per CPU.
    Returns a list of arrays (None entries for unreadable files).
    """
    if ncpu == 0:
        ncpu = multiprocessing.cpu_count()
    # The context manager guarantees terminate() even if a worker raises.
    # Leaked pools previously exhausted the system's process limit (the old
    # code only terminated on the success path).
    with multiprocessing.Pool(ncpu) as pool:
        data = pool.map(read, fnames)
    return data
def int2dtype(myint):
    """Map a scio header type code to a numpy dtype name (None if unknown)."""
    codes = {
        8: 'float64',
        4: 'float32',
        -4: 'int32',
        -8: 'int64',
        -104: 'uint32',
        -108: 'uint64',
    }
    return codes.get(myint)
def int2nbyte(myint):
    """Bytes per element for a scio type code (unsigned codes are offset by 100)."""
    width = numpy.abs(myint)
    return width - 100 if width > 100 else width
def dtype2int(dtype_str):
    """Encode an array's (or dtype's) element type as a scio header code.

    Prints a warning and returns 0 for unsupported dtypes.
    """
    # Accept either a numpy.dtype or anything carrying a .dtype attribute.
    if type(dtype_str) != numpy.dtype:
        dtype_str = dtype_str.dtype
    codes = (
        ('float64', 8),
        ('float32', 4),
        ('int32', -4),
        ('int64', -8),
        ('uint32', -104),
        ('uint64', -108),
    )
    for name, code in codes:
        if dtype_str == numpy.dtype(name):
            return code
    print('unknown dtype')
    return 0
|
class ConsoleCancelEventArgs(EventArgs):
    """ Provides data for the System.Console.CancelKeyPress event. This class cannot be inherited. """
    # Auto-generated .NET interop stub: the property bodies are placeholder
    # lambdas (get returns a fresh object, set/del do nothing); the real
    # implementations live in the CLR.  Do not call these from pure Python.
    Cancel = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets or sets a value indicating whether simultaneously pressing the System.ConsoleModifiers.Control modifier key and System.ConsoleKey.C console key (CTRL+C) terminates the current process.
Get: Cancel(self: ConsoleCancelEventArgs) -> bool
Set: Cancel(self: ConsoleCancelEventArgs)=value
"""
    SpecialKey = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Gets the combination of modifier and console keys that interrupted the current process.
Get: SpecialKey(self: ConsoleCancelEventArgs) -> ConsoleSpecialKey
"""
|
install = []
import os
import subprocess
import sys

# Probe for third-party dependencies; anything missing is queued for install.
# except ImportError (was a bare except) so real errors inside colorama
# still surface instead of triggering a pointless reinstall.
try: from colorama import Fore, init
except ImportError: install.append("colorama")

# Trying to install the needed modules if there not already
if install:
    # An argument list avoids the shell quoting problems os.system hit when
    # sys.executable contains spaces.
    subprocess.run([sys.executable, "-m", "pip", "install"] + install)
    print("[STARTUP] INSTALLED MODULES: " + str(install))
    quit()

# colors!
init()
################## Functions ######################################
def warn(text):
    """Print *text* behind a yellow [WARN] tag."""
    print(f"[{Fore.YELLOW}WARN{Fore.RESET}] {text}")
def error(text):
    """Print *text* behind a red [ERROR] tag."""
    print(f"[{Fore.RED}ERROR{Fore.RESET}] {text}")
def done_task(text):
    """Print *text* behind a green [DONE] tag."""
    print(f"[{Fore.GREEN}DONE{Fore.RESET}] {text}")
def info(text):
    """Print *text* behind a white [INFO] tag."""
    print(f"[{Fore.WHITE}INFO{Fore.RESET}] {text}")
def critical(text):
    """Print *text* behind a red [CRITICAL] tag, then exit the interpreter."""
    print(f"[{Fore.RED}CRITICAL{Fore.RESET}] {text}")
    quit()
################## END LOGGING ###################################
|
from types import ClassMethodDescriptorType
import selenium
from bs4 import BeautifulSoup
import requests
import time
import random
import sys
# GitHub username from the command line; named `username` rather than the
# previous `id`, which shadowed the builtin.
username = sys.argv[1]
print(sys.argv[1])
url = f'https://github-readme-stats.vercel.app/api?username={username}&count_private=true'
langurl = f'https://github-readme-stats.vercel.app/api/top-langs/?username={username}'
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_5_2) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15',
}
# Fetch the rendered stats SVGs (a User-Agent is required by the service).
source = requests.get(url, headers=header).text
langsource = requests.get(langurl, headers=header).text
soup = BeautifulSoup(source, 'lxml')
langsoup = BeautifulSoup(langsource, 'lxml')
# Stat values and language names live in <text> nodes of the SVGs.
countlist = [field.text for field in soup.findAll('text', class_='stat')]
langlist = [language.text for language in langsoup.findAll('text', class_='lang-name')]
print(countlist)
print(langlist)
#! /usr/bin/env python
from __future__ import print_function
import logging
import json
import os
import posixpath
import shutil
import sys
try:
from urllib import unquote
except Exception:
from urllib.parse import unquote
try:
from BaseHTTPServer import BaseHTTPRequestHandler
except ImportError:
from http.server import BaseHTTPRequestHandler
try:
import SocketServer as socketserver
except ImportError:
import socketserver
import _project
__version__ = '0.1'
class HelperHandler(BaseHTTPRequestHandler):
    '''Based on SimpleHTTPRequestHandler'''
    # Serves two kinds of traffic: JSON command POSTs dispatched to the
    # _project module, and static-file GET/HEAD requests rooted at the
    # current working directory.

    server_version = "HelperHTTP/" + __version__

    def do_POST(self):
        """Serve a POST request."""
        # Only a fixed whitelist of command endpoints is accepted.
        if self.path[1:] not in ('newProject', 'updateProject',
                                 'buildProject', 'removeProject',
                                 'queryProject', 'queryVersion',
                                 'newLicense', 'removeLicense'):
            self.send_error(404, "File not found")
            return
        n = int(self.headers.get('Content-Length', 0))
        # NOTE(review): the content type is read but never used below —
        # confirm whether charset handling was intended here.
        t = self.headers.get('Content-Type', 'text/json;charset=UTF-8')
        if n == 0:
            arguments = ''
        else:
            arguments = self.rfile.read(n).decode()
        self.log_message("Arguments '%s'", arguments)
        result = self.run_command(self.path[1:], arguments)
        if result is not None:
            response = json.dumps(result).encode()
            self.send_response(200)
            self.send_header("Content-type", "text/json")
            self.send_header("Content-Length", str(len(response)))
            # CORS header: the web UI may be served from a different origin.
            self.send_header("Access-Control-Allow-Origin", "*")
            self.send_header("Last-Modified", self.date_time_string())
            self.end_headers()
            self.wfile.write(response)
        else:
            # NOTE(review): run_command always returns a dict, so this
            # branch appears unreachable — confirm before relying on it.
            self.send_error(501, "Server internal error")

    def do_GET(self):
        """Serve a GET request."""
        f = self.send_head()
        if f:
            self.copyfile(f, self.wfile)
            f.close()

    def do_HEAD(self):
        """Serve a HEAD request."""
        # Same as GET, but the body file is closed without being sent.
        f = self.send_head()
        if f:
            f.close()

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.

        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            # Directory request: fall back to an index file if present.
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                self.send_error(404, "File not found")
                return None
        # if os.path.basename(path) not in (
        #         'bootstrap.min.css', 'bootstrap.min.js', 'jquery.min.js',
        #         'pyarmor.js', 'index.html'):
        #     self.send_error(404, "File not found")
        #     return None
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        # fs[6] is st_size: advertise the exact byte count of the body.
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f

    def run_command(self, command, arguments):
        """Dispatch a whitelisted command to _project; wrap result/errors
        into a JSON-serializable {errcode, result} dict."""
        try:
            data = json.loads(arguments)
            result = getattr(_project, command)(data)
            errcode = 0
        except Exception as e:
            errcode = 1
            result = "Unhandle Server Error: %s" % str(e)
            logging.exception("Unhandle Server Error")
        return dict(errcode=errcode, result=result)

    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.

        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored.  (XXX They should
        probably be diagnosed.)
        """
        # abandon query parameters
        path = path.split('?', 1)[0]
        path = path.split('#', 1)[0]
        path = posixpath.normpath(unquote(path))
        words = path.split('/')
        words = filter(None, words)
        path = os.getcwd()
        for word in words:
            # Strip drive/directory parts so the request cannot escape cwd.
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            if word in (os.curdir, os.pardir): continue
            path = os.path.join(path, word)
        return path

    def copyfile(self, source, outputfile):
        """Copy all data between two file objects.

        The SOURCE argument is a file object open for reading
        (or anything with a read() method) and the DESTINATION
        argument is a file object open for writing (or
        anything with a write() method).

        The only reason for overriding this would be to change
        the block size or perhaps to replace newlines by CRLF
        -- note however that this the default server uses this
        to copy binary data as well.
        """
        shutil.copyfileobj(source, outputfile)

    def guess_type(self, path):
        """Guess the type of a file.

        Argument is a PATH (a filename).

        Return value is a string of the form type/subtype,
        usable for a MIME Content-type header.

        The default implementation looks the file's extension
        up in the table self.extensions_map, using application/octet-stream
        as a default; however it would be permissible (if
        slow) to look inside the data to make a better guess.
        """
        base, ext = posixpath.splitext(path)
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        # Retry case-insensitively before falling back to the default.
        ext = ext.lower()
        if ext in self.extensions_map:
            return self.extensions_map[ext]
        else:
            return self.extensions_map['']

    # Extension -> MIME type table used by guess_type; '' is the fallback.
    extensions_map = {
        '': 'application/octet-stream',  # Default
        '.css': 'text/css',
        '.html': 'text/html',
        '.js': 'application/x-javascript',
    }
def main():
    """Run the helper HTTP server on the port given in argv[1] (default 9096),
    opening a browser tab at it on a best-effort basis."""
    logging.basicConfig(
        level=logging.INFO,
        format='%(levelname)-8s %(message)s',
    )
    try:
        port = int(sys.argv[1])
    except Exception:
        port = 9096
    server = socketserver.TCPServer(("", port), HelperHandler)
    print("Serving HTTP on %s port %s ..." % server.server_address)
    # Opening the browser is purely a convenience; ignore any failure.
    try:
        from webbrowser import open_new_tab
        open_new_tab("http://localhost:%d" % server.server_address[1])
    except Exception:
        pass
    server.serve_forever()


if __name__ == '__main__':
    main()
|
from . import drawer
class Screen(drawer.Screen):
    # Text-mode screen: renders the flat cell buffer self.frame (row-major,
    # field_width * field_height entries) into the printable string
    # self.rendered_frame, optionally wrapped in a border drawn with
    # self.default_symbol.

    def render_frame(self):
        # Build the printable string for the current frame.
        self.rendered_frame = ""
        if self.border:
            # top border spans the field width plus the two side columns
            self.rendered_frame += self.default_symbol * (self.field_width + 2)
            self.rendered_frame += "\n"
        for row in range(self.field_height):
            if self.border:
                self.rendered_frame += self.default_symbol
            for column in range(self.field_width):
                # frame is stored row-major as a flat list of 1-char strings
                self.rendered_frame += self.frame[(row * self.field_width) + column]
            if self.border:
                self.rendered_frame += self.default_symbol
            self.rendered_frame += "\n"
        if self.border:
            # bottom border carries no trailing newline
            self.rendered_frame += self.default_symbol * (self.field_width + 2)

    def draw(self, objects):
        # Each object is an (x, y, symbol) triple placed into the flat buffer.
        # NOTE(review): negative coordinates index from the end of the list
        # without raising IndexError — confirm callers never pass them.
        self.frame = [" "] * (self.field_width * self.field_height)
        for x in objects:
            try:
                self.frame[(x[1] * self.field_width) + x[0]] = x[2]
            except IndexError:
                # out-of-field cells are appended past the buffer end
                self.frame.append(x[2])
        self.render_frame()
|
from peak.util.addons import AddOn
import protocols
class IAliasProvider(protocols.Interface):
    # PyProtocols interface: contract for mapping (name, service, protocol)
    # triples to aliases.  Method bodies are intentionally empty — this class
    # only declares the interface.
    def get_alias(name, service, protocol=None):
        pass

    def set_alias(name, service, protocol, alias):
        pass
class StubAliasProvider(AddOn):
    # No-op IAliasProvider implementation advertised (via PyProtocols) as an
    # adapter for any object: lookups always miss and stores are discarded.
    protocols.advise(instancesProvide=(IAliasProvider,), asAdapterForTypes=(object,))

    def get_alias(self, name, service, protocol=None):
        # Always reports "no alias known".
        return None  # "Foo"

    def set_alias(self, name, service, protocol, alias):
        pass
|
# here are the main analytics functions
import random
def score_ideas(ideas):
    """
    This is a function that assigns scores to the ideas.

    Inputs:
        ideas (list): a list of ideas to be scored.
    Output:
        ranked_ideas (list): a list of idea and score tuple ranked descending.
    """
    # score each idea, then rank highest-first
    scored = [[idea, run_score(idea)] for idea in ideas]
    return sorted(scored, key=get_key, reverse=True)
def run_score(idea):
    """
    This function runs analysis and assigns a score to an idea.

    Inputs:
        idea (str): an idea to be score
    Output:
        score (int): a score between 0 and 100.
    """
    # TODO: develop the scoring function
    # Placeholder until real analysis exists: uniform random score.
    return random.randint(0, 100)
def get_key(item):
    """Sort key: the score element of an [idea, score] pair."""
    return item[1]
|
# terrascript/data/shell.py
import terrascript
class shell_script(terrascript.Data):
    # Auto-generated terrascript binding for the Terraform "shell_script"
    # data source; all behavior comes from terrascript.Data.
    pass


# Public API of this generated module.
__all__ = [
    "shell_script",
]
|
# -*- coding: utf-8 -*-
""" Sahana Eden Staff Search Module Automated Tests
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class SearchStaff(SeleniumUnitTest):
    # Browser-driven regression tests for the HRM staff search page.
    # Fix: the bare ``print "\n"`` statements were Python-2-only syntax (a
    # SyntaxError under Python 3); ``print("\n")`` behaves identically on
    # both interpreters.
    def test_hrm002_search_staff_simple(self):
        """
            @case: hrm002
            @description: Search Staff - Simple Search
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_id("human_resource_search_simple").clear()
        self.browser.find_element_by_id("human_resource_search_simple").send_keys("Mariana")
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_organisation(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by Organisation (Timor-Leste Red Cross Society (CVTL)
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        # select the organisation checkbox by its label text
        #self.browser.find_element_by_id("id-human_resource_search_org-3").click()
        self.browser.find_element_by_xpath("//label[contains(text(),'Timor-Leste Red Cross Society')]").click()
        # "Timor-Leste Red Cross Society"
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_stateProvince(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by State/Province (Dili & Viqueque)
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        self.browser.find_element_by_id("id-human_resource_search_L1-1").click()
        self.browser.find_element_by_id("id-human_resource_search_L1-5").click()
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_district(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by County/District (Ainaro & Viqueque)
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        self.browser.find_element_by_id("id-human_resource_search_L2-0").click()
        self.browser.find_element_by_id("id-human_resource_search_L2-5").click()
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_facility(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by Facility (Lospalos Branch Office (Office))
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        self.browser.find_element_by_id("id-human_resource_search_site-6").click()
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_training(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by Training (Basics of First Aid)
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        self.browser.find_element_by_id("id-human_resource_search_training-2").click()
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()

    def test_hrm002_search_staff_advance_by_training_and_facility(self):
        """
            @case: hrm002
            @description: Advanced Search Staff by Training (Basics of First Aid)
        """
        print("\n")
        self.login(account="admin", nexturl="hrm/staff/search")
        self.browser.find_element_by_link_text("Advanced Search").click()
        self.browser.find_element_by_id("id-human_resource_search_site-6").click()
        self.browser.find_element_by_id("id-human_resource_search_training-2").click()
        self.browser.find_element_by_css_selector("input[type=\"submit\"]").click()
|
# Make me coffee!
# maybe someday we'll actually make this order coffee using a phone-connection service :D
from espresso.main import robot
@robot.respond(r'(?i)make (me )?(a )?coffee')
def make_coffee(res):
    # Reply with a coffee picture; the regex matches "make coffee" and the
    # "make me a coffee" variants, case-insensitively.
    res.reply(res.msg.user, "http://dreamatico.com/data_images/coffee/coffee-3.jpg")
|
import yaml
import pprint
import os
def generate_cass_yaml():
    """Stage a cassandra.yaml for the test cluster.

    Reads the template from ../res, replaces address binding with interface
    binding, disables auto snapshots, pins the seed node, and writes the
    result to ../stage.  (The unused ``documents`` assignment was removed.)
    """
    with open('../res/cassandra.yaml') as inFile:
        # The FullLoader parameter handles the conversion from YAML
        # scalar values to Python the dictionary format
        cassandra_yaml = yaml.load(inFile, Loader=yaml.FullLoader)
    # Bind by interface instead of explicit address.
    # NOTE(review): interface name and seed IP are hardcoded for one test
    # box — parameterize before reusing elsewhere.
    del cassandra_yaml['listen_address']
    del cassandra_yaml['rpc_address']
    cassandra_yaml['listen_interface'] = 'ens33'
    cassandra_yaml['rpc_interface'] = 'ens33'
    cassandra_yaml['auto_snapshot'] = False
    cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds'] = "192.168.197.130"
    with open('../stage/cassandra.yaml', 'w') as outFile:
        yaml.dump(cassandra_yaml, outFile)
generate_cass_yaml()
# Copy the staged config into the local Cassandra install.  shutil.copy with
# expanduser replaces the former os.system("cp ...") call: no shell involved,
# and failures raise instead of being silently ignored.
import shutil
shutil.copy(os.path.expanduser("~/CCM/CCM2/stage/cassandra.yaml"),
            os.path.expanduser("~/cassandra/conf/"))

# CREATE KEYSPACE ycsb WITH REPLICATION = {'class' : 'SimpleStrategy', 'replication_factor': 1};
# CREATE TABLE ycsb.usertable ( y_id varchar primary key, field0 varchar);
import time
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils_multi import *
def build_targets(pred_corners, target, num_keypoints, anchors, num_anchors, num_classes, nH, nW, noobject_scale,
                  object_scale, sil_thresh, seen):
    """Build per-grid-cell training targets for multi-object keypoint regression.

    pred_corners: (nB*nA*nH*nW, 2*num_keypoints) predicted keypoints on CPU.
    target: (nB, ...) flattened ground-truth rows, up to 50 objects per image,
        each row holding num_labels = 2*num_keypoints + 3 values
        (class, keypoint x/y pairs, width, height).
    Returns (nGT, nCorrect, coord_mask, conf_mask, cls_mask, txs, tys,
    tconf, tcls) — masks and regression targets shaped (nB, nA, nH, nW).
    """
    nB = target.size(0)
    nA = num_anchors
    nC = num_classes
    anchor_step = len(anchors) // num_anchors
    # Cells start with the no-object confidence weight; object cells get
    # object_scale assigned later.
    conf_mask = torch.ones(nB, nA, nH, nW) * noobject_scale
    coord_mask = torch.zeros(nB, nA, nH, nW)
    cls_mask = torch.zeros(nB, nA, nH, nW)
    # One target plane per keypoint coordinate.
    txs = list()
    tys = list()
    for i in range(num_keypoints):
        txs.append(torch.zeros(nB, nA, nH, nW))
        tys.append(torch.zeros(nB, nA, nH, nW))
    tconf = torch.zeros(nB, nA, nH, nW)
    tcls = torch.zeros(nB, nA, nH, nW)
    num_labels = 2 * num_keypoints + 3  # +2 for width, height and +1 for class within label files
    nAnchors = nA * nH * nW
    nPixels = nH * nW
    # Pass 1: silence the no-object penalty wherever some prediction already
    # matches a ground-truth object well enough.
    for b in range(nB):
        cur_pred_corners = pred_corners[b * nAnchors:(b + 1) * nAnchors].t()
        cur_confs = torch.zeros(nAnchors)
        for t in range(50):
            # a zero first-keypoint x marks the end of the object list
            if target[b][t * num_labels + 1] == 0:
                break
            g = list()
            for i in range(num_keypoints):
                g.append(target[b][t * num_labels + 2 * i + 1])
                g.append(target[b][t * num_labels + 2 * i + 2])
            cur_gt_corners = torch.FloatTensor(g).repeat(nAnchors, 1).t()  # 18 x nAnchors
            cur_confs = torch.max(cur_confs.view_as(conf_mask[b]),
                                  corner_confidences(cur_pred_corners, cur_gt_corners).view_as(conf_mask[
                                      b]))  # some irrelevant areas are filtered, in the same grid multiple anchor boxes might exceed the threshold
        conf_mask[b][cur_confs > sil_thresh] = 0
    # Pass 2: assign targets per ground-truth object.
    nGT = 0
    nCorrect = 0
    for b in range(nB):
        for t in range(50):
            if target[b][t * num_labels + 1] == 0:
                break
            nGT = nGT + 1
            best_iou = 0.0
            best_n = -1
            min_dist = sys.maxsize
            gx = list()
            gy = list()
            gt_box = list()
            for i in range(num_keypoints):
                gt_box.extend([target[b][t * num_labels + 2 * i + 1], target[b][t * num_labels + 2 * i + 2]])
                gx.append(target[b][t * num_labels + 2 * i + 1] * nW)
                gy.append(target[b][t * num_labels + 2 * i + 2] * nH)
                if i == 0:
                    # grid cell containing the first (centroid) keypoint
                    gi0 = int(gx[i])
                    gj0 = int(gy[i])
            # NOTE(review): pred_box is indexed with best_n while best_n is
            # still -1 (the anchor-selection loop runs below), so this reads
            # the last anchor's slot of the previous image block — confirm
            # whether this matches the upstream implementation's intent.
            pred_box = pred_corners[b * nAnchors + best_n * nPixels + gj0 * nW + gi0]
            conf = corner_confidence(gt_box, pred_box)
            # Decide which anchor to use during prediction
            gw = target[b][t * num_labels + num_labels - 2] * nW
            gh = target[b][t * num_labels + num_labels - 1] * nH
            gt_2d_box = [0, 0, gw, gh]
            for n in range(nA):
                aw = anchors[anchor_step * n]
                ah = anchors[anchor_step * n + 1]
                anchor_box = [0, 0, aw, ah]
                iou = bbox_iou(anchor_box, gt_2d_box, x1y1x2y2=False)
                if iou > best_iou:
                    best_iou = iou
                    best_n = n
            # Mark the chosen (anchor, cell) as responsible for this object.
            coord_mask[b][best_n][gj0][gi0] = 1
            cls_mask[b][best_n][gj0][gi0] = 1
            conf_mask[b][best_n][gj0][gi0] = object_scale
            # Update targets: keypoint offsets are relative to the cell origin.
            for i in range(num_keypoints):
                txs[i][b][best_n][gj0][gi0] = gx[i] - gi0
                tys[i][b][best_n][gj0][gi0] = gy[i] - gj0
            tconf[b][best_n][gj0][gi0] = conf
            tcls[b][best_n][gj0][gi0] = target[b][t * num_labels]
            if conf > 0.5:
                nCorrect = nCorrect + 1
    return nGT, nCorrect, coord_mask, conf_mask, cls_mask, txs, tys, tconf, tcls
class RegionLoss(nn.Module):
    # YOLO-style region loss for multi-object keypoint pose estimation:
    # coordinate MSE + confidence MSE + classification cross-entropy,
    # with targets produced by build_targets().

    def __init__(self, num_keypoints=9, num_classes=13, anchors=[], num_anchors=5, pretrain_num_epochs=15):
        super(RegionLoss, self).__init__()
        self.num_classes = num_classes
        self.anchors = anchors
        # NOTE(review): true division here vs. floor division (//) in
        # build_targets — confirm anchor lists always divide evenly.
        self.anchor_step = len(anchors) / num_anchors
        self.num_keypoints = num_keypoints
        # Loss-term weights and the confidence-silencing threshold.
        self.coord_scale = 1
        self.noobject_scale = 1
        self.object_scale = 5
        self.class_scale = 1
        self.thresh = 0.6
        self.seen = 0
        # Confidence loss is excluded until this many epochs have passed.
        self.pretrain_num_epochs = pretrain_num_epochs

    def forward(self, output, target, epoch):
        # Parameters
        t0 = time.time()
        nB = output.data.size(0)
        nA = self.num_anchors
        nC = self.num_classes
        nH = output.data.size(2)
        nW = output.data.size(3)

        # Activation: sigmoid on the first (centroid) keypoint and the
        # confidence channel; remaining keypoint offsets stay linear.
        output = output.view(nB, nA, (2 * self.num_keypoints + 1 + nC), nH, nW)
        x = list()
        y = list()
        x.append(torch.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([0]))).view(nB, nA, nH, nW)))
        y.append(torch.sigmoid(output.index_select(2, Variable(torch.cuda.LongTensor([1]))).view(nB, nA, nH, nW)))
        for i in range(1, self.num_keypoints):
            x.append(output.index_select(2, Variable(torch.cuda.LongTensor([2 * i + 0]))).view(nB, nA, nH, nW))
            y.append(output.index_select(2, Variable(torch.cuda.LongTensor([2 * i + 1]))).view(nB, nA, nH, nW))
        conf = torch.sigmoid(
            output.index_select(2, Variable(torch.cuda.LongTensor([2 * self.num_keypoints]))).view(nB, nA, nH, nW))
        cls = output.index_select(2, Variable(
            torch.linspace(2 * self.num_keypoints + 1, 2 * self.num_keypoints + 1 + nC - 1, nC).long().cuda()))
        cls = cls.view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(nB * nA * nH * nW, nC)
        t1 = time.time()

        # Create pred boxes: absolute keypoint positions normalized to [0,1]
        # by adding each cell's grid offset, then moved to CPU for
        # build_targets.
        pred_corners = torch.cuda.FloatTensor(2 * self.num_keypoints, nB * nA * nH * nW)
        grid_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
        grid_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().repeat(nB * nA, 1, 1).view(nB * nA * nH * nW).cuda()
        for i in range(self.num_keypoints):
            pred_corners[2 * i + 0] = (x[i].data.view_as(grid_x) + grid_x) / nW
            pred_corners[2 * i + 1] = (y[i].data.view_as(grid_y) + grid_y) / nH
        gpu_matrix = pred_corners.transpose(0, 1).contiguous().view(-1, 2 * self.num_keypoints)
        pred_corners = convert2cpu(gpu_matrix)
        t2 = time.time()

        # Build targets
        nGT, nCorrect, coord_mask, conf_mask, cls_mask, txs, tys, tconf, tcls = \
            build_targets(pred_corners, target.data, self.num_keypoints, self.anchors, nA, nC, nH, nW,
                          self.noobject_scale, self.object_scale, self.thresh, self.seen)
        cls_mask = (cls_mask == 1)
        nProposals = int((conf > 0.25).sum().item())
        for i in range(self.num_keypoints):
            txs[i] = Variable(txs[i].cuda())
            tys[i] = Variable(tys[i].cuda())
        tconf = Variable(tconf.cuda())
        tcls = Variable(tcls[cls_mask].long().cuda())
        coord_mask = Variable(coord_mask.cuda())
        # sqrt on the mask: MSE of masked values squares the weight back.
        conf_mask = Variable(conf_mask.cuda().sqrt())
        cls_mask = Variable(cls_mask.view(-1, 1).repeat(1, nC).cuda())
        cls = cls[cls_mask].view(-1, nC)
        t3 = time.time()

        # Create loss
        loss_xs = list()
        loss_ys = list()
        for i in range(self.num_keypoints):
            loss_xs.append(
                self.coord_scale * nn.MSELoss(size_average=False)(x[i] * coord_mask, txs[i] * coord_mask) / 2.0)
            loss_ys.append(
                self.coord_scale * nn.MSELoss(size_average=False)(y[i] * coord_mask, tys[i] * coord_mask) / 2.0)
        loss_conf = nn.MSELoss(size_average=False)(conf * conf_mask, tconf * conf_mask) / 2.0
        # used in original implementation:
        # loss_x = np.sum(loss_xs)
        # loss_y = np.sum(loss_ys)
        # adapted to be used in PyTorch >= 1.7
        loss_x = torch.stack(loss_xs)
        loss_y = torch.stack(loss_ys)
        loss_x = torch.sum(loss_x)
        loss_y = torch.sum(loss_y)
        loss_cls = self.class_scale * nn.CrossEntropyLoss(size_average=False)(cls, tcls)
        if epoch > self.pretrain_num_epochs:
            loss = loss_x + loss_y + loss_cls + loss_conf  # in single object pose estimation, there is no classification loss
        else:
            # pretrain initially without confidence loss
            # once the coordinate predictions get better, start training for confidence as well
            loss = loss_x + loss_y + loss_cls
        print('%d: nGT %d, recall %d, proposals %d, loss: x %f, y %f, conf %f, cls %f, total %f' % (
            self.seen, nGT, nCorrect, nProposals, loss_x.item(), loss_y.item(), loss_conf.item(), loss_cls.item(),
            loss.item()))
        t4 = time.time()
        # Per-stage timing breakdown (disabled).
        if False:
            print('-----------------------------------')
            print('        activation : %f' % (t1 - t0))
            print(' create pred_corners : %f' % (t2 - t1))
            print('     build targets : %f' % (t3 - t2))
            print('       create loss : %f' % (t4 - t3))
            print('             total : %f' % (t4 - t0))
        return loss
|
from sensors.gyroscope import get_orientation
from sensors.barometer import get_pressure
from sensors.hygrometer import get_humidity
from sensors.thermometer import get_temperature
from sensors.magnetometer import get_compass
from sensors.accelerometer import get_acceleration
def get_readings(sense_hat):
    """Poll every sensor on the given Sense HAT and return a name -> value dict."""
    readers = {
        "orientation": get_orientation,
        "pressure": get_pressure,
        "humidity": get_humidity,
        "temperature": get_temperature,
        "compass": get_compass,
        "acceleration": get_acceleration,
    }
    return {name: read(sense_hat) for name, read in readers.items()}
|
import logging
import os
import shutil
import pytest
import salt.serializers.json as jsonserializer
import salt.serializers.msgpack as msgpackserializer
import salt.serializers.plist as plistserializer
import salt.serializers.python as pythonserializer
import salt.serializers.yaml as yamlserializer
import salt.states.file as filestate
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.yaml
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
    # Minimal set of salt loader dunders (__salt__, __opts__, __serializers__,
    # ...) needed to exercise salt.states.file in isolation.
    return {
        filestate: {
            "__env__": "base",
            "__salt__": {"file.manage_file": False},
            "__serializers__": {
                "yaml.serialize": yamlserializer.serialize,
                # NOTE(review): "yaml.seserialize" looks like a typo of
                # "yaml.serialize"; it is preserved byte-for-byte here —
                # confirm nothing looks it up before removing.
                "yaml.seserialize": yamlserializer.serialize,
                "python.serialize": pythonserializer.serialize,
                "json.serialize": jsonserializer.serialize,
                "plist.serialize": plistserializer.serialize,
                "msgpack.serialize": msgpackserializer.serialize,
            },
            "__opts__": {"test": False, "cachedir": ""},
            "__instance_id__": "",
            "__low__": {},
            "__utils__": {},
        }
    }
# 'copy' function tests: 1
def test_copy():
    """
    Test if the source file exists on the system, copy it to the named file.
    """
    name = "/tmp/salt"
    source = "/tmp/salt/salt"
    user = "salt"
    group = "saltstack"

    ret = {"name": name, "result": False, "comment": "", "changes": {}}

    # Empty name is rejected before any filesystem access.
    comt = "Must provide name to file.copy"
    ret.update({"comment": comt, "name": ""})
    assert filestate.copy_("", source) == ret

    mock_t = MagicMock(return_value=True)
    mock_f = MagicMock(return_value=False)
    # uid/gid lookups: first call fails (""), later calls succeed ("1000").
    mock_uid = MagicMock(side_effect=["", "1000", "1000"])
    mock_gid = MagicMock(side_effect=["", "1000", "1000"])
    mock_user = MagicMock(return_value=user)
    mock_grp = MagicMock(return_value=group)
    mock_io = MagicMock(side_effect=IOError)

    # Relative destination path is rejected.
    with patch.object(os.path, "isabs", mock_f):
        comt = "Specified file {} is not an absolute path".format(name)
        ret.update({"comment": comt, "name": name})
        assert filestate.copy_(name, source) == ret

    with patch.object(os.path, "isabs", mock_t):
        # Missing source file is rejected.
        with patch.object(os.path, "exists", mock_f):
            comt = 'Source file "{}" is not present'.format(source)
            ret.update({"comment": comt, "result": False})
            assert filestate.copy_(name, source) == ret

        with patch.object(os.path, "exists", mock_t):
            with patch.dict(
                filestate.__salt__,
                {
                    "file.user_to_uid": mock_uid,
                    "file.group_to_gid": mock_gid,
                    "file.get_user": mock_user,
                    "file.get_group": mock_grp,
                    "file.get_mode": mock_grp,
                    "file.check_perms": mock_t,
                },
            ):
                # Group argument is ignored on Windows systems. Group is set
                # to user
                if salt.utils.platform.is_windows():
                    comt = "User salt is not available Group salt is not available"
                else:
                    comt = "User salt is not available Group saltstack is not available"
                ret.update({"comment": comt, "result": False})
                assert filestate.copy_(name, source, user=user, group=group) == ret

                comt1 = (
                    'Failed to delete "{}" in preparation for'
                    " forced move".format(name)
                )
                comt2 = (
                    'The target file "{}" exists and will not be '
                    "overwritten".format(name)
                )
                comt3 = 'File "{}" is set to be copied to "{}"'.format(source, name)

                with patch.object(os.path, "isdir", mock_f):
                    # Target already exists on disk.
                    with patch.object(os.path, "lexists", mock_t):
                        with patch.dict(filestate.__opts__, {"test": False}):
                            # force=True but the pre-delete raises IOError.
                            with patch.dict(
                                filestate.__salt__, {"file.remove": mock_io}
                            ):
                                ret.update({"comment": comt1, "result": False})
                                assert (
                                    filestate.copy_(
                                        name, source, preserve=True, force=True
                                    )
                                    == ret
                                )

                            # Without force the existing target is kept.
                            with patch.object(os.path, "isfile", mock_t):
                                ret.update({"comment": comt2, "result": True})
                                assert (
                                    filestate.copy_(name, source, preserve=True) == ret
                                )

                    with patch.object(os.path, "lexists", mock_f):
                        # test=True only announces the pending copy.
                        with patch.dict(filestate.__opts__, {"test": True}):
                            ret.update({"comment": comt3, "result": None})
                            assert filestate.copy_(name, source, preserve=True) == ret

                        # Parent directory of the target does not exist.
                        with patch.dict(filestate.__opts__, {"test": False}):
                            comt = "The target directory /tmp is not present"
                            ret.update({"comment": comt, "result": False})
                            assert filestate.copy_(name, source, preserve=True) == ret

            # check_perms returns a bare dict on Windows, a (ret, perms)
            # tuple elsewhere.
            check_perms_ret = {
                "name": name,
                "changes": {},
                "comment": [],
                "result": True,
            }
            check_perms_perms = {}

            if salt.utils.platform.is_windows():
                mock_check_perms = MagicMock(return_value=check_perms_ret)
            else:
                mock_check_perms = MagicMock(
                    return_value=(check_perms_ret, check_perms_perms)
                )
            with patch.dict(
                filestate.__salt__,
                {
                    "file.user_to_uid": mock_uid,
                    "file.group_to_gid": mock_gid,
                    "file.get_user": mock_user,
                    "file.get_group": mock_grp,
                    "file.get_mode": mock_grp,
                    "file.check_perms": mock_check_perms,
                },
            ):
                # Successful directory copy (copytree path).
                comt = 'Copied "{}" to "{}"'.format(source, name)
                with patch.dict(filestate.__opts__, {"user": "salt"}), patch.object(
                    os.path, "isdir", mock_t
                ), patch.object(os.path, "lexists", mock_f), patch.dict(
                    filestate.__opts__, {"test": False}
                ), patch.dict(
                    filestate.__salt__, {"file.remove": mock_io}
                ), patch.object(
                    shutil, "copytree", MagicMock()
                ):
                    group = None
                    ret.update(
                        {
                            "comment": comt,
                            "result": True,
                            "changes": {"/tmp/salt": "/tmp/salt/salt"},
                        }
                    )
                    res = filestate.copy_(name, source, group=group, preserve=False)
                    assert res == ret

                # Successful single-file copy (shutil.copy path).
                comt = 'Copied "{}" to "{}"'.format(source, name)
                with patch.dict(filestate.__opts__, {"user": "salt"}), patch.object(
                    os.path, "isdir", MagicMock(side_effect=[False, True, False])
                ), patch.object(os.path, "lexists", mock_f), patch.dict(
                    filestate.__opts__, {"test": False}
                ), patch.dict(
                    filestate.__salt__, {"file.remove": mock_io}
                ), patch.object(
                    shutil, "copy", MagicMock()
                ):
                    group = None
                    ret.update(
                        {
                            "comment": comt,
                            "result": True,
                            "changes": {"/tmp/salt": "/tmp/salt/salt"},
                        }
                    )
                    res = filestate.copy_(name, source, group=group, preserve=False)
                    assert res == ret
|
import os
import time
from datetime import datetime, timedelta
from botocore.vendored import requests
from src.stations import stations
# MBTA v3 API configuration: the key is supplied via the environment so it
# is never committed; all requests are rooted at the v3 base URL.
API_KEY = os.environ.get('API_KEY')
BASE_URL = 'https://api-v3.mbta.com/'
class TrainCalculator:
    """
    Class for calculating train arrivals from the MBTA v3 predictions API,
    driven by Alexa intent payloads.
    """

    @staticmethod
    def _resolve_slot(slot: dict) -> str:
        """
        Resolve an Alexa slot to its canonical value.
        If Alexa supplied an entity resolution for the slot, the resolved name
        is preferred over the literal utterance text.
        :param slot: a single slot dict from the intent
        :return: resolved slot value string
        """
        if 'resolutions' in slot and 'resolutionsPerAuthority' in slot['resolutions'] and 'values' in \
                slot['resolutions']['resolutionsPerAuthority'][0]:
            return slot['resolutions']['resolutionsPerAuthority'][0]['values'][0]['value']['name']
        return slot['value']

    @staticmethod
    def get_station_id(intent: dict):
        """
        Gets Id for a station based on intent
        :param intent: Intent provided by alexa invocation
        :return: tuple of station_id, line, direction, stop, and message
        """
        line = intent['slots']['Line']['value'].lower()
        # If the stop maps to the Stop Intent, we use that mapping instead of the literal string
        stop = TrainCalculator._resolve_slot(intent['slots']['Stop'])
        if line not in stations:
            message = '{} train line not found, please specify a valid route. For example green, red, blue, orange'.format(
                line)
            return None, None, None, None, message
        if stop not in stations[line]:
            message = 'No stations on {} line with name {} were found, please specify a valid train station'.format(
                line, stop)
            return None, None, None, None, message
        # If the direction is not specified we provide all possible destinations for that route.
        # Returning None for direction tells the caller to report every destination.
        if not intent['slots'].get('Direction') or not intent['slots']['Direction'].get('value'):
            return dict(stations[line][stop]), line, None, stop, ''
        # If the direction maps to the Direction Intent, we use that mapping instead of the literal string
        direction = TrainCalculator._resolve_slot(intent['slots']['Direction'])
        if direction not in stations[line][stop]:
            message = 'No routes for {} line trains to {} from {} found. '.format(line, direction, stop)
            return dict(stations[line][stop]), line, None, stop, message
        return stations[line][stop][direction], line, direction, stop, ''

    @staticmethod
    def calculate_arrival(response_json: dict):
        """
        Calculates the time difference between current time and estimated train arrival time
        :param response_json: JSON payload returned by the predictions endpoint
        :return: tuple of (seconds until arrival, 'arrive at' or 'depart from')
        """
        attributes = response_json['data'][0]['attributes']
        if attributes['arrival_time']:
            next_train = str(attributes['arrival_time']).split('-05:00')[0]
            arrive_depart = 'arrive at'
        else:
            next_train = str(attributes['departure_time']).split('-05:00')[0]
            arrive_depart = 'depart from'
        # NOTE(review): stripping a hard-coded '-05:00' offset and adding 5h
        # assumes Eastern Standard Time and a UTC host clock — confirm during DST.
        train_arrival = datetime.strptime(next_train, '%Y-%m-%dT%H:%M:%S') + timedelta(hours=5)
        current_time = int(time.time() * 1000)
        arrival_time = int(train_arrival.timestamp() * 1000)
        time_difference = (arrival_time - current_time) / 1000
        # A non-positive difference means the first prediction is already at the
        # station, so fall through to the next predicted train.
        if time_difference <= 0:
            next_train = str(response_json['data'][1]['attributes']['arrival_time']).split('-05:00')[0]
            train_arrival = datetime.strptime(next_train, '%Y-%m-%dT%H:%M:%S') + timedelta(hours=5)
            arrival_time = int(train_arrival.timestamp() * 1000)
            time_difference = (arrival_time - current_time) / 1000
        return time_difference, arrive_depart

    @staticmethod
    def _format_arrival(time_difference: float) -> str:
        """
        Format a number of seconds as a spoken minutes-and-seconds phrase.
        :param time_difference: seconds until arrival/departure
        :return: human-readable phrase, e.g. '2 minutes, and 30 seconds '
        """
        time_difference = time_difference / 60
        minutes = str(time_difference).split('.')[0]
        seconds = '.' + str(time_difference).split('.')[1]
        seconds = round(float(seconds), 4)
        seconds = str(int(round((seconds * 60), 0)))
        if int(minutes) == 0:
            return '{} seconds.'.format(seconds)
        if int(minutes) == 1:
            return '{} minute, and {} seconds.'.format(minutes, seconds)
        return '{} minutes, and {} seconds '.format(minutes, seconds)

    @staticmethod
    def _predict(stop_id):
        """
        Call the predictions API for one stop id.
        :param stop_id: MBTA stop identifier to query
        :return: tuple of (formatted arrival phrase, 'arrive at'/'depart from')
        """
        url = '{}predictions/?api_key={}&filter[stop]={}'.format(BASE_URL, API_KEY, stop_id)
        response_json = requests.get(url).json()
        time_difference, arrive_depart = TrainCalculator.calculate_arrival(response_json)
        return TrainCalculator._format_arrival(time_difference), arrive_depart

    @staticmethod
    def get_train_arrival(intent):
        """
        Gets the expected arrival time for a train at a particular stop/direction provided an intent
        :param intent: Intent provided by alexa invocation
        :return: string of expected arrival time
        """
        if not API_KEY:
            return 'No API key specified'
        # Retrieve the station_id, line, direction, and stop for the provided intent
        station_id, line, direction, stop, message = TrainCalculator.get_station_id(intent)
        if not station_id:
            return message
        if direction is None:
            # No direction given: answer for every destination, de-duplicating
            # destinations that share the same platform/stop id.
            directions = {}
            for destination in station_id:
                if station_id[destination] not in directions.values():
                    directions[destination] = station_id[destination]
            response_speech = message
            for destination in directions:
                arrival_time, arrive_depart = TrainCalculator._predict(directions[destination])
                response_speech += 'The next {} line train to {} will {} {} in {}. '.format(line, destination,
                                                                                            arrive_depart,
                                                                                            stop, arrival_time)
            return response_speech
        arrival_time, arrive_depart = TrainCalculator._predict(station_id)
        return 'The next {} line train to {} will {} {} in {}. '.format(line, direction, arrive_depart,
                                                                        stop, arrival_time)
|
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from naclports.error import Error, PkgFormatError, DisabledError
from naclports.pkg_info import ParsePkgInfoFile, ParsePkgInfo
from naclports.util import Log, Trace, Warn, DownloadFile, SetVerbose
from naclports.util import GetInstallRoot, InstallLock, BuildLock, IsInstalled
from naclports.util import GS_BUCKET, GS_URL
from naclports.paths import NACLPORTS_ROOT, OUT_DIR, TOOLS_DIR, PACKAGES_ROOT
from naclports.paths import BUILD_ROOT, STAMP_DIR, PUBLISH_ROOT
import colorama
# Initialize colorama at import time so ANSI color codes in log output are
# handled portably (notably on Windows consoles).
colorama.init()
|
import numbers
class IntegralField(object):
    """Data descriptor: defines both __get__ and __set__, so it takes
    precedence over a same-named entry in the owner instance's __dict__."""

    def __init__(self):
        # The value lives on the descriptor instance itself, so it is shared
        # by every instance of the owner class.
        self.valuexx = 0

    def __get__(self, instance, owner):
        print("__get__", instance)
        return self.valuexx

    def __set__(self, instance, value):
        # print("__set__",instance,value)
        # Only integral values are accepted; anything else is rejected.
        if isinstance(value, numbers.Integral):
            print("属性的数值合法")
            self.valuexx = value
        else:
            # Fix: original message misspelled "Integral" as "Ingeral".
            raise ValueError("属性必须是Integral类型!")

    def __delete__(self, instance):
        print("__delete__")
class NonIntegralField(object):
    """Non-data descriptor: implements only __get__, so a same-named entry in
    the owner instance's __dict__ shadows it on attribute lookup."""

    def __init__(self):
        # Default value kept on the descriptor object itself.
        self.valuexx = 0

    def __get__(self, instance, owner):
        print("__get__", instance)
        return self.valuexx
class Student(object):
    """Demo owner class whose ``age`` attribute goes through a descriptor."""
    # age = IntegralField()
    age = NonIntegralField()
    # age = 90
    # year = IntegralField()
    # friends = IntegralField()

    def __init__(self, age=0):
        # Fix: ``age`` previously had no default, so ``Student()`` raised
        # TypeError, and the argument was silently ignored in favor of a
        # hard-coded 0. Assigning to self.age stores the value in the
        # instance __dict__, which shadows the non-data descriptor above.
        self.age = age
if __name__ == "__main__":
    # Fix: Student.__init__ requires an ``age`` argument, so the original
    # bare ``Student()`` call raised TypeError before any output appeared.
    stu = Student(0)
    print(stu.age)
    print(getattr(stu, "age"))
    stu.age = 12
    print(stu.age)
    # del stu.age
    # stu.year = 2000
    # print(stu.year)
'''
如果stu是Student类的实例,那么stu.age(以及等价的getattr(stu,'age’))
首先调用__getattribute__。如果类定义了__getattr__方法,
那么在__getattribute__抛出 AttributeError 的时候就会调用到__getattr__,
而对于描述符(__get__)的调用,则是发生在__getattribute__内部的。
stu = Student(), 那么stu.age 顺序如下:
(1)如果“age”是出现在Student或其基类的__dict__中, 且age是data descriptor, 那么调用其__get__方法, 否则
(2)如果“age”出现在stu这个对象的__dict__中, 那么直接返回 stu.__dict__[‘age’], 否则
(3)如果“age”出现在Student或其基类的__dict__中
(3.1)如果age是non-data descriptor,那么调用其__get__方法, 否则
(3.2)返回 Student.__dict__[‘age’]
(4)如果Student有__getattr__方法,调用__getattr__方法,否则
(5)抛出AttributeError
''' |
import math
from pathlib import Path
from typing import Union
import pytest
from planingfsi import config
from planingfsi.config import SubConfig, ConfigItem
class TestClass(SubConfig):
    """A simple configuration class to test behavior of attribute descriptors.

    Each attribute is a ``ConfigItem`` descriptor: ``float_attr`` and
    ``bool_attr`` carry defaults, while ``int_attr`` declares only a type.
    """
    # Has a default, so reading it before assignment succeeds.
    float_attr = ConfigItem(default=0.0)
    # No default: assigned values are coerced to int; reading before any
    # assignment raises AttributeError (exercised by the tests below).
    int_attr = ConfigItem(type=int)
    # Defaults to True; the setter also accepts string spellings of booleans.
    bool_attr = ConfigItem(default=True)
@pytest.fixture()
def config_instance() -> TestClass:
    """Provide a fresh TestClass configuration instance for each test."""
    instance = TestClass()
    return instance
def test_config_init(config_instance: TestClass) -> None:
    """A freshly built instance exposes the declared float default."""
    assert config_instance is not None
    value = config_instance.float_attr
    assert value == 0.0
    assert isinstance(value, float)
def test_config_attribute_without_default_raises_exception(config_instance: TestClass) -> None:
    """Reading an attribute that has no default raises AttributeError."""
    with pytest.raises(AttributeError):
        getattr(config_instance, "int_attr")
def test_config_type_conversion(config_instance: TestClass) -> None:
    """Assigned values are coerced to the attribute's declared type."""
    config_instance.int_attr = 55.0
    stored = config_instance.int_attr
    assert stored == 55
    assert isinstance(stored, int)
@pytest.mark.parametrize(
    "value,expected",
    [("False", False), ("false", False), ("True", True), ("true", True), (False, False), (True, True)],
)
def test_config_bool_setter(
    config_instance: TestClass, value: Union[str, bool], expected: bool
) -> None:
    """Boolean attributes accept both bool objects and string spellings."""
    config_instance.bool_attr = value
    assert config_instance.bool_attr == expected
def test_flow_defaults() -> None:
    """Test the raw default values in the FlowConfig class are set correctly."""
    flow = config.flow
    expected = {
        "density": 998.2,
        "gravity": 9.81,
        "kinematic_viscosity": 1e-6,
        "waterline_height": 0.0,
        "num_dim": 2,
    }
    for attr, value in expected.items():
        assert getattr(flow, attr) == value
    assert not flow.include_friction
def test_flow_speed_requires_value() -> None:
    """If Froude number and flow speed are both unset, access should raise ValueError."""
    flow = config.flow
    flow._froude_num = None
    flow._flow_speed = None
    for attr in ("flow_speed", "froude_num"):
        with pytest.raises(ValueError):
            getattr(flow, attr)
def test_set_flow_speed_only_once() -> None:
    """The Froude number and flow speed can't both be set, otherwise a ValueError is raised."""
    flow = config.flow
    flow._froude_num = 1.0
    flow._flow_speed = 1.0
    with pytest.raises(ValueError):
        getattr(flow, "flow_speed")
def test_set_flow_speed() -> None:
    """Setting the flow speed directly, Froude number will be calculated."""
    flow = config.flow
    flow._froude_num = None
    flow._flow_speed = 1.0
    speed = flow.flow_speed
    assert speed == 1.0
    assert flow.froude_num == pytest.approx(speed / math.sqrt(flow.gravity * flow.reference_length))
def test_set_froude_number() -> None:
    """Setting the Froude number, flow speed will be calculated."""
    flow = config.flow
    flow._froude_num = 1.0
    flow._flow_speed = None
    speed = flow.flow_speed
    assert speed == pytest.approx(flow.froude_num * math.sqrt(flow.gravity * flow.reference_length))
    assert flow.froude_num == 1.0
def test_flow_derived_quantities() -> None:
    """Derived quantities should return a value once flow speed is set."""
    flow = config.flow
    flow.froude_num = 1.0
    for attr in ("stagnation_pressure", "k0", "lam"):
        assert getattr(flow, attr) is not None
def test_body_defaults() -> None:
    """Raw defaults of the body configuration are set correctly."""
    body = config.body
    for attr in ("xCofG", "yCofG", "xCofR", "yCofR"):
        assert getattr(body, attr) == 0.0
    for attr in ("reference_length", "mass", "relax_draft", "relax_trim"):
        assert getattr(body, attr) == 1.0
    assert body.weight == config.flow.gravity
def test_body_pressure_calculations() -> None:
    """Pressures start at zero; Pc follows from PcBar and weight."""
    body = config.body
    for attr in ("Pc", "PcBar", "Ps", "PsBar"):
        assert getattr(body, attr) == 0.0
    body.weight = 5.0
    body.PcBar = 10.0
    assert body.Pc == 50.0
@pytest.fixture()
def config_from_file(test_dir: Path) -> None:
    """Load the sample configDict input file into the global config."""
    config.load_from_file(test_dir / "input_files" / "configDict")
@pytest.mark.usefixtures("config_from_file")
def test_load_config_from_file() -> None:
    """Configuration loaded from file overrides defaults."""
    flow = config.flow
    assert flow.density == 998.2
    assert flow.kinematic_viscosity == 1.0048e-6
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the abbreviation function below.
def abbreviation(a, b):
    """Decide whether *a* can be turned into *b* by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters.

    dp[i][j] is True when a[0..i] can be transformed into b[0..j].
    Returns the string "YES" or "NO".
    """
    rows, cols = len(a), len(b)
    dp = [[False] * cols for _ in range(rows)]

    # First column: can the prefix a[0..i] match the single character b[0]?
    dp[0][0] = a[0].upper() == b[0]
    seen_upper = a[0].isupper()
    for i in range(1, rows):
        ch = a[i]
        if ch.isupper():
            # An uppercase letter cannot be deleted: it must be the first
            # uppercase seen and equal b[0], otherwise the prefix fails.
            dp[i][0] = (not seen_upper) and ch == b[0]
            seen_upper = True
        elif (not seen_upper) and ch.upper() == b[0]:
            # Promote this lowercase letter and delete everything else.
            dp[i][0] = True
        else:
            # A deletable lowercase letter inherits the previous answer.
            dp[i][0] = dp[i - 1][0]

    # Remaining table: uppercase letters must be consumed, lowercase letters
    # may either be promoted (when they match) or deleted.
    for i in range(1, rows):
        for j in range(1, cols):
            ch = a[i]
            if ch.upper() == b[j]:
                dp[i][j] = dp[i - 1][j - 1] or (ch.islower() and dp[i - 1][j])
            else:
                dp[i][j] = ch.islower() and dp[i - 1][j]

    return "YES" if dp[rows - 1][cols - 1] else "NO"
if __name__ == '__main__':
    # Fix: use a context manager so the output file is closed even if an
    # exception is raised while reading input or computing a result.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        q = int(input())
        for _ in range(q):
            a = input()
            b = input()
            fptr.write(abbreviation(a, b) + '\n')
|
# code-checked
# server-checked
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet
def init_weights(m):
    """Initialize a single module in-place: small-normal weights and zero bias
    for conv/linear/transposed-conv layers; unit weight and zero bias for
    batch norm. Other module types are left untouched."""
    if isinstance(m, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
        m.weight.data.normal_(0, 1e-3)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
def conv_bn_relu(in_channels, out_channels, kernel_size, stride=1, padding=0, bn=True, relu=True):
    """Build Conv2d optionally followed by BatchNorm2d and LeakyReLU(0.2),
    with all submodules passed through init_weights.

    The conv bias is dropped when batch norm follows it, since BN's shift
    makes the bias redundant.
    """
    modules = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=not bn)]
    if bn:
        modules.append(nn.BatchNorm2d(out_channels))
    if relu:
        modules.append(nn.LeakyReLU(0.2, inplace=True))
    block = nn.Sequential(*modules)
    # Same traversal set as iterating block.modules(); conv is visited before
    # BN either way, so RNG consumption order is unchanged.
    block.apply(init_weights)
    return block
def convt_bn_relu(in_channels, out_channels, kernel_size, stride=1, padding=0, output_padding=0, bn=True, relu=True):
    """Build ConvTranspose2d optionally followed by BatchNorm2d and
    LeakyReLU(0.2), with all submodules passed through init_weights.

    The transposed-conv bias is dropped when batch norm follows it.
    """
    modules = [nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=not bn)]
    if bn:
        modules.append(nn.BatchNorm2d(out_channels))
    if relu:
        modules.append(nn.LeakyReLU(0.2, inplace=True))
    block = nn.Sequential(*modules)
    # Same traversal set as iterating block.modules().
    block.apply(init_weights)
    return block
class DepthCompletionNet(nn.Module):
    """Encoder-decoder network that predicts a dense depth mean and
    log-variance map from a grayscale image plus a sparse depth map.

    The encoder reuses the residual stages of a ResNet-34; skip connections
    feed a transposed-convolution decoder, and two 1x1 heads emit the
    per-pixel mean and log-variance.
    """
    def __init__(self, pretrained=False):
        # pretrained: load torchvision's ImageNet weights into the ResNet
        # stages instead of the local random init.
        super(DepthCompletionNet, self).__init__()
        # NOTE(review): leftover debug print — emitted on every construction.
        print ("model.py")
        # Fixed to ResNet-34; the channel-count branch below also handles >=50.
        self.layers = 34
        self.pretrained = pretrained
        # Parallel 3x3 stems for the sparse depth and image inputs (32ch each).
        self.conv1_d = conv_bn_relu(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv1_img = conv_bn_relu(1, 32, kernel_size=3, stride=1, padding=1)
        # Borrow the four residual stages from torchvision's resnet{layers}.
        pretrained_model = resnet.__dict__['resnet{}'.format(self.layers)](pretrained=self.pretrained)
        if not self.pretrained:
            pretrained_model.apply(init_weights)
        self.conv2 = pretrained_model._modules['layer1']
        self.conv3 = pretrained_model._modules['layer2']
        self.conv4 = pretrained_model._modules['layer3']
        self.conv5 = pretrained_model._modules['layer4']
        del pretrained_model # (clear memory)
        # Basic-block resnets (<=34) end at 512 channels; bottleneck ones at 2048.
        if self.layers <= 34:
            num_channels = 512
        elif self.layers >= 50:
            num_channels = 2048
        self.conv6 = conv_bn_relu(num_channels, 512, kernel_size=3, stride=2, padding=1)
        # Decoder: each convt upsamples 2x and is fed the concat of the
        # previous decoder output and the matching encoder feature map.
        self.convt5 = convt_bn_relu(in_channels=512, out_channels=256, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.convt4 = convt_bn_relu(in_channels=768, out_channels=128, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.convt3 = convt_bn_relu(in_channels=(256+128), out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.convt2 = convt_bn_relu(in_channels=(128+64), out_channels=64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.convt1 = convt_bn_relu(in_channels=128, out_channels=64, kernel_size=3, stride=1, padding=1)
        # Two linear 1x1 heads (no BN/ReLU): predicted mean and log-variance.
        self.convtf_mean = conv_bn_relu(in_channels=128, out_channels=1, kernel_size=1, stride=1, bn=False, relu=False)
        self.convtf_var = conv_bn_relu(in_channels=128, out_channels=1, kernel_size=1, stride=1, bn=False, relu=False)

    def forward(self, img, sparse):
        """Return (mean, log_var) depth predictions, each (batch_size, 1, h, w)."""
        # (img has shape: (batch_size, h, w)) (grayscale)
        # (sparse has shape: (batch_size, h, w))
        img = torch.unsqueeze(img, 1) # (shape: (batch_size, 1, h, w)))
        sparse = torch.unsqueeze(sparse, 1) # (shape: (batch_size, 1, h, w)))
        conv1_d = self.conv1_d(sparse) # (shape: (batch_size, 32, h, w)))
        conv1_img = self.conv1_img(img) # (shape: (batch_size, 32, h, w)))
        conv1 = torch.cat((conv1_d, conv1_img), 1) # (shape: (batch_size, 64, h, w)))
        conv2 = self.conv2(conv1) # (shape: (batch_size, 64, h, w)))
        conv3 = self.conv3(conv2) # (shape: (batch_size, 128, h/2, w/2)))
        conv4 = self.conv4(conv3) # (shape: (batch_size, 256, h/4, w/4)))
        conv5 = self.conv5(conv4) # (shape: (batch_size, 512, h/8, w/8)))
        conv6 = self.conv6(conv5) # (shape: (batch_size, 512, h/16, w/16)))
        # Decoder with U-Net-style skip connections from the encoder stages.
        convt5 = self.convt5(conv6) # (shape: (batch_size, 256, h/8, w/8)))
        y = torch.cat((convt5, conv5), 1) # (shape: (batch_size, 256+512, h/8, w/8)))
        convt4 = self.convt4(y) # (shape: (batch_size, 128, h/4, w/4)))
        y = torch.cat((convt4, conv4), 1) # (shape: (batch_size, 128+256, h/4, w/4)))
        convt3 = self.convt3(y) # (shape: (batch_size, 64, h/2, w/2)))
        y = torch.cat((convt3, conv3), 1) # (shape: (batch_size, 64+128, h/2, w/2)))
        convt2 = self.convt2(y) # (shape: (batch_size, 64, h, w)))
        y = torch.cat((convt2, conv2), 1) # (shape: (batch_size, 64+64, h, w)))
        convt1 = self.convt1(y) # (shape: (batch_size, 64, h, w)))
        y = torch.cat((convt1,conv1), 1) # (shape: (batch_size, 64+64, h, w)))
        mean = self.convtf_mean(y) # (shape: (batch_size, 1, h, w))
        log_var = self.convtf_var(y) # (shape: (batch_size, 1, h, w))
        # Fixed output scaling of the mean; presumably converts the head's
        # output range to the dataset's depth units — TODO confirm.
        mean = 100*mean
        return (mean, log_var)
|
# coding=utf-8
class TMDbType:
    """Placeholder type; currently defines no attributes or behavior."""
|
import contextlib
import os
import sys
import click
import yaml
from dagster.utils.indenting_printer import IndentingStringIoPrinter
if sys.version_info.major >= 3:
from io import StringIO # pylint:disable=import-error
else:
from StringIO import StringIO # pylint:disable=import-error
def _construct_yml(environment_yaml_file, dag_name):
    '''Build the YAML environment string embedded into the scaffolded DAG.

    Loads the user-provided YAML file when given, then guarantees a
    ``storage`` section, defaulting to filesystem storage under a
    /tmp/dagster-airflow directory scoped to ``dag_name``.
    '''
    if environment_yaml_file is None:
        environment_dict = {}
    else:
        with open(environment_yaml_file, 'rb') as yaml_file:
            environment_dict = yaml.safe_load(yaml_file)
    if 'storage' not in environment_dict:
        default_base_dir = '/tmp/dagster-airflow/{}'.format(dag_name)
        environment_dict['storage'] = {'filesystem': {'config': {'base_dir': default_base_dir}}}
    # See http://bit.ly/309sTOu
    with contextlib.closing(StringIO()) as buffer:
        yaml.dump(environment_dict, buffer, default_flow_style=False, allow_unicode=True)
        return buffer.getvalue()
@click.group()
def main():
    # Root click command group; subcommands such as ``scaffold`` are attached
    # via @main.command(). Intentionally no docstring: click would surface a
    # docstring as --help text, changing the CLI's output.
    pass
@main.command()
@click.option('--dag-name', help='The name of the output Airflow DAG', required=True)
@click.option('--module-name', '-m', help='The name of the source module', required=True)
@click.option('--pipeline-name', '-n', help='The name of the pipeline', required=True)
@click.option(
    '--output-path',
    '-o',
    help='Optional. If unset, $AIRFLOW_HOME will be used.',
    default=os.getenv('AIRFLOW_HOME'),
)
@click.option(
    '--environment-file',
    '-c',
    help='''Optional. Path to a YAML file to install into the Dagster environment.''',
)
def scaffold(dag_name, module_name, pipeline_name, output_path, environment_file):
    '''Creates a DAG file for a specified dagster pipeline'''
    # Validate output path
    if not output_path:
        raise Exception('You must specify --output-path or set AIRFLOW_HOME to use this script.')
    # We construct the YAML environment and then put it directly in the DAG file
    environment_yaml = _construct_yml(environment_file, dag_name)
    # The printer accumulates the generated DAG module source line by line.
    printer = IndentingStringIoPrinter(indent_level=4)
    printer.line('\'\'\'')
    printer.line(
        'The airflow DAG scaffold for {module_name}.{pipeline_name}'.format(
            module_name=module_name, pipeline_name=pipeline_name
        )
    )
    printer.blank_line()
    printer.line('Note that this docstring must contain the strings "airflow" and "DAG" for')
    printer.line('Airflow to properly detect it as a DAG')
    printer.line('See: http://bit.ly/307VMum')
    printer.line('\'\'\'')
    printer.line('import datetime')
    printer.line('import yaml')
    printer.blank_line()
    printer.line('from dagster_airflow.factory import make_airflow_dag')
    printer.blank_line()
    printer.blank_line()
    # Embed the environment YAML verbatim inside a triple-quoted string.
    printer.line('ENVIRONMENT = \'\'\'')
    printer.line(environment_yaml)
    printer.line('\'\'\'')
    printer.blank_line()
    printer.blank_line()
    printer.comment('NOTE: these arguments should be edited for your environment')
    printer.line('DEFAULT_ARGS = {')
    with printer.with_indent():
        printer.line("'owner': 'airflow',")
        printer.line("'depends_on_past': False,")
        printer.line("'start_date': datetime.datetime(2019, 5, 7),")
        printer.line("'email': ['airflow@example.com'],")
        printer.line("'email_on_failure': False,")
        printer.line("'email_on_retry': False,")
        printer.line('}')
    printer.blank_line()
    printer.line('dag, tasks = make_airflow_dag(')
    with printer.with_indent():
        printer.comment(
            'NOTE: you must ensure that {module_name} is installed or available on sys.path, '
            'otherwise, this import will fail.'.format(module_name=module_name)
        )
        printer.line('module_name=\'{module_name}\','.format(module_name=module_name))
        printer.line('pipeline_name=\'{pipeline_name}\','.format(pipeline_name=pipeline_name))
        # NOTE(review): the generated code calls yaml.load without a Loader,
        # which is deprecated/unsafe in modern PyYAML. The input here is the
        # embedded ENVIRONMENT string, but consider emitting yaml.safe_load.
        printer.line("environment_dict=yaml.load(ENVIRONMENT),")
        printer.line("dag_kwargs={'default_args': DEFAULT_ARGS, 'max_active_runs': 1}")
        printer.line(')')
    # Ensure output_path/dags exists
    dags_path = os.path.join(output_path, 'dags')
    if not os.path.isdir(dags_path):
        os.makedirs(dags_path)
    dag_file = os.path.join(output_path, 'dags', dag_name + '.py')
    with open(dag_file, 'wb') as f:
        f.write(printer.read().encode())
# Module entry point: defer to the click command group.
if __name__ == '__main__':
    main()  # pylint:disable=no-value-for-parameter
|
import functools
import re
import os
from jsonschema import validate, draft7_format_checker
import jsonschema.exceptions
import json
from flask import (
Blueprint, g, request, session, current_app, session
)
from passlib.hash import argon2
from sqlalchemy.exc import DBAPIError
from sqlalchemy import or_
from flaskr.db import session_scope
from flaskr.models.Order import Order, OrderLine, OrderStatus
from flaskr.models.Cart import Cart, CartLine
from flaskr.models.User import User
from flaskr.models.Product import Product
from flaskr.models.Revenue import Revenue
from flaskr.email import send
from flaskr.routes.utils import login_required, not_login, cross_origin, is_logged_in
from datetime import date
# All order endpoints below are mounted under the /orders URL prefix.
bp = Blueprint('orders', __name__, url_prefix='/orders')
@bp.route("/mine", methods=['GET', 'OPTIONS'])
@cross_origin(methods=['GET'])
@login_required
def get_my_orders():
    """Return all orders belonging to the authenticated user.

    Responds 200 with ``{'orders': [...]}``, 404 when the user has no
    orders, or 400 with the constraint detail on a database error.
    """
    try:
        with session_scope() as db_session:
            orders = db_session.query(Order).filter(Order.user_id == g.user.id)
            if orders.count() < 1:
                return {
                    'code': 404,
                    'message': 'User has no orders'
                }, 404
            # Fix: removed the unreachable success-response block that
            # followed this return in the original.
            return {'orders': [order.to_json() for order in orders.all()]}, 200
    except DBAPIError as db_error:
        # Returns an error in case of a integrity constraint not being followed.
        return {
            'code': 400,
            'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)
        }, 400
@bp.route("/<int:order_id>", methods=['GET', 'OPTIONS'])
@cross_origin(methods=['GET', 'PATCH'])
@login_required
def get_order(order_id):
    """Return a single order by id for the authenticated user.

    Responds 404 when the order does not exist, 403 when it belongs to a
    different user, otherwise 200 with the order JSON; database errors map
    to 400 with the constraint detail.
    """
    try:
        with session_scope() as db_session:
            order = db_session.query(Order).filter(Order.id == order_id).first()
            if order is None:
                return {
                    'code': 404,
                    'message': 'Order does not exist'
                }, 404
            if order.user_id != g.user.id:
                return {
                    'code': 403,
                    'message': 'Order does not belong to user.'
                }, 403
            # Fix: removed the unreachable success-response block that
            # followed this return in the original.
            return order.to_json(), 200
    except DBAPIError as db_error:
        # Returns an error in case of a integrity constraint not being followed.
        return {
            'code': 400,
            'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)
        }, 400
@bp.route("/<int:order_id>/items/<int:product_id>", methods=[ 'PATCH', 'OPTIONS' ])
@cross_origin(methods=['PATCH'])
@login_required
def update_order_line(order_id, product_id):
    """Mark one order line as shipped and email the buyer a notification.

    Only the seller of the product may update the line; the request body
    must validate against update_order_line_shipping_status.schema.json.
    """
    # Validate the request body against the JSON schema before touching the DB.
    schemas_directory = os.path.join(current_app.root_path, current_app.config['SCHEMA_FOLDER'])
    schema_filepath = os.path.join(schemas_directory, 'update_order_line_shipping_status.schema.json')
    try:
        with open(schema_filepath) as schema_file:
            schema = json.loads(schema_file.read())
            validate(instance=request.json, schema=schema, format_checker=draft7_format_checker)
    except jsonschema.exceptions.ValidationError as validation_error:
        return {
            'code': 400,
            'message': validation_error.message
        }, 400
    with session_scope() as db_session:
        order_line = db_session.query(OrderLine).filter(OrderLine.order_id == order_id).filter(OrderLine.product_id == product_id).first()
        if order_line is None:
            return {
                'code': 404,
                'message': 'Order does not exist'
            }, 404
        # NOTE(review): this authorization failure returns 404 rather than
        # 403 — presumably to avoid leaking order existence; confirm intent.
        if order_line.product.user_id != g.user.id:
            return {
                'code': 404,
                'message': 'Not products seller'
            }, 404
        order_line.date_fulfilled = request.json['dateFulfilled']
        # Notify the buyer by email that the line has shipped.
        send(current_app.config['SMTP_USERNAME'], order_line.buyer.email, "Shipping Notification", "<html><body><p>%s x %d has been shipped on %s</p></body></html>"%(order_line.product.name, order_line.quantity, str(order_line.date_fulfilled)) ,"%s x %d has been shipped on %s" % (order_line.product.name, order_line.quantity, str(order_line.date_fulfilled)))
        return '', 200
@bp.route("", methods=[ 'POST', 'OPTIONS' ])
@cross_origin(methods=['POST'])
@login_required
def create_order():
    """Checkout: convert the authenticated user's cart into an order.

    Validates the shipping details against new_order.schema.json, locks the
    purchased product rows, records per-seller revenue entries, clears the
    cart, and emails sale/purchase overviews. Responds 200 with the order
    JSON, or 400 on validation/cart/database errors.
    """
    # Validate the request body against the JSON schema before touching the DB.
    schemas_directory = os.path.join(current_app.root_path, current_app.config['SCHEMA_FOLDER'])
    schema_filepath = os.path.join(schemas_directory, 'new_order.schema.json')
    try:
        with open(schema_filepath) as schema_file:
            schema = json.loads(schema_file.read())
            validate(instance=request.json, schema=schema, format_checker=draft7_format_checker)
    except jsonschema.exceptions.ValidationError as validation_error:
        return {
            'code': 400,
            'message': validation_error.message
        }, 400
    # Create the order for the current cart
    try:
        with session_scope() as db_session:
            user = db_session.merge(g.user)
            cart = user.cart
            if cart is None:
                return {
                    'code': 400,
                    'message': 'User has no cart'
                }, 400
            if len(cart.cart_lines) == 0:
                return {
                    'code': 400,
                    'message': 'User cannot checkout an empty cart'
                }, 400
            order = Order(user_id=g.user.id, full_name=request.json['fullName'], line1=request.json['line1'], is_express_shipping=request.json['isExpressShipping'], city=request.json['city'], country=request.json['country'])
            # line2 is optional in the schema.
            if 'line2' in request.json:
                order.line2 = request.json['line2']
            total_cost = 0
            # Maps seller_id -> {'seller': User, 'items': [(Product, qty)]}.
            dict_sellers_items_sold = {}
            # Open a savepoint so the stock updates happen atomically.
            db_session.begin_nested()
            #Lock TABLE
            db_session.execute('LOCK TABLE product IN ROW EXCLUSIVE MODE')
            for line in cart.cart_lines:
                # Row-lock each product; quantity sold is capped at the stock on hand.
                product = db_session.query(Product).filter(Product.id == line.product_id).with_for_update().one()
                order.order_lines.append(OrderLine(product_id=product.id, quantity=min(product.quantity, line.quantity), cost=line.cost))
                total_cost += order.order_lines[-1].cost
                dict_seller_items_sold = dict_sellers_items_sold.setdefault(product.user_id, {})
                dict_seller_items_sold['seller'] = line.product.user
                dict_seller_items_sold.setdefault('items', []).append((product, min(product.quantity, line.quantity)))
            order.total_cost = total_cost
            db_session.add(order)
            db_session.commit()
            # Empty the cart now that its lines are captured on the order.
            db_session.query(CartLine).filter(CartLine.cart_id == cart.id).delete()
            items_bought = []
            for v in dict_sellers_items_sold.values():
                items_sold= []
                for item_sold in v['items']:
                    email_line = '%d x %s %.2f' % (item_sold[1], item_sold[0].name, item_sold[0].price)
                    items_sold.append(email_line)
                    items_bought.append(email_line)
                    # Decrement stock by the quantity actually sold.
                    item_sold[0].quantity -= item_sold[1]
                    # create a revenue entry for this product sold.
                    profits = computeProfit(item_sold[0].price, v['seller'].id)
                    revenue_entry = Revenue(seller_id= v['seller'].id, product_id=item_sold[0].id, order_id=order.id, profit=profits, purchased_on=order.date)
                    db_session.add(revenue_entry)
                    db_session.commit()
                # One sale-overview email per seller.
                send(current_app.config['SMTP_USERNAME'], v['seller'].email, "Sale Notification", "<html><body><p>Here is an overview of your sale:<ul><li>%s</li></ul></p></body></html>"%'</li><li>'.join(items_sold) ,'Here is an overview of your sale:\n%s'% '\n'.join(items_sold))
            # One purchase-overview email to the buyer covering all items.
            send(current_app.config['SMTP_USERNAME'], g.user.email, "Purchase Notification", "<html><body><p>Here is an overview of your purchase:<ul><li>%s</li></ul></p></body></html>"%'</li><li>'.join(items_bought) ,'Here is an overview of your purchase:\n%s'% '\n'.join(items_bought))
            return order.to_json(), 200
    except DBAPIError as db_error:
        # Returns an error in case of a integrity constraint not being followed.
        return {
            'code': 400,
            'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)
        }, 400
def computeProfit(price, seller_id):
    """Compute the platform's cut for one sale by *seller_id*.

    Sellers with at most ten recorded revenue rows get a reduced 3% rate;
    established sellers pay 8%. Returns the amount as a 2-decimal string,
    or a (dict, 400) tuple when the database raises.
    """
    new_seller_rate = 0.03
    standard_rate = 0.08
    try:
        with session_scope() as db_session:
            past_sales = db_session.query(Revenue).filter(Revenue.seller_id == seller_id).all()
            rate = new_seller_rate if len(past_sales) <= 10 else standard_rate
            return "%.2f" % (float(price) * rate)
    except DBAPIError as db_error:
        # Surface the integrity-constraint detail from the database error.
        return {
            'code': 400,
            'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)
        }, 400
def _serialize_order_line(itemline):
    """Serialize one OrderLine into the dict shape returned by ``view``."""
    return {
        "id": itemline.order_id,
        "product_id": itemline.product_id,
        "quantity": itemline.quantity,
        "fulfilled": itemline.date_fulfilled,
        "price": float(itemline.cost)
    }


@bp.route("/view/<string:type>", methods=['GET', 'OPTIONS'])
@cross_origin(methods='GET')
@login_required
def view(type):
    """List the current user's orders with their lines, filtered by ``type``.

    ``type`` is one of ``complete`` (fulfilled lines only), ``pending``
    (unfulfilled lines only) or ``all``; orders with no matching lines are
    omitted. Database errors map to 400 with the constraint detail.
    """
    try:
        with session_scope() as db_session:
            queryOrder = db_session.query(Order).filter(Order.user_id == session['user_id'])
            queryOrderLine = db_session.query(OrderLine)
            totalitem = []
            for item in queryOrder:
                myitem = item.to_json()
                line = []
                # Fix: the duplicated serialization dicts are factored into
                # _serialize_order_line; iteration sources per branch are kept
                # as in the original ("complete" walks the order's own lines,
                # the others scan all lines filtered by order id).
                if type == "complete":
                    for itemline in item.order_lines:
                        if itemline.order_id == item.id and itemline.date_fulfilled is not None:
                            line.append(_serialize_order_line(itemline))
                elif type == "pending":
                    for itemline in queryOrderLine:
                        if itemline.order_id == item.id and itemline.date_fulfilled is None:
                            line.append(_serialize_order_line(itemline))
                elif type == "all":
                    for itemline in queryOrderLine:
                        if itemline.order_id == item.id:
                            line.append(_serialize_order_line(itemline))
                if len(line) > 0:
                    totalitem.append({
                        "order": myitem,
                        "order_line": line
                    })
            # Fix: removed the unreachable success-response block that
            # followed this return in the original.
            return {"allitems": totalitem}, 200
    except DBAPIError as db_error:
        # Returns an error in case of a integrity constraint not being followed.
        return {
            'code': 400,
            'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)
        }, 400
|
import os
from galaxy.util import galaxy_root_path
from .framework import (
selenium_test,
SeleniumTestCase
)
# Directory holding the stock tour YAML definitions shipped with Galaxy.
STOCK_TOURS_DIRECTORY = os.path.join(galaxy_root_path, "config", "plugins", "tours")
class TestStockToursTestCase(SeleniumTestCase):
    """Drive the stock interactive tours shipped with Galaxy through Selenium."""

    # Test doesn't pass consistently on Jenkins yet, something is wrong in tool
    # panel interactions. Example problems:
    # - https://jenkins.galaxyproject.org/view/All/job/selenium/86/testReport/junit/selenium_tests.test_stock_tours/TestStockToursTestCase/test_core_galaxy_ui/
    # - https://jenkins.galaxyproject.org/view/All/job/selenium/83/testReport/junit/selenium_tests.test_stock_tours/TestStockToursTestCase/test_core_galaxy_ui/
    # - https://jenkins.galaxyproject.org/view/All/job/selenium/81/testReport/junit/selenium_tests.test_stock_tours/TestStockToursTestCase/test_core_galaxy_ui/
    # Pausing between transitions to let the tool panel settle does not seem to
    # help in initial testing. -John
    # Tracking with https://github.com/galaxyproject/galaxy/issues/3598
    # @selenium_test
    # def test_core_galaxy_ui(self):
    #     sleep_on_steps = {
    #         "Tools": 20,  # Give upload a chance to take so tool form is filled in.
    #         "History": 20,
    #     }
    #     self.run_tour(
    #         os.path.join(STOCK_TOURS_DIRECTORY, "core.galaxy_ui.yaml"),
    #         sleep_on_steps=sleep_on_steps,
    #     )

    def _run_stock_tour(self, tour_filename):
        # All stock tours live in STOCK_TOURS_DIRECTORY; run one by file name.
        self.run_tour(os.path.join(STOCK_TOURS_DIRECTORY, tour_filename))

    @selenium_test
    def test_core_scratchbook(self):
        self._run_stock_tour("core.scratchbook.yaml")

    @selenium_test
    def test_core_history(self):
        self._run_stock_tour("core.history.yaml")
|
class tube(object):
    """Plain round heat-exchanger tube; lengths in mm, areas in mm^2."""

    def __init__(self, Dt_mm, Ltw_mm, Lt_mm):
        import math  # local import keeps this chunk self-contained

        self.Dt_mm = Dt_mm                  # tube outer diameter, mm
        self.Ltw_mm = Ltw_mm                # tube wall thickness, mm
        self.Dti_mm = Dt_mm - 2 * Ltw_mm    # tube inner diameter, mm
        self.Lt_mm = Lt_mm                  # length of active heat-transfer area, mm
        # BUG FIX: use math.pi instead of the hard-coded 3.14 (3 significant
        # figures is a ~0.05% systematic error in every area below).
        self.Ata_mm2 = math.pi * self.Dt_mm * self.Lt_mm    # active outer heat-transfer area, mm2
        self.Ati_mm2 = math.pi * self.Dti_mm * self.Lt_mm   # inner heat-transfer area, mm2
        self.St_mm2 = math.pi * self.Dti_mm ** 2.0 / 4.0    # tube inner cross-section area, mm2
class utube(object) :
    # Placeholder: U-tube geometry (two legs + return bend) not implemented yet.
    pass
class tube_bundle :
    # Placeholder: tube-bundle arrangement/counting not implemented yet.
    pass
|
# Gunicorn configuration file; gunicorn reads these module-level names.
from multiprocessing import cpu_count
# socket
bind = "127.0.0.1:8000"  # listen on localhost only; front with a reverse proxy
# Worker options
workers = cpu_count() * 2 + 1  # common gunicorn sizing rule of thumb
worker_class = "uvicorn.workers.UvicornWorker"  # ASGI worker for uvicorn apps
loglevel = "debug"
|
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The following code was copied from the Django project, and only lightly
# modified. Please adhere to the above copyright and license for the code
# in this file.
# Note: Nothing is covered here because this file is imported before nose and
# coverage take over.. and so its a false positive that nothing is covered.
import datetime # pragma: nocover
import os # pragma: nocover
import subprocess # pragma: nocover
# (major, minor, micro, release level, serial)
VERSION = (0, 0, 3, 'beta', 1)


def get_version(version=VERSION):  # pragma: nocover
    """Returns a PEP 386-compliant version number from VERSION."""
    assert len(version) == 5
    assert version[3] in ('alpha', 'beta', 'rc', 'final')

    # main is always the full X.Y.Z triple (historically only two parts
    # were emitted when the micro version was 0).
    main = '.'.join(str(part) for part in version[:3])

    # sub = .devN for pre-alpha git builds, or {a|b|c}N for alpha/beta/rc.
    release_level, serial = version[3], version[4]
    if release_level == 'alpha' and serial == 0:
        changeset = get_git_changeset()
        sub = '.dev%s' % changeset if changeset else ''
    elif release_level != 'final':
        sub = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[release_level] + str(serial)
    else:
        sub = ''
    return main + sub
def get_git_changeset():  # pragma: nocover
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS
    format.  This value isn't guaranteed to be unique, but collisions are
    very unlikely, so it's sufficient for generating development version
    numbers.  Returns None when no usable timestamp can be read.
    """
    # The repository root is one directory above this module's package.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True)
    raw_timestamp, _ = proc.communicate()
    try:
        committed_at = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:  # pragma: nocover
        # Not a git checkout, or git unavailable: no timestamp to report.
        return None  # pragma: nocover
    return committed_at.strftime('%Y%m%d%H%M%S')
|
#!/usr/bin/env python
""" Problem 39 daily-coding-problem.com """
class GameOfLife:
    """Skeleton for Conway's Game of Life (problem 39, daily-coding-problem.com).

    The board-derivation steps are still TODO; until they are filled in,
    next_tick() empties the live-cell list and __repr__ renders nothing.
    """

    def __init__(self, cells=None, steps=5):
        # BUG FIX: the original used a mutable default (cells=[]) that was
        # shared between every instance; use None as the sentinel instead.
        self.cells = [] if cells is None else cells
        self.steps = steps

    def start(self):
        """Print the board and advance it, for self.steps generations."""
        for _ in range(self.steps):
            print(self)
            self.next_tick()

    def next_tick(self):
        """Advance one generation (candidate-cell enumeration still TODO)."""
        board = [
            # TODO: enumerate every candidate (x, y) coordinate to evaluate
        ]
        self.cells = [i for i in board if self.is_alive_next_tick(i[0], i[1])]

    def is_alive_next_tick(self, x, y):
        # TODO: apply the live/dead neighbour rules; currently returns None.
        return

    def __repr__(self):
        result = ""
        board = [
            # TODO: rows of coordinates covering the displayable area
        ]
        for row in board:
            for i in row:
                result += "*" if i in self.cells else "."
            result += "\n"
        return result
if __name__ == "__main__":
    # Glider seed pattern; run 5 generations when executed as a script.
    board = GameOfLife([(0, 1), (1, 2), (2, 0), (2, 1), (2, 2)], 5)
    board.start()
#-*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import sys
PY3 = sys.version_info[0] == 3
# simplejson is only needed as a json backport on Python 2.
if PY3:
    install_requires = ["six"]
else:
    install_requires = ["six", "simplejson"]

# Read the long description next to this setup.py.  BUG FIX: the original
# open(...).read() never closed the file handle; use a context manager.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme_file:
    README = readme_file.read()

setup(
    name='yandexwebdav',
    version='0.2.11',
    include_package_data=True,
    py_modules=['yandexwebdav'],
    url='https://github.com/lexich/yandex-disk-webdav',
    license='MIT',
    author='lexich',
    author_email='lexich121@gmail.com',
    description='Simple wrapper to work with yandex disk using webdav Basic Auth',
    long_description=README,
    install_requires=install_requires,
    scripts=[
        "ydw.py"
    ]
)
|
import json
import os.path
from datetime import datetime
import threading
import time
# ChatLogger
create_json_file = True  # when True, text logging also appends a JSON session log
threadLock = threading.Lock()  # serialises writers across logger threads
def logToFile(history, userid):
    """Append a chat history for *userid* to the log files on a worker thread.

    history -- iterable of (speaker, text) tuples.
    """
    thread = threading.Thread(target=__appendToLogFile, args=[history, userid])
    thread.daemon = False
    # BUG FIX: the original called thread.run(), which executes the target
    # synchronously in the calling thread; start() actually spawns the thread.
    thread.start()
def __appendToLogFile(history, userid):
    """Serialise *history* into the hourly text log (and optionally JSON log)."""
    with threadLock:
        input_string = "\n".join("{}: {}".format(str(speaker), str(text)) for (speaker, text) in history)
        log_directory = "../logs/text_logs"
        try:
            os.mkdir(log_directory)
        except OSError:
            # Best effort: directory usually exists already; if it truly
            # cannot be created, open() below fails loudly anyway.
            # (Narrowed from "except Exception" to the error mkdir raises.)
            pass
        # Add time to file_name so that a new log is created each hour
        file_name = "output {}.log.txt".format(datetime.now().strftime("D=%Y-%m-%d H=%H"))
        with open(os.path.join(log_directory, file_name), "a+") as log_file:
            output = "User:\t{};\t\tTimestamp:\t{}".format(userid, datetime.now()) + "\n"
            output += input_string + "\n\n"
            log_file.write(output)
            # redundant log_file.close() removed: the with-block closes the file
        if create_json_file:
            __appendToJsonFile(history, userid)
def __appendToJsonFile(history, userid):
    """Append one ChatSession JSON line to the per-user JSON log file."""
    session = ChatSession(userid, datetime.now(), history)
    log_directory = "../logs/json_logs"
    try:
        os.mkdir(log_directory)
    except OSError:
        # Best effort: directory usually exists already (narrowed from
        # "except Exception" to the error mkdir raises).
        pass
    file_name = "{}.json".format(userid)
    with open(os.path.join(log_directory, file_name), "a+") as log_file:
        log_file.write("{}\n".format(session.json()))
        # redundant log_file.close() removed: the with-block closes the file
# JSON Stuff
class ChatSession:
    """One user's chat session, with history grouped into c/u exchange pairs."""

    def __init__(self, userid, date, history):
        # NOTE: the original also declared class-level mutable defaults
        # (userid/date/history); they were always shadowed by these instance
        # attributes, so they were dropped to avoid accidental shared state.
        self.userid = userid
        self.date = date
        self.history = self.__encode_history(history)

    def __encode_history(self, history):
        """Group (speaker, text) pairs into [{'c': [...], 'u': [...]}, ...].

        A new group is flushed whenever the speaker switches from 'U' back
        to 'C'; the trailing group is flushed at the end of the history.
        """
        encodedHistory = []
        lastName = history[0][0]
        cResponses = []
        uResponses = []
        for index, (name, response) in enumerate(history):
            # BUG FIX: speaker comparisons used "is" (object identity) instead
            # of "==" (equality); that only worked via CPython string interning.
            if lastName == 'U' and name == 'C':
                encodedHistory.append({'c': cResponses, 'u': uResponses})
                cResponses = []
                uResponses = []
            if name == 'C':
                cResponses.append(response)
            elif name == 'U':
                uResponses.append(response)
            if index == len(history) - 1:
                encodedHistory.append({'c': cResponses, 'u': uResponses})
            lastName = name
        return encodedHistory

    def json(self):
        """Return this session serialised as a JSON string."""
        return json.dumps({
            'userid': self.userid,
            'timestamp': str(self.date),
            'history': self.history
        })
|
# Constants
NB_ROOMS = 4
NB_ZONES = 4
# Discretised time axis; 241 is used as the "infinity" bucket (see INFINITY).
DIMENSION = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 120, 180, 210, 241]
DIMENSION_SIZE = len(DIMENSION)
INFINITY_IDX = DIMENSION.index(241)
MIN_TAU_VALUE = 5
MAX_NB_DEVICES = 6
URGT_TIME = 30
T_SYNCHRO = 30
THIRTY_SECONDS = 30
TWO_MINUTS = 120
THREE_HOURS = 180
TIMEOUT = 60 # 1 min
INFINITY = 241
# MQTT broker connection settings
MQTT_SERVER = "10.33.120.182"
MQTT_PORT = 1883
KEEP_ALIVE_PERIOD = 60
# Topic/payload keyword fragments
DATA = "data"
VARS = "vars"
SERVER = "SERVER/"
# Log types
STATE = "State"
INFO = "Info"
DFS = "Dfs"
UTIL = "Util"
VALUE = "Value"
RESULTS = "Results"
LOG = "Log"
EVENT = "Event"
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jun 28, 2016
@author: 463188
'''
import urllib2
import xmltodict
import json
import base64
from ....core.BaseAgent import BaseAgent
class NexusAgent(BaseAgent):
    """Agent that crawls a Nexus repository's REST component listing and
    publishes artifact/version metadata (legacy Python 2 code: urllib2)."""

    def process(self):
        """Entry point: fetch the component list, then for the first item of
        each repository run, read its maven-metadata.xml and publish any
        versions the tracking state says are new or updated."""
        self.userid = self.getCredential("userid")
        self.passwd = self.getCredential("passwd")
        BaseUrl = self.config.get("baseUrl", '')
        FirstEndPoint = self.config.get("firstEndPoint", '')
        nexIDs = self.getResponse(FirstEndPoint, 'GET', self.userid, self.passwd, None)
        print(nexIDs)
        previousname = nexIDs["items"][0]["repository"]
        for artifacts in range(len(nexIDs["items"])):
            # Skip consecutive items from the same repository (already handled).
            if nexIDs["items"][artifacts]["repository"] == previousname and artifacts != 0:
                continue
            else:
                repoid = nexIDs["items"][artifacts]["repository"]
                artifactid = nexIDs["items"][artifacts]["name"]
                previousname = repoid
                # group "a.b.c.d" -> "a/b/c.d" URL path (first 3 dots only)
                groupid = nexIDs["items"][artifacts]["group"].replace(".", "/", 3)
                print("aftre here")
                request = urllib2.Request(self.config.get("baseUrl", '')+"repository/"+repoid+"/"+groupid+"/"+nexIDs["items"][artifacts]["name"]+"/maven-metadata.xml")
                request.add_header('Authorization', 'Basic %s' % self.getBase64Value(self.userid,self.passwd))
                mavenmetafile = urllib2.urlopen(request)#reading base mavenmetadata file to fetch main version
                #mavenmetafile = urllib2.urlopen(self.config.get("baseUrl", '')+"repository/"+repoid+"/"+groupid+"/"+nexIDs["items"][artifacts]["name"]+"/maven-metadata.xml")#reading base mavenmetadata file to fetch main version
                mavenmetadata = xmltodict.parse(mavenmetafile.read())
                print(mavenmetadata)
                mavenmetafile.close()
                lastupdated = mavenmetadata["metadata"]["versioning"]["lastUpdated"]
                tracking = self.trackingUpdation(repoid, lastupdated)
                self.prepareAndPublish(nexIDs["items"][artifacts], tracking)

    def prepareAndPublish(self, nexIDs, tracking):
        """Publish every version (tracking == 1: repo seen for the first
        time) or only the newest one (tracking == 2: repo was updated);
        tracking == 0 publishes nothing."""
        repoid = nexIDs["repository"]
        artifactid = nexIDs["name"]
        groupid = nexIDs["group"].replace(".", "/", 3)
        request = urllib2.Request(self.config.get("baseUrl", '')+"repository/"+repoid+"/"+groupid+"/"+nexIDs["name"]+"/maven-metadata.xml")
        request.add_header('Authorization', 'Basic %s' % self.getBase64Value(self.userid,self.passwd))
        mavenmetafile = urllib2.urlopen(request)#reading base mavenmetadata file to fetch main version
        mavenmetadata = xmltodict.parse(mavenmetafile.read())
        mavenmetafile.close()
        lastupdated = mavenmetadata["metadata"]["versioning"]["lastUpdated"]
        if tracking>0:
            if tracking == 1:
                if isinstance(mavenmetadata["metadata"]["versioning"]["versions"]["version"],list):
                    for version in mavenmetadata["metadata"]["versioning"]["versions"]["version"]:
                        self.publishdata(repoid, groupid, nexIDs, version, artifactid, lastupdated)
                else:
                    # Single version: xmltodict yields a plain string, not a list.
                    version = mavenmetadata["metadata"]["versioning"]["versions"]["version"]
                    self.publishdata(repoid, groupid, nexIDs, version, artifactid, lastupdated)
            else:
                # tracking == 2: publish only the last (most recent) version.
                version = mavenmetadata["metadata"]["versioning"]["versions"]["version"][len(mavenmetadata["metadata"]["versioning"]["versions"]["version"])-1]
                self.publishdata(repoid, groupid, nexIDs, version, artifactid, lastupdated)

    def publishdata(self, repoid, groupid, nexIDs, version, artifactid, lastupdated):
        """Fetch one version's POM to learn its packaging, then publish a
        single metadata record via BaseAgent.publishToolsData."""
        data = []
        print(self.config.get("baseUrl", '')+"repository/"+repoid+"/"+groupid+"/"+nexIDs["name"]+"/"+version+"/"+nexIDs["name"]+"-"+version+".pom")
        request = urllib2.Request(self.config.get("baseUrl", '')+"repository/"+repoid+"/"+groupid+"/"+nexIDs["name"]+"/"+version+"/"+nexIDs["name"]+"-"+version+".pom")
        request.add_header('Authorization', 'Basic %s' % self.getBase64Value(self.userid,self.passwd))
        mainmavenxml = urllib2.urlopen(request)#reading mavenmetadata file inside main version folder
        mainmavendata = mainmavenxml.read()
        mainmavenxml.close()
        artifactfullname = artifactid + "-" + version + "." + xmltodict.parse(mainmavendata)["project"]["packaging"]
        injectData = {}
        injectData["timestamp"] = lastupdated
        injectData["version"] = version
        injectData["currentID"] = groupid+ "-" + artifactfullname
        injectData["resourceKey"] = nexIDs["group"] + ':' + nexIDs["name"]
        injectData["Status"] = "Archive"
        injectData["Author"] = self.userid
        data.append(injectData)
        print("*****")
        print(data)
        self.publishToolsData(data)

    def nexus(self, logResponse):
        # Placeholder hook; currently unused.
        #print (logResponse)
        return

    def getBase64Value(self,userid,passwd):
        """Return base64("userid:passwd") for the HTTP Basic auth header."""
        userpass = '%s:%s' % (userid,passwd)
        base64string = base64.standard_b64encode(userpass.encode('utf-8'))
        return base64string.decode('utf-8')

    def trackingUpdation(self, repoid, lastupdated):
        """Persist the per-repository lastUpdated timestamp.

        Returns 1 when the repo is seen for the first time, 2 when its
        timestamp advanced, 0 when nothing changed."""
        self.loadTrackingConfig()
        if self.tracking.get(repoid) is None:
            self.tracking[repoid] = lastupdated
            self.updateTrackingJson(self.tracking)
            return 1
        else:
            if int(self.tracking.get(repoid, None)) < int(lastupdated):
                self.tracking[repoid] = lastupdated
                self.updateTrackingJson(self.tracking)
                return 2
            else:
                return 0
if __name__ == "__main__":
    # Instantiating the agent presumably starts its run loop in BaseAgent
    # (TODO confirm against BaseAgent.__init__).
    NexusAgent()
|
import os
import math
import discord
from discord.ext import menus
class ViewMenu(menus.Menu):
    """A discord.ext.menus Menu whose controls are discord UI buttons
    (a discord.ui.View) instead of reactions."""

    def __init__(self, *, auto_defer=True, **kwargs):
        super().__init__(**kwargs)
        # When True, every button interaction is deferred before dispatch.
        self.auto_defer = auto_defer
        # The currently attached discord.ui.View (built by build_view()).
        self.view = None
        # Background tasks owned by this menu (the internal wait loop).
        self.__tasks = []

    def button_check(self, interaction):
        """Only the menu author's interactions are accepted."""
        return self.ctx.author.id == interaction.user.id

    async def on_menu_button_error(self, exc):
        await self.bot.errors.handle_menu_button_error(exc, self)

    def build_view(self):
        """Build a discord.ui.View mirroring this menu's buttons.

        Returns None when should_add_reactions() is False.  Each UI button
        forwards its interaction to the corresponding menus button, honouring
        the button's lock and the menu's running state.
        """
        if not self.should_add_reactions():
            return None

        def make_callback(button):
            # Bind one menus-button to a UI-button callback.
            async def callback(interaction):
                if self.button_check(interaction) is False:
                    return
                if self.auto_defer:
                    await interaction.response.defer()
                try:
                    if button.lock:
                        async with self._lock:
                            if self._running:
                                await button(self, interaction)
                    else:
                        await button(self, interaction)
                except Exception as exc:
                    await self.on_menu_button_error(exc)
            return callback

        view = discord.ui.View(timeout=self.timeout)
        # Five buttons per row (discord's per-row component limit).
        for i, (emoji, button) in enumerate(self.buttons.items()):
            item = discord.ui.Button(style=discord.ButtonStyle.secondary, emoji=emoji, row=i // 5)
            item.callback = make_callback(button)
            view.add_item(item)
        self.view = view
        return view

    def add_button(self, button, *, react=False):
        """Add a button; with react=True, returns a coroutine that also
        refreshes the live message's view (raises if not started)."""
        super().add_button(button)
        if react:
            if self.__tasks:
                async def wrapped():
                    self.buttons[button.emoji] = button
                    try:
                        await self.message.edit(view=self.build_view())
                    except discord.HTTPException:
                        raise
                return wrapped()
            async def dummy():
                raise menus.MenuError("Menu has not been started yet")
            return dummy()

    def remove_button(self, emoji, *, react=False):
        """Remove a button; with react=True, returns a coroutine that also
        refreshes the live message's view (raises if not started)."""
        super().remove_button(emoji)
        if react:
            if self.__tasks:
                async def wrapped():
                    self.buttons.pop(emoji, None)
                    try:
                        await self.message.edit(view=self.build_view())
                    except discord.HTTPException:
                        raise
                return wrapped()
            async def dummy():
                raise menus.MenuError("Menu has not been started yet")
            return dummy()

    def clear_buttons(self, *, react=False):
        """Clear all buttons; with react=True, returns a coroutine that also
        detaches the view from the live message (raises if not started)."""
        super().clear_buttons()
        if react:
            if self.__tasks:
                async def wrapped():
                    try:
                        await self.message.edit(view=None)
                    except discord.HTTPException:
                        raise
                return wrapped()
            async def dummy():
                raise menus.MenuError("Menu has not been started yet")
            return dummy()

    async def _internal_loop(self):
        """Wait for the view to finish, then run finalize() and clean up
        the message according to delete/clear flags."""
        self.__timed_out = False
        try:
            self.__timed_out = await self.view.wait()
        except Exception:
            pass
        finally:
            self._event.set()
        try:
            await self.finalize(self.__timed_out)
        except Exception:
            pass
        finally:
            self.__timed_out = False
        # can't do any requests if the bot is closed
        if self.bot.is_closed():
            return
        try:
            if self.delete_message_after:
                return await self.message.delete()
            if self.clear_reactions_after:
                return await self.message.edit(view=None)
        except Exception:
            pass

    async def start(self, ctx, *, channel=None, wait=False):
        """Start the menu in *channel* (default: ctx.channel); with
        wait=True, block until the menu finishes."""
        # Reset the cached buttons property so it is recomputed fresh.
        try:
            del self.buttons
        except AttributeError:
            pass
        self.bot = bot = ctx.bot
        self.ctx = ctx
        self._author_id = ctx.author.id
        channel = channel or ctx.channel
        is_guild = hasattr(channel, "guild")
        me = channel.guild.me if is_guild else ctx.bot.user
        permissions = channel.permissions_for(me)
        self._verify_permissions(ctx, channel, permissions)
        self._event.clear()
        msg = self.message
        if msg is None:
            self.message = msg = await self.send_initial_message(ctx, channel)
        # Cancel any loop left over from a previous start.
        for task in self.__tasks:
            task.cancel()
        self.__tasks.clear()
        self._running = True
        self.__tasks.append(bot.loop.create_task(self._internal_loop()))
        if wait:
            await self._event.wait()

    def send_with_view(self, messageable, *args, **kwargs):
        """Send a message with this menu's freshly built view attached."""
        return messageable.send(*args, **kwargs, view=self.build_view())

    def stop(self):
        """Stop the menu and cancel its background tasks."""
        self._running = False
        for task in self.__tasks:
            task.cancel()
        self.__tasks.clear()
class ViewMenuPages(menus.MenuPages, ViewMenu):
    """MenuPages variant whose pagination controls are UI buttons."""

    def __init__(self, source, **kwargs):
        self._source = source
        self.current_page = 0
        super().__init__(source, **kwargs)

    async def send_initial_message(self, ctx, channel):
        """Send page 0 with the button view attached."""
        page = await self._source.get_page(0)
        kwargs = await self._get_kwargs_from_page(page)
        return await self.send_with_view(channel, **kwargs)
class IndexMenu(ViewMenu):
    """Top-level menu showing an index embed and hosting sub-menus.

    Subclasses implement format_index(); buttons that open sub-menus are
    expected to set self.active_menu so finalize() can stop it.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Currently open sub-menu, if any (stopped when this menu ends).
        self.active_menu = None

    async def send_initial_message(self, ctx, channel):
        return await self.send_with_view(ctx, embed=await self.format_index())

    async def format_index(self):
        """Displays the menu embed."""
        raise NotImplementedError()

    async def on_menu_button_error(self, exc):
        await self.bot.errors.handle_menu_button_error(exc, self)

    async def finalize(self, timed_out):
        if self.active_menu is not None:
            self.active_menu.stop()

    def build_view(self):
        # Same as ViewMenu.build_view, but every button is tagged with an
        # "indexmenu:"-prefixed custom_id so sub-menus can recognise and
        # restore the index buttons later.
        if not self.should_add_reactions():
            return None

        def make_callback(button):
            async def callback(interaction):
                if self.button_check(interaction) is False:
                    return
                if self.auto_defer:
                    await interaction.response.defer()
                try:
                    if button.lock:
                        async with self._lock:
                            if self._running:
                                await button(self, interaction)
                    else:
                        await button(self, interaction)
                except Exception as exc:
                    await self.on_menu_button_error(exc)
            return callback

        view = discord.ui.View(timeout=self.timeout)
        for i, (emoji, button) in enumerate(self.buttons.items()):
            item = discord.ui.Button(style=discord.ButtonStyle.secondary, emoji=emoji, row=i // 5,
                                     custom_id=f"indexmenu:{os.urandom(16).hex()}")
            item.callback = make_callback(button)
            view.add_item(item)
        self.view = view
        return view
class SubMenuPages(ViewMenuPages):
    """Paginated sub-menu attached to an IndexMenu.

    Reuses the parent menu's message; on close, restores the parent's
    index buttons (recognised by their "indexmenu:" custom_id prefix).
    """

    def __init__(self, source, *, parent_menu: IndexMenu, **kwargs):
        self._source = source
        self.parent_menu = parent_menu
        super().__init__(message=parent_menu.message, source=source, clear_reactions_after=False, **kwargs)
        # When True, finalize() redraws the parent index on close.
        self.show_index = True
        self.clean_up_buttons()

    def clean_up_buttons(self):
        # The stop button is redundant here; the index buttons stay usable.
        self.remove_button('\N{BLACK SQUARE FOR STOP}\ufe0f')

    async def start(self, ctx, *, channel=None, wait=False):
        # BUG FIX: the original forwarded channel=None and wait=False,
        # silently dropping the caller's arguments.
        await super().start(ctx, channel=channel, wait=False)
        await self.message.edit(view=self.build_view())
        await self.show_page(0)
        if wait:
            # Honour wait only after the first page has been shown.
            await self._event.wait()

    def build_restored_parent_view(self):
        # Make a brand new view with the parent items since it wouldn't work
        # using the original one.
        view = discord.ui.View(timeout=self.parent_menu.timeout)
        for item in self.view.children:
            if item.custom_id.startswith("indexmenu"):
                view.add_item(item)
        return view

    def stop(self, *, show_index: bool = False):
        """Make sure to set show_index to true on an index calling button."""
        self.show_index = show_index
        super().stop()

    async def finalize(self, timed_out):
        if timed_out or not self.show_index:
            return
        self.parent_menu.view = view = self.build_restored_parent_view()
        self.parent_menu.active_menu = None
        await self.message.edit(view=view, embed=await self.parent_menu.format_index())

    def build_view(self):
        """Build a view holding the parent's index buttons plus this menu's
        pagination buttons (leftover debug prints removed)."""
        if not self.should_add_reactions():
            view = discord.ui.View(timeout=self.parent_menu.timeout)
            for item in self.parent_menu.view.children:
                if item.custom_id.startswith('indexmenu'):
                    view.add_item(item)
            self.view = self.parent_menu.view = view
            return self.parent_menu.view

        def make_callback(button):
            async def callback(interaction):
                if self.button_check(interaction) is False:
                    return
                if self.auto_defer:
                    await interaction.response.defer()
                try:
                    if button.lock:
                        async with self._lock:
                            if self._running:
                                await button(self, interaction)
                    else:
                        await button(self, interaction)
                except Exception as exc:
                    await self.on_menu_button_error(exc)
            return callback

        # Brand new View object so the original one stays intact for later use.
        view = discord.ui.View(timeout=self.parent_menu.timeout)
        for item in self.parent_menu.view.children:
            if item.custom_id.startswith('indexmenu'):
                view.add_item(item)
        # Pagination buttons start on the first row after the index rows.
        for i, (emoji, button) in enumerate(self.buttons.items(), start=math.ceil(len(view.children) / 5) * 5):
            item = discord.ui.Button(style=discord.ButtonStyle.secondary, emoji=emoji, row=i // 5)
            item.callback = make_callback(button)
            view.add_item(item)
        self.view = self.parent_menu.view = view
        return view
class SubMenu(ViewMenu):
    """Non-paginated sub-menu attached to an IndexMenu (shares its message)."""

    def __init__(self, *, parent_menu: IndexMenu, **kwargs):
        self.parent_menu = parent_menu
        super().__init__(message=parent_menu.message, clear_reactions_after=False, **kwargs)
        # When True, finalize() redraws the parent index on close.
        self.show_index = True
        self.clean_up_buttons()

    async def send_initial_message(self, ctx, **kwargs):
        raise Exception('This menu does not support send_initial_message')

    async def get_initial_embed(self):
        """Returns the embed that will be shown once the menu is started."""
        raise NotImplementedError

    def clean_up_buttons(self):
        # Drop the stop button; the index buttons remain available.
        self.remove_button('\N{BLACK SQUARE FOR STOP}\ufe0f')

    async def start(self, ctx, *, channel=None, wait=False):
        await super().start(ctx, channel=channel, wait=wait)
        await self.message.edit(embed=await self.get_initial_embed(), view=self.build_view())

    def build_restored_parent_view(self):
        # Make a brand new view with the parent items since it wouldn't work using the original one.
        view = discord.ui.View(timeout=self.parent_menu.timeout)
        for item in self.view.children:
            if item.custom_id.startswith("indexmenu"):
                view.add_item(item)
        return view

    def stop(self, *, show_index: bool = False):
        """Make sure to set show_index to true on an index calling button."""
        self.show_index = show_index
        super().stop()

    async def finalize(self, timed_out):
        if timed_out or not self.show_index:
            return
        self.parent_menu.view = view = self.build_restored_parent_view()
        self.parent_menu.active_menu = None
        await self.message.edit(view=view, embed=await self.parent_menu.format_index())

    def build_view(self):
        # Combine the parent's index buttons with this menu's own buttons.
        if not self.should_add_reactions():
            view = discord.ui.View(timeout=self.parent_menu.timeout)
            for item in self.parent_menu.view.children:
                if item.custom_id.startswith('indexmenu'):
                    view.add_item(item)
            self.view = self.parent_menu.view = view
            return self.parent_menu.view

        def make_callback(button):
            async def callback(interaction):
                if self.button_check(interaction) is False:
                    return
                if self.auto_defer:
                    await interaction.response.defer()
                try:
                    if button.lock:
                        async with self._lock:
                            if self._running:
                                await button(self, interaction)
                    else:
                        await button(self, interaction)
                except Exception as exc:
                    await self.on_menu_button_error(exc)
            return callback

        # Brand new View Object so the original one stays intact for later use.
        view = discord.ui.View(timeout=self.parent_menu.timeout)
        for item in self.parent_menu.view.children:
            if item.custom_id.startswith('indexmenu'):
                view.add_item(item)
        # Own buttons start on the first row after the index rows.
        for i, (emoji, button) in enumerate(self.buttons.items(), start=math.ceil(len(view.children) / 5) * 5):
            item = discord.ui.Button(style=discord.ButtonStyle.secondary, emoji=emoji, row=i // 5)
            item.callback = make_callback(button)
            view.add_item(item)
        self.view = self.parent_menu.view = view
        return view
|
import discogs_client
import json
import libtmux
import os
import subprocess
import shutil
import time
from dotenv import dotenv_values
from PIL import Image
from imgcat import imgcat
from rich import print, box
from rich.console import Console
from rich.table import Table
# from rich.rule import Rule
def get_info(status):
    """Parse `cmus-remote -Q` output lines into a metadata dict.

    Recognised keys: file, title, artist, album, year, label, length,
    position.  Later matching lines overwrite earlier ones.
    """
    info = {}
    tag_fields = ((' title ', 'title'), (' artist ', 'artist'),
                  (' album ', 'album'), (' label ', 'label'))
    for line in status:
        words = line.split(' ')
        if 'file ' in line:
            info['file'] = ' '.join(words[1:])
        for marker, key in tag_fields:
            if marker in line:
                info[key] = ' '.join(words[2:])
        # Prefer originaldate over plain date for the release year.
        if 'originaldate' in line:
            info['year'] = words[2][0:4]
        elif 'date' in line:
            info['year'] = words[2][0:4]
        for marker, key in (('duration ', 'length'), ('position ', 'position')):
            if marker in line:
                info[key] = words[1]
    return info
def build_table(info):
    """Build a rich Table of track metadata; the label row is optional."""
    table = Table(box=box.SIMPLE)
    table.add_row('Title', info['title'])
    table.add_row('Artist', info['artist'])
    table.add_row('Album', info['album'])
    table.add_row('Year', info['year'])
    # BUG FIX: the original wrapped this in a bare "except: pass", which
    # swallowed every error, not just a missing key; test for the key instead.
    if 'label' in info:
        table.add_row('Label', info['label'])
    return table
def find_image(file):
    """Return the cover art path (cover.jpg or cover.png, in that order)
    in *file*'s directory, or None when neither exists."""
    folder = os.path.dirname(file)
    for candidate in ('cover.jpg', 'cover.png'):
        path = folder + '/' + candidate
        if os.path.isfile(path):
            return path
    print('no cover art found')
    return None
def get_album_info(album, artist):
    """Stub: intended to look up the release on Discogs (not implemented)."""
    pass
    # print(album, artist)
    # results = d.search(release_title=album, artist=artist, type='master')
    # print(results.page(1))
def display():
    """Redraw the whole UI for the currently playing track.

    Reads module globals: ``status`` (cmus-remote output), ``console``
    (rich), ``window`` (tmux), ``d`` (discogs client) and ``collection``.
    """
    console.clear()
    # 12 px per terminal column -- sizing heuristic for the imgcat art pane.
    columns = shutil.get_terminal_size()[0] * 12
    info = get_info(status)
    try:
        image = find_image(info['file'])
        table = build_table(info)
        # doesn't need to be saved to file, but tmux won't
        # display it properly otherwise
        im_file = os.getcwd() + '/art.jpg'
        im = Image.open(image)
        im = im.resize((columns,columns), reducing_gap=2)
        quality = 75
        im.save(im_file, 'JPEG', optimize=True, quality=quality)
        # Re-encode at decreasing quality until the file is small enough to
        # display (~77 kB threshold -- presumably a tmux/imgcat limit).
        while os.path.getsize(im_file) > 77000:
            quality -= 5
            im.save(im_file, 'JPEG', optimize=True, quality=quality)
        else:
            # while/else: runs when the loop exits normally (always, since
            # there is no break) -- focuses the art pane before drawing.
            window.select_pane(1)
        imgcat(open(im_file))
        # imgcat(im)
    except Exception as e:
        # Any failure (no art, bad image, ...) falls back to an empty table.
        # print(Exception, e)
        table = Table(box=box.SIMPLE)
    window.select_pane(1)
    console.print(table, justify='center')
    try:
        get_album_info(info['album'], info['artist'])
    except Exception as e:
        # print(Exception, e)
        pass
    # do some discogs searching
    # TODO: try to avoid running multiple searches for the same album
    try:
        d_search = d.search(artist=info['artist'], title=info['album'])
        owned = False
        for i in d_search:
            if i.id in collection:
                console.print('In collection! [link=https://discogs.com'+i.url+']see release[/link]', justify='center')
                owned = True
        if not owned:
            # TODO: add link to find a copy
            # ----- grab the master release in previous for loop
            # ----- search bandcamp?
            console.print('Not in collection', justify='center')
    except Exception as e:
        pass
        # print(Exception, e)
    window.select_pane(0)
###############################################################
# running starts here
#
# Load Discogs credentials from .env and create the API client.
config = dotenv_values(".env")
d = discogs_client.Client('stvnrlly/0.1', user_token=config['DISCOGS_TOKEN'])
# connect to tmux so that we can switch panes
# tmux must be started directly outside the script
try:
    server = libtmux.Server()
    session = server.find_where({ "session_name": "cmus" })
    window = session.attached_window
except:
    # NOTE(review): bare except; on failure ``window`` stays undefined and
    # display() will raise later -- consider exiting here instead.
    print('no tmux running')
# grab initial cmus status
status = [line.decode('utf-8') for line in subprocess.check_output(['cmus-remote', '-Q']).splitlines()]
# initialize discogs collection info
collection = {}
with open('./data/discogs.json') as f:
    collection = json.load(f)
# set up rich (used for centering text)
console = Console()
# create initial display
if status[0] == 'status playing':
    display()
# start scrobble counter
listened = 0
# Poll cmus once per second; redraw when the current track (line 1 of the
# status output) changes, otherwise count seconds listened.
while True:
    # check if track changed
    new_status = [line.decode('utf-8') for line in subprocess.check_output(['cmus-remote', '-Q']).splitlines()]
    if status[1] != new_status[1]:
        listened = 0
        status = new_status
        display()
    else:
        listened += 1
    # if listened == 10:
        # TODO: scrobble here
    time.sleep(1)
|
# Re-export the stream interface as this package's public API.
from .stream_interface import TDKLambdaGenesysStreamInterface
__all__ = ['TDKLambdaGenesysStreamInterface']
|
# Script to grab data from HYCOM model and save it.
from __future__ import division
import netCDF4 as nc
import datetime
import os
import numpy as np
# Local directory where subsetted HYCOM files are written.
SAVE_DIR = '/ocean/nsoontie/MEOPAR/HYCOM/'
# NOMADS OPeNDAP root for the RTOFS model output.
HYCOM_URL = 'http://nomads.ncep.noaa.gov:9090/dods/rtofs/'
# Dataset basenames; {} is filled with 'forecast' or 'nowcast'.
FILENAMES = ['rtofs_glo_2ds_{}_3hrly_diag', # ssh files
             'rtofs_glo_3dz_{}_6hrly_reg2', # temp/salinity
             ]
# sub domain (degrees)
LON_MIN = -126
LON_MAX = -124
LAT_MIN = 48
LAT_MAX = 49
def main():
    """Save yesterday's HYCOM data: every product, forecast and nowcast."""
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    for basename in FILENAMES:
        for mode in ('forecast', 'nowcast'):
            save_netcdf_file(yesterday, mode, basename)
def save_netcdf_file(date, mode, name):
    """Saves hycom netcdf file in a subdomain
    :arg date: the hycom simulation date
    :type date: datetime object
    :arg mode: forecast or nowcast
    :type mode: string, either 'forecast' or 'nowcast'
    :arg name: basename of the HYCOM model. Determines which variables
    are downloaded.
    :type name: string
    """
    # setting up to read file
    datestr = 'rtofs_global{}'.format(date.strftime('%Y%m%d'))
    filename = name.format(mode)
    # NOTE: os.path.join only builds a correct URL with POSIX separators.
    url = os.path.join(HYCOM_URL, datestr, filename)
    # look up subdomain indices
    iss, jss = determine_subdomain(url, LON_MIN, LON_MAX, LAT_MIN, LAT_MAX)
    # setting up to save file
    directory = os.path.join(SAVE_DIR, mode, date.strftime('%Y-%m-%d'))
    try:
        # Create unconditionally instead of testing first: avoids the race
        # between os.path.exists() and os.makedirs().
        os.makedirs(directory)
    except OSError:
        if not os.path.isdir(directory):
            raise
    save_path = os.path.join(directory, filename + '.nc')
    # copy the netcdf subdomain into the save file with ncks
    cmd = 'ncks -d lat,{j1},{j2} -d lon,{i1},{i2} {hycom} {newfile}'.format(
        j1=jss[0][0], j2=jss[0][-1], i1=iss[0][0], i2=iss[0][-1],
        hycom=url, newfile=save_path)
    os.system(cmd)
def determine_subdomain(url, lon_min, lon_max, lat_min, lat_max):
    """Return index arrays for the latitudes/longitudes inside a subdomain.

    The subdomain is bounded by lon_min, lon_max, lat_min, lat_max.

    :arg url: opendap URL
    :type url: string

    :arg lon_min: minimum longitude of subdomain
    :type lon_min: float

    :arg lon_max: maximum longitude of subdomain
    :type lon_max: float

    :arg lat_min: minimum latitude of subdomain
    :type lat_min: float

    :arg lat_max: maximum latitude of subdomain
    :type lat_max: float

    :returns: iss, jss - numpy index arrays for the subdomain,
              iss for longitude, jss for latitude.
    """
    dataset = nc.Dataset(url)
    lons = dataset.variables['lon'][:]
    lats = dataset.variables['lat'][:]
    # Model longitudes may run 0..360; shift to -180..180 for the comparison.
    if lons.max() > 180:
        lons = lons - 360
    iss = np.where((lons >= lon_min) & (lons <= lon_max))
    jss = np.where((lats >= lat_min) & (lats <= lat_max))
    return iss, jss
# Run the download only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from interfaceBuilder import interface
from interfaceBuilder import file_io
from interfaceBuilder import inputs
from interfaceBuilder import utils as ut
class Structure():
"""
Class for holding structure data from a general simulation.
Single time snapshot.
"""
    def __init__(self,
                 cell = None,\
                 pos = None,\
                 type_n = None,\
                 type_i = None,\
                 idx = None,\
                 mass = None,\
                 frozen = None,\
                 filename = None,\
                 pos_type = None,\
                 load_from_file = None,\
                 load_from_input = None,\
                 format = None):
        """Build a Structure from explicit arrays, a file, or a named input.

        cell            = (3, 3) array, cell vectors
        pos             = (N, 3) array, atomic positions
        type_n          = (N,) array, element names
        type_i          = (N,) array, numeric type of each atom
        idx             = (N,) array, atomic indices
        mass            = (N,) or (nr_of_types,) array, atomic masses
        frozen          = (N, 3) bool array, frozen degrees of freedom
        filename        = str, name used when printing/writing the structure
        pos_type        = str("c"/"d"), cartesian or direct positions
        load_from_file  = str, read the structure from this file (file_io)
        load_from_input = str, build the structure from a predefined lattice
        format          = str, file format used with load_from_file

        Missing properties are filled with defaults; the structure is sorted
        (type-z-y-x) at the end of construction.
        """
        if load_from_file is not None:
            # NOTE(review): "type" shadows the builtin here; it holds either
            # element names or numeric types depending on the file contents.
            cell, pos, type, idx, mass = file_io.readData(load_from_file, format)
            if isinstance(type[0], (np.integer, int)):
                type_i = type
            else:
                type_n = type
            # Heuristic: positions entirely inside [0, 1] are taken as direct
            if np.all(pos >= 0) and np.all(pos <= 1):
                pos_type = "d"
            else:
                pos_type = "c"
            if filename is None: filename = load_from_file
        elif load_from_input is not None:
            cell, pos, type_n, mass = inputs.getInputs(lattice = load_from_input)
            # Heuristic: positions entirely inside [0, 1] are taken as direct
            if np.all(pos >= 0) and np.all(pos <= 1):
                pos_type = "d"
            else:
                pos_type = "c"
            # Enumerate the unique element names as numeric types 1..n
            type_i = np.zeros(type_n.shape[0])
            for i, item in enumerate(np.unique(type_n)):
                type_i[type_n == item] = i + 1
        """Simply placeholders for element names"""
        elements = [ "A", "B", "C", "D", "E", "F", "G", "H", "I",\
                     "J", "K", "L", "M", "N", "O", "P", "Q", "R"]
        # Fill in whichever of type_n / type_i is missing from the other
        if type_n is None:
            if type_i is None:
                type_i = np.ones(pos.shape[0])
                type_n = np.chararray(pos.shape[0], itemsize = 2)
                type_n[:] = "A"
            else:
                type_n = np.chararray(pos.shape[0], itemsize = 2)
                for i, item in enumerate(np.unique(type_i)):
                    type_n[type_i == item] = elements[i]
        else:
            if type_i is None:
                type_i = np.ones(type_n.shape[0])
                for i, item in enumerate(np.unique(type_n)):
                    type_i[type_n == item] = i + 1
        # Remaining defaults
        if idx is None: idx = np.arange(pos.shape[0])
        if mass is None: mass = type_i
        if frozen is None: frozen = np.zeros((pos.shape[0], 3), dtype = bool)
        if pos_type is None: pos_type = "c"
        if filename is None: filename = "structure_obj"
        self.filename = filename
        self.cell = cell
        self.pos = pos
        self.type_n = type_n
        self.type_i = type_i
        self.pos_type = pos_type.lower()
        self.frozen = frozen
        """Assign idx if dimensions match otherwise assign a range"""
        if idx.shape[0] != self.pos.shape[0]:
            self.idx = np.arange(self.pos.shape[0])
        else:
            self.idx = idx
        mass = np.array(mass)
        if mass.shape[0] == self.pos.shape[0]:
            """If shape of mass == nr o atoms simply assign it"""
            self.mass = mass
        elif mass.shape[0] == np.unique(self.type_i).shape[0]:
            """If shape mass == unique types, assign to types in order"""
            self.mass = np.ones(self.pos.shape[0])
            for i, item in enumerate(np.unique(self.type_i)):
                self.mass[self.type_i == item] = mass[i]
        else:
            """Else simply assign the type_i value as placeholder"""
            self.mass = type_i
        self.sortStructure(verbose = 0)
def sortStructure(self, sort = "type", reset_idx = False, verbose = 1):
"""Function for sorting the structure
sort = str("type"/"index"), Sort by type-z-y-x or sort by index as loaded
from simulation file.
reset_idx = bool, If True then reset the index of the structure
verbose = int, Print extra information
"""
if sort.lower() == "type":
"""Sorts the structure based on type_i then z then y then x"""
si = np.lexsort((self.pos[:, 0], self.pos[:, 1], self.pos[:, 2], self.type_i))
elif sort.lower() == "index":
"""Sort by the idx property, (index as written by the simulation programs)"""
si = np.argsort(self.idx)
if verbose > 0:
string = "Sorting structure by %s" % sort.lower()
ut.infoPrint(string)
"""Sort as specified"""
self.pos = self.pos[si]
self.type_i = self.type_i[si]
self.type_n = self.type_n[si]
self.frozen = self.frozen[si]
self.mass = self.mass[si]
if reset_idx:
self.resetIndex(verbose = verbose)
else:
self.idx = self.idx[si]
def resetIndex(self, verbose = 1):
"""Function for reseting the atomic indicies
verbose = int, Print extra information
"""
self.idx = np.arange(self.pos.shape[0])
if verbose > 0:
string = "Reseting atom index"
ut.infoPrint(string)
def printStructure(self):
"""Function to print formated output of the structure"""
string = "%s" % self.filename
print("\n%s" % string)
print("-" * len(string))
print("Cell")
print("-" * 32)
for i in range(self.cell.shape[0]):
print("%10.4f %10.4f %10.4f" % (self.cell[i, 0], self.cell[i, 1], self.cell[i, 2]))
print("-" * 32)
string = "%5s %5s %5s %12s %12s %12s" %\
("Index", "Name", "Type", "Pos x ", "Pos y ", "Pos z ")
print(string)
print("-" * len(string))
for i in range(self.pos.shape[0]):
print("%5i %5s %5i %12.5f %12.5f %12.5f" % (self.idx[i], self.type_n[i].decode("utf-8"),\
self.type_i[i], self.pos[i, 0], self.pos[i, 1],\
self.pos[i, 2]))
print("-" * len(string))
string = "Nr of Atoms: %i | Nr of Elements: %i" % (self.pos.shape[0], np.unique(self.type_i).shape[0])
print(string)
def alignStructure(self, dim = [1, 0, 0], align = [1, 0, 0], verbose = 1):
"""Function for aligning a component of the structure in a specific dimension
dim = [float, float, float], the cell will be aligned to have this
cell vector entierly in the direction of the cartesian axis supplied
in the align parameter. Dim in direct coordinates.
align = [float, float, float], cartesian axis to align dim to. Align in cartesian coordinates.
verbose = int, Print extra information
"""
if align[2] != 0:
align[2] = 0
print("Only alignment of cells with the z-axis ortogonal to the xy-plane is supported")
print("The Z-component is discarded")
dim = np.array(dim)
align = np.array(align)
"""Get the cartesian direction of the dim which is to be aligned"""
dim_cart = np.matmul(self.cell, dim)
if dim_cart[2] != 0:
dim_cart[2] = 0
print("Specified dimension has cartesian components in the Z direction.")
print("The rotation is made ortogonal to the xy-plane.")
"""Get the angle between dim and align"""
aRad = np.arccos(np.dot(dim_cart, align) / (np.linalg.norm(dim_cart) * np.linalg.norm(align)))
"""Check in which direction the rotation should be made"""
dir = np.cross(dim_cart, align)[2]
if dir < 0:
aRad = -aRad
R = np.array([[np.cos(aRad), -np.sin(aRad), 0],
[np.sin(aRad), np.cos(aRad), 0],
[ 0, 0, 1]])
self.cell = np.matmul(R, self.cell)
self.pos = np.matmul(R, self.pos.T).T
def getBoxLengths(self):
"""Function for getting the box side lengths in orthogonal x,y,z"""
return self.cell[np.identity(3, dtype = bool)]
def getAtoms(self, mode = "layer_z", opt = [0, 0.2], coordinates = "c"):
"""get index of atoms that match supplied criteria
Mode is supplied as a dict with keyword specified as below
and containing a list of
Mode and options
----------------
layer_x - [x_center, dx], slice out atoms within x_center +- dx
layer_y - [y_center, dy], slice out atoms within y_center +- dy
layer_z - [z_center, dz], slice out atoms within z_center +- dz
"""
if "c".startswith(coordinates.lower()):
self.dir2car()
string = "Changed to cartesian coordinates"
ut.infoPrint(string)
elif "d".startswith(coordinates.lower()):
self.car2dir()
string = "Changed to direct coordinates"
ut.infoPrint(string)
else:
string = "Unrecognized option coordinates = %s, (can be c or d)" % coordinates
ut.infoPrint(string)
return
if mode.lower() == "layer_x":
mask = (self.pos[:, 0] >= (opt[0] - opt[1])) *\
(self.pos[:, 0] <= (opt[0] + opt[1]))
index = np.arange(self.pos.shape[0])[mask]
elif mode.lower() == "layer_y":
mask = (self.pos[:, 1] >= (opt[0] - opt[1])) *\
(self.pos[:, 1] <= (opt[0] + opt[1]))
index = np.arange(self.pos.shape[0])[mask]
elif mode.lower() == "layer_z":
mask = (self.pos[:, 2] >= (opt[0] - opt[1])) *\
(self.pos[:, 2] <= (opt[0] + opt[1]))
index = np.arange(self.pos.shape[0])[mask]
return index
def getNeighborDistance(self, idx = None, r = 6, idx_to = None,\
extend = np.array([1, 1, 1], dtype = bool),\
verbose = 1):
"""Function for getting the distance between specified atoms within
radius r
idx = int, [int,], Index from which to calculate nearest neighbors
r = float, Radius or NN calculation
idx_to = int, [int,], Calculate the NN considering only these atoms
extend = np.ndarray([1/0, 1/0, 1/0]), Extend the cell if needed in
specified x, y, z directions
verbose = int, Print extra information
"""
"""Check some defaults"""
if idx is None: idx = np.arange(self.pos.shape[0])
if isinstance(idx, (int, np.integer)): idx = np.array([idx])
if idx_to is None: idx_to = np.arange(self.pos.shape[0])
if isinstance(idx_to, (int, np.integer)): idx_to = np.array([idx_to])
extend = np.array(extend, dtype = bool)
"""Change to cartesian coordinates"""
self.dir2car()
"""Check the rough maximum possible extent for relevant atoms"""
max_pos = np.max(self.pos[idx, :], axis = 0) + r
min_pos = np.min(self.pos[idx, :], axis = 0) - r
lim = np.all(self.pos < max_pos, axis = 1) *\
np.all(self.pos > min_pos, axis = 1)
idx_to = np.intersect1d(self.idx[lim], idx_to)
if verbose > 0:
string = "Considering idx_to within [%.2f, %.2f] (x), [%.2f, "\
" %.2f] (y), [%.2f, %.2f] (z)" % (min_pos[0], max_pos[0],\
min_pos[1], max_pos[1], min_pos[2], max_pos[2])
ut.infoPrint(string)
"""Cell extension to comply with the sepecified r value"""
box = self.getBoxLengths()
rep = np.ceil(r / box) - 1
rep = rep.astype(np.int)
rep[np.logical_not(extend)] = 0
"""If the box < r then extend the box. Otherwise wrap the cell"""
if np.any(box < r):
if verbose > 0:
string = "Replicating cell by %i, %i, %i (x, y, z)"\
% (rep[0] + 1, rep[1] + 1, rep[2] + 1)
ut.infoPrint(string)
pos_to, cell = self.getExtendedPositions(x = rep[0], y = rep[1], z = rep[2],\
idx = idx_to, return_cart = True,\
verbose = verbose - 1)
"""Change to cartesian coordinates"""
self.dir2car()
"""Change back to direct coordinates using the new extended cell"""
pos_to = np.matmul(np.linalg.inv(cell), pos_to.T).T
pos_from = np.matmul(np.linalg.inv(cell), self.pos.T).T
else:
"""Change to direct coordinates"""
self.car2dir()
if verbose > 0:
string = "Cell is only wrapped, not extended"
ut.infoPrint(string)
pos_to = self.pos.copy()[idx_to, :]
pos_from = self.pos.copy()
cell = self.cell
ps = pos_to.shape[0]
dist = np.zeros((np.shape(idx)[0] * ps, 3))
"""Measure distances between all specified atoms, wrap cell"""
for i, item in enumerate(idx):
d = pos_to - pos_from[[item], :]
d[d > 0.5] -= 1
d[d < -0.5] += 1
dist[i * ps : (i + 1) * ps, :] = d
"""Convert to cartesian coordinates"""
dist = np.matmul(cell, dist.T).T
"""Calculate the distances"""
dist = np.linalg.norm(dist, axis = 1)
"""Remove distances outside of radius r"""
dist = dist[dist < r]
"""Remove the 0 distances, (atom to it self)"""
dist = dist[dist > 0]
return dist
def getNearestNeighbors(self, idx = None, idx_to = None, NN = 8,\
verbose = 1, limit = np.array([5, 5, 5]),\
extend = np.array([1, 1, 1], dtype = bool)):
"""Function for getting index and distance to nearest neighbors
of specified atoms
idx = int, [int,], Index from which to calculate nearest neighbors
NN = int, Number of nearest neighbors to keep
idx_to = int, [int,], Calculate the NN considering only these atoms
limit = np.ndarray([float, float, float]), Limit around the maximum extent
of the included atoms, to speed up calculations
verbose = int, Print extra information
"""
"""Check some defaults"""
if idx is None: idx = np.arange(self.pos.shape[0])
if isinstance(idx, (int, np.integer)): idx = np.array([idx])
if idx_to is None: idx_to = np.arange(self.pos.shape[0])
if isinstance(idx_to, (int, np.integer)): idx_to = np.array([idx_to])
"""Change to cartesian coordinates"""
self.dir2car()
"""Do a rough check which atoms must be included in the NN search"""
max_pos = np.max(self.pos[idx, :], axis = 0) + limit
min_pos = np.min(self.pos[idx, :], axis = 0) - limit
lim = np.all(self.pos < max_pos, axis = 1) *\
np.all(self.pos > min_pos, axis = 1)
idx_to = np.intersect1d(self.idx[lim], idx_to)
if verbose > 0:
string = "Considering idx_to within [%.2f, %.2f] (x), [%.2f, "\
" %.2f] (y), [%.2f, %.2f] (z)" % (min_pos[0], max_pos[0],\
min_pos[1], max_pos[1], min_pos[2], max_pos[2])
ut.infoPrint(string)
"""Cell extension to comply with the sepecified limit"""
box = self.getBoxLengths()
rep = np.ceil(limit / box) - 1
rep = rep.astype(np.int)
rep[np.logical_not(extend)] = 0
if np.any(rep > 0):
if verbose > 0:
string = "Replicating cell by %i, %i, %i (x, y, z)"\
% (rep[0] + 1, rep[1] + 1, rep[2] + 1)
ut.infoPrint(string)
"""Extend teh cell"""
pos_to, cell = self.getExtendedPositions(x = rep[0], y = rep[1], z = rep[2],\
idx = idx_to, return_cart = True,\
verbose = verbose - 1)
"""Change to cartesian coordinates"""
self.dir2car()
"""Change back to direct coordinates using the new extended cell"""
pos_to = np.matmul(np.linalg.inv(cell), pos_to.T)
pos_from = np.matmul(np.linalg.inv(cell), self.pos.T)
else:
"""Change to direct coordinates"""
self.car2dir()
if verbose > 0:
string = "Cell is only wrapped, not extended"
ut.infoPrint(string)
pos_to = self.pos.copy()[idx_to, :].T
pos_from = self.pos.copy().T
cell = self.cell
if pos_to.shape[1] - 1 < NN:
string = "Within current limits (%.2f, %.2f, %.2f) fewer NN (%i) "\
"are present than specified (%i)" % (limit[0], limit[1],\
limit[2], pos_to.shape[1] - 1, NN)
ut.infoPrint(string)
return
distance = np.zeros((np.shape(idx)[0], NN))
"""Measure distances between all specified atoms, wrap cell"""
for i, item in enumerate(idx):
d = pos_to - pos_from[:, [item]]
d[d > 0.5] -= 1
d[d < -0.5] += 1
"""Convert to cartesian coordinates"""
c = np.matmul(cell, d)
"""Calculate distances"""
dist = np.sqrt(c[0, :]**2 + c[1, :]**2 + c[2, :]**2)
"""Remove distance to the same atom"""
mask = dist > 0
dist = dist[mask]
"""Sort distances"""
si = np.argsort(dist)
"""Pick out the NN nearest in all variables"""
distance[i, :] = dist[si][:NN]
return distance
def getNearestNeighborCollection(self, idx = None, idx_to = None, NN = 8,\
verbose = 1, limit = np.array([5, 5, 5]),\
extend = np.array([1, 1, 1], dtype = bool)):
"""Function for getting nearest neighbors around specified atoms to
specified atoms collected as an average with a standard deviation
idx = int, [int,], Index from which to calculate nearest neighbors
NN = int, Number of nearest neighbors to keep
idx_to = int, [int,], Calculate the NN considering only these atoms
limit = np.ndarray([float, float, float]), Limit around the maximum extent
of the included atoms, to speed up calculations
extend = np.ndarray([1/0, 1/0, 1/0]), Extend the cell if needed in
specified x, y, z directions
verbose = int, Print extra information
"""
"""Check some defaults"""
if idx is None: idx = np.arange(self.pos.shape[0])
if isinstance(idx, (int, np.integer)): idx = np.array([idx])
if idx_to is None: idx_to = np.arange(self.pos.shape[0])
if isinstance(idx_to, (int, np.integer)): idx_to = np.array([idx_to])
"""Change to cartesian coordinates"""
self.dir2car()
distance = self.getNearestNeighbors(idx = idx, idx_to = idx_to, NN = NN,\
verbose = verbose, limit = limit,\
extend = extend)
"""Return if less than NN neighbors could be found"""
if distance is None:
return
if verbose > 0:
string = "Calculated %i nearest neighbors for %i atoms" % (NN, distance.shape[0])
ut.infoPrint(string)
dist_mean = np.mean(distance, axis = 0)
dist_std = np.std(distance, axis = 0)
return dist_mean, dist_std
def plotNNC(self, idx, idx_to = None, NN = 8, verbose = True,\
handle = False, row = 1, col = 1, N = 1, save = False,\
format = "pdf", dpi = 100, legend = None, **kwargs):
"""Function for plotting a nearest neighbor collection with std
idx = int, [int,], Index from which to calculate nearest neighbors
NN = int, Number of nearest neighbors to keep
idx_to = int, [int,], Calculate the NN considering only these atoms
handle = bool, If True only prepare the axis don't draw the plot
row = int, Rows if used in subplots
col = int, Columns if used in subplots
N = int, Nr of plot if used in subplots
save = bool or str, Name to save the file to or save to default name
if True
format = valid matplotlib format, Format to save the plot in
dpi = int, DPI used when saving the plot
legend = [str,], Legend for the different entries
**kvargs = valid matplotlib errorbar kwargs
"""
lbl_1 = None
if idx is None:
idx = [np.arange(self.pos.shape[0])]
elif isinstance(idx, (int, np.integer)):
idx = [np.array([idx])]
elif isinstance(idx[0], (int, np.integer)):
idx = [idx]
elif isinstance(idx, str) and idx.lower() == "species":
idx, lbl_1 = self.getElementIdx()[:2]
lbl_2 = None
if idx_to is None:
idx_to = [np.arange(self.pos.shape[0])]
elif isinstance(idx_to, (int, np.integer)):
idx_to = [np.array([idx_to])]
elif isinstance(idx_to[0], (int, np.integer)):
idx_to = [idx_to]
elif isinstance(idx_to, str) and idx_to.lower() == "species":
idx_to, lbl_2 = self.getElementIdx()[:2]
if len(idx) == 1:
l_idx = np.zeros(len(idx_to), dtype = np.int)
else:
l_idx = np.arange(len(idx), dtype = np.int)
if len(idx_to) == 1:
l_idx_to = np.zeros(len(idx), dtype = np.int)
else:
l_idx_to = np.arange(len(idx_to), dtype = np.int)
if l_idx.shape[0] != l_idx_to.shape[0]:
string = "Length of idx and idx_to does not match (%i, %i). "\
"Can be (1,N), (N,1) or (N,N)"\
% (l_idx.shape[0], l_idx_to.shape[0])
ut.infoPrint(string)
return
x = []; y = []; s = []
for i in range(l_idx.shape[0]):
d_mean, d_std = self.getNearestNeighborCollection(idx = idx[l_idx[i]],\
idx_to = idx_to[l_idx_to[i]],\
NN = NN, verbose = verbose)
x.append(np.arange(1, d_mean.shape[0] + 1))
y.append(d_mean)
s.append(d_std)
if not handle:
hFig = plt.figure()
hAx = plt.subplot(row, col, N)
label = "_ignore"
for i, item in enumerate(y):
if legend is not None:
if legend.lower() == "idx":
label = "%i -> %i" % (l_idx[i], l_idx_to[i])
else:
label = legend[i]
elif lbl_1 is not None and lbl_2 is not None:
label = "%2s -> %2s" % (lbl_1[i], lbl_2[i])
elif lbl_1 is not None:
label = "%2s -> %i" % (lbl_1[i], l_idx_to[i])
elif lbl_2 is not None:
label = "%i -> %2s" % (l_idx[i], lbl_2[i])
hAx.errorbar(x[i], y[i], yerr = s[i], linestyle = ls, marker = m, capsize = cs,\
elinewidth = elw, markersize = ms, linewidth = lw, label = label, **kwargs)
hAx.set_xlabel("Neighbor")
hAx.set_ylabel("Distance, $(\AA)$")
if label != "_ignore":
hAx.legend(framealpha = 1, loc = "upper left")
hAx.set_title("Nearest Neighbor Distances")
plt.tight_layout()
if save:
if save is True:
ut.save_fig(filename = "NNC.%s" % (format), format = format,\
dpi = dpi, verbose = verbose)
else:
ut.save_fig(filename = save, format = format, dpi = dpi,\
verbose = verbose)
plt.close()
else:
plt.show()
def plotNN(self, idx, idx_to = None, NN = 8, verbose = True,\
handle = False, row = 1, col = 1, N = 1, save = False,\
format = "pdf", dpi = 100, legend = None, **kwargs):
"""Function to plot the distances to the N nearest neighbors
idx = int, [int,], Index from which to calculate nearest neighbors
NN = int, Number of nearest neighbors to keep
idx_to = int, [int,], Calculate the NN considering only these atoms
handle = bool, If True only prepare the axis don't draw the plot
row = int, Rows if used in subplots
col = int, Columns if used in subplots
N = int, Nr of plot if used in subplots
save = bool or str, Name to save the file to or save to default name
if True
format = valid matplotlib format, Format to save the plot in
dpi = int, DPI used when saving the plot
legend = [str,], Legend for the different entries
**kvargs = valid matplotlib errorbar kwargs
"""
if isinstance(idx, (np.integer, int)): idx = np.array([idx])
if idx_to is None: idx_to = np.arange(self.pos.shape[0])
distance = self.getNearestNeighbors(idx = idx, idx_to = idx_to,\
NN = NN, verbose = verbose)
x = np.arange(1, distance.shape[1] + 1)
if not handle:
hFig = plt.figure()
hAx = plt.subplot(row, col, N)
for i in range(np.shape(distance)[0]):
hAx.plot(x, distance[i, :], **kwargs)
hAx.set_title("Nearest Neighbor Distances")
hAx.set_xlabel("Neighbor")
hAx.set_ylabel("Distance, $(\AA)$")
if legend is not None:
hAx.legend(legend, ncol = 2, frameon = False)
plt.tight_layout()
if save:
if save is True:
ut.save_fig(filename = "NN.%s" % (format), format = format,\
dpi = dpi, verbose = verbose)
else:
ut.save_fig(filename = save, format = format, dpi = dpi,\
verbose = verbose)
plt.close()
else:
plt.show()
def getRDF(self, idx = None, idx_to = None, r = 6, dr = 0.1, bins = None,\
extend = np.array([1, 1, 1], dtype = bool), edges = False,\
verbose = 1):
"""Function for getting the radial distribution function around and
to specified atoms
idx = int, [int,], Index from which to calculate nearest neighbors
idx_to = int, [int,], Calculate the NN considering only these atoms
r = float, Cut off used in the RDF
dr = float, bins size used in the RDF
bins = int, Alternative to dr, specify the total number of bins
extend = np.ndarray([1/0, 1/0, 1/0]), Allow the cell to be extended in
the specified x, y, z directions
edges = bool, Include both edges
verbose = int, Print extra information
"""
"""Check some defaults"""
if idx is None: idx = np.arange(self.pos.shape[0])
if isinstance(idx, (int, np.integer)): idx = np.array([idx])
if idx_to is None: idx_to = np.arange(self.pos.shape[0])
if isinstance(idx_to, (int, np.integer)): idx_to = np.array([idx_to])
extend = np.array(extend, dtype = bool)
dist = self.getNeighborDistance(idx = idx, idx_to = idx_to, r = r,\
extend = extend, verbose = verbose)
"""If bins is specified it is used over dr"""
if bins is None:
bins = np.arange(0, r + dr, dr)
cnt, bin = np.histogram(dist, bins = bins, range = (0, r))
"""Get volume of the radial spheres, V=4/3*pi*(bin[1]^3 - bin[0]^3) """
V = 4/3*np.pi * (bin[1:]**3 - bin[:-1]**3)
"""Calculate the cumulative distribution without normalizing by V"""
N = np.shape(idx)
tot = np.cumsum(cnt / N)
"""Normalize the RD count by V and nr of atoms which the RDF is centered around"""
cnt = cnt / (N * V)
if not edges:
bin = bin[:-1] + bin[1] / 2
return cnt, bin, tot
def plotRDF(self, idx = None, idx_to = None, r = 6, dr = 0.1, bins = None,\
extend = np.array([1, 1, 1], dtype = bool), cumulative = False, legend = None,\
row = 1, col = 1, N = 1, handle = False, save = False, format = "pdf",\
dpi = 100, verbose = 1, **kwargs):
"""Function for ploting the RDF and cumulative distribution
idx = int, [int,], Index from which to calculate nearest neighbors
idx_to = int, [int,], Calculate the NN considering only these atoms
r = float, Cut off used in the RDF
dr = float, bins size used in the RDF
bins = int, Alternative to dr, specify the total number of bins
extend = np.ndarray([1/0, 1/0, 1/0]), Allow the cell to be extended in
the specified x, y, z directions
edges = bool, Include both edges
cumulative = bool, Add the cumulative RDF value to a right y-axis
legend = [str,] legend inputs
handle = bool, If True only prepare the axis don't draw the plot
row = int, Rows if used in subplots
col = int, Columns if used in subplots
N = int, Nr of plot if used in subplots
save = bool or str, Name to save the file to or save to default name
if True
format = valid matplotlib format, Format to save the plot in
dpi = int, DPI used when saving the plot
kwargs = valid matplotlib plot kwargs
"""
lbl_1 = None
if idx is None:
idx = [np.arange(self.pos.shape[0])]
elif isinstance(idx, (int, np.integer)):
idx = [np.array([idx])]
elif isinstance(idx[0], (int, np.integer)):
idx = [idx]
elif isinstance(idx, str) and idx.lower() == "species":
idx, lbl_1 = self.getElementIdx()[:2]
lbl_2 = None
if idx_to is None:
idx_to = [np.arange(self.pos.shape[0])]
elif isinstance(idx_to, (int, np.integer)):
idx_to = [np.array([idx_to])]
elif isinstance(idx_to[0], (int, np.integer)):
idx_to = [idx_to]
elif isinstance(idx_to, str) and idx_to.lower() == "species":
idx_to, lbl_2 = self.getElementIdx()[:2]
if len(idx) == 1:
l_idx = np.zeros(len(idx_to), dtype = np.int)
else:
l_idx = np.arange(len(idx), dtype = np.int)
if len(idx_to) == 1:
l_idx_to = np.zeros(len(idx), dtype = np.int)
else:
l_idx_to = np.arange(len(idx_to), dtype = np.int)
if l_idx.shape[0] != l_idx_to.shape[0]:
string = "Length of idx and idx_to does not match (%i, %i). "\
"Can be (1,N), (N,1) or (N,N)"\
% (l_idx.shape[0], l_idx_to.shape[0])
ut.infoPrint(string)
return
y = []; yt = []
for i in range(l_idx.shape[0]):
cnt, bin, tot = self.getRDF(idx = idx[l_idx[i]], idx_to = idx_to[l_idx_to[i]], r = r,\
dr = 0.1, bins = bins, extend = extend,\
edges = False, verbose = verbose)
y.append(cnt)
yt.append(tot)
if not handle:
hFig = plt.figure()
hAx = plt.subplot(row, col, N)
if cumulative:
hAxR = hAx.twinx()
label = "_ignore"
for i, item in enumerate(y):
if legend is not None:
if legend.lower() == "idx":
label = "%i -> %i" % (l_idx[i], l_idx_to[i])
else:
label = legend[i]
elif lbl_1 is not None and lbl_2 is not None:
label = "%2s -> %2s" % (lbl_1[i], lbl_2[i])
elif lbl_1 is not None:
label = "%2s -> %i" % (lbl_1[i], l_idx_to[i])
elif lbl_2 is not None:
label = "%i -> %2s" % (l_idx[i], lbl_2[i])
hL = hAx.plot(bin, item, linestyle = "-", label = label, **kwargs)
if cumulative:
hAxR.plot(bin, yt[i], linestyle = "--", color = hL[-1].get_color(), **kwargs)
hAx.set_xlabel("Radius, $\AA$")
hAx.set_ylabel("Atoms / (Atom * Volume), ($1/\AA^3$)")
if label != "_ignore":
hAx.legend(framealpha = 1, loc = "upper left")
hAx.set_title("RDF")
plt.tight_layout()
if save:
if save is True:
ut.save_fig(filename = "RDF.%s" % (format), format = format,\
dpi = dpi, verbose = verbose)
else:
ut.save_fig(filename = save, format = format, dpi = dpi,\
verbose = verbose)
plt.close()
else:
plt.show()
def getElementIdx(self):
"""Function for getting the atomic indices for each element"""
index = np.arange(self.pos.shape[0])
idx = []; element = []; nr = []
for i in np.unique(self.type_n):
idx.append(index[self.type_n == i])
nr.append(self.type_i[self.type_n == i][0])
element.append(i.decode("utf-8"))
return idx, element, nr
def extendStructure(self, x = 1, y = 1, z = 1, reset_index = False, verbose = 1):
"""Function for repeating the cell in x, y or z direction
x = int, extend this many times
y = int, extend this many times
z = int, extend this many times
reset_index = bool, Reset the index after extending the structure
verbose = Print Extra information
"""
"""Change to direct coordinates"""
self.car2dir()
if x < 1: x = 1
if y < 1: y = 1
if z < 1: z = 1
x = np.int(np.ceil(x))
y = np.int(np.ceil(y))
z = np.int(np.ceil(z))
l = self.pos.shape[0]
self.type_n = np.tile(self.type_n, x*y*z)
self.type_i = np.tile(self.type_i, x*y*z)
self.mass = np.tile(self.mass, x*y*z)
self.frozen = np.tile(self.frozen, (x*y*z, 1))
self.idx = np.tile(self.idx, x*y*z)
self.pos = np.tile(self.pos, (x*y*z, 1))
n = 0
for i in range(z):
for j in range(y):
for k in range(x):
self.pos[l*n:l*(n+1), :] += (k, j, i)
self.idx[l*n:l*(n+1)] += l*n
n += 1
"""Sort resulting properties, type-z-y-x"""
s = np.lexsort((self.pos[:, 0], self.pos[:, 1],\
self.pos[:, 2], self.type_i))
self.pos = self.pos[s, :]
self.type_i = self.type_i[s]
self.type_n = self.type_n[s]
self.mass = self.mass[s]
if reset_index:
self.idx = np.arange(self.pos.shape[0])
else:
self.idx = self.idx[s]
"""Convert the cell back to cartesian coordinates"""
self.dir2car()
"""Extend the cell"""
self.cell[:, 0] *= x
self.cell[:, 1] *= y
self.cell[:, 2] *= z
def stretchCell(self, x = 1, y = 1, z = 1, transform = None, verbose = 1):
"""Function for stretching the cell and positions of a structure"""
"""Change to direct coordinates"""
self.car2dir()
"""If the full transformation matrix is supplied it is used otherwise
it is built from the x, y and z entries"""
if transform is None:
transform = np.zeros((3, 3))
transform[0, 0] = x
transform[1, 1] = y
transform[2, 2] = z
if verbose > 0:
cell = self.cell.copy()
"""Transform the cell"""
self.cell = np.matmul(self.cell, transform)
if verbose > 0:
for i in range(cell.shape[0]):
string = "|%10.4f %10.4f %10.4f | %5s |%10.4f %10.4f %10.4f |"\
% (cell[i, 0], cell[i, 1], cell[i, 2], " -->",\
self.cell[i, 0], self.cell[i, 1], self.cell[i, 2])
if i == 0: print("=" * 35 + " " * 7 + "=" * 35)
print(string)
if i == 2: print("=" * 35 + " " * 7 + "=" * 35)
"""Transform positions back to cartesian coordinates"""
self.dir2car()
def getExtendedPositions(self, x = 0, y = 0, z = 0, idx = None,\
return_cart = True, verbose = 1):
"""Function for retreving an extended set of positions
x = int, times to extend the cell
y = int, times to extend the cell
z = int, times to extend the cell
idx = int, [int,], Index of atoms to include
return_cart = bool, Return the positions in cartesian coordinates
verbose = int, Print extra information
"""
if idx is None: idx = np.arange(self.pos.shape[0])
if isinstance(idx, (np.integer, int)): idx = np.arange([idx])
"""Total number of atoms"""
N = self.pos.shape[0]
"""Change to direct coordinates"""
self.car2dir()
l = np.shape(idx)[0]
xR = np.arange(0, x + 1, dtype = np.int)
yR = np.arange(0, y + 1, dtype = np.int)
zR = np.arange(0, z + 1, dtype = np.int)
pos = self.pos.copy()[idx, :].T
pos = np.tile(pos, (x + 1) * (y + 1) * (z + 1))
n = 0
for i in zR:
for j in yR:
for k in xR:
pos[:, l * n : l * (n + 1)] += np.array([k, j, i])[:, None]
n += 1
cell = self.cell.copy()
cell[:, 0] *= xR.shape[0]
cell[:, 1] *= yR.shape[0]
cell[:, 2] *= zR.shape[0]
if return_cart:
pos = np.matmul(self.cell, pos)
return pos.T, cell
else:
return pos.T, cell
def car2dir(self):
"""Change positions from cartesian to direct coordinates"""
if self.pos_type.lower() == "d":
return
"""c = M*d so here M(-1)*c = d"""
self.pos = np.matmul(np.linalg.inv(self.cell), self.pos.T).T
self.pos_type = "d"
def dir2car(self):
"""Change positions from direct to cartesian coordinates"""
if self.pos_type.lower() == "c":
return
"""c = M*d"""
self.pos = np.matmul(self.cell, self.pos.T).T
self.pos_type = "c"
def writeStructure(self, filename = None, format = "lammps",\
direct = False, sd = False, verbose = 1):
"""Function for writing the structure to specified file format
filename = str(), Filename to write to
format = str("lammps"/"vasp"/"eon"/"xyz"), Format to write to
direct = bool, Write in direct coordinates (VASP)
sd = bool, consider selective dynamics (VASP) and frozen atoms EON
verbose = int, Print extra information
"""
if filename is None:
if self.filename is None:
filename = "Structure.%s" % format
else:
filename = self.filename
"""Write the structure object to specified file"""
file_io.writeData(filename = filename, atoms = self, format = format,\
sd = sd, direct = direct, verbose = verbose - 1)
if verbose > 0:
string = "Structure written to file: %s (%s-format)" % (filename, format)
ut.infoPrint(string)
|
#!/usr/bin/env python
import click
import os
import codecs
import json
from nlppln.utils import create_dirs, out_file_name, get_files
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
@click.option('--mode', default='word')
def saf_to_text(in_dir, out_dir, mode):
    """Convert SAF (JSON) token files to plain text, one sentence per line.

    mode selects which token attribute is written: 'word' or 'lemma'.

    Bug fix: the final sentence of each document was silently dropped
    because it was never flushed after the token loop.
    """
    create_dirs(out_dir)

    if mode not in ('word', 'lemma'):
        raise ValueError("Unknown mode: {mode}, "
                         "please choose either word or lemma"
                         .format(**locals()))

    in_files = get_files(in_dir)
    for fi in in_files:
        with codecs.open(fi, encoding='utf-8') as f:
            saf = json.load(f)

        s_id = None
        lines = []
        sentence = []
        for t in saf['tokens']:
            if s_id is None:
                # first token of the document
                s_id = t['sentence']
            elif t['sentence'] != s_id:
                # sentence boundary: flush the sentence built so far
                lines.append(u' '.join(sentence))
                sentence = []
                s_id = t['sentence']
            sentence.append(t[mode])
        # flush the last sentence (previously lost)
        if sentence:
            lines.append(u' '.join(sentence))

        out_file = out_file_name(out_dir, os.path.basename(fi), ext='txt')
        with codecs.open(out_file, 'wb', encoding='utf-8') as f:
            f.write(u'\n'.join(lines))
            f.write(u'\n')


if __name__ == '__main__':
    saf_to_text()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Entry-point script: launch the pyoverwatch web app's built-in dev server.
from pyoverwatch import app
# Alternative invocation: no auto-reloader, ad-hoc TLS certificate.
# app.run(use_reloader=False, ssl_context='adhoc')
# Development mode: restart automatically when source files change.
# NOTE(review): `app.run(use_reloader=...)` looks like a Flask-style app —
# confirm in the pyoverwatch package.
app.run(use_reloader=True)
|
from pyomo.core.base import ConcreteModel, Set, Param, NonNegativeReals, Reals, Var, Binary, Constraint
from pyomo.environ import inequality
from ...simulation.individual.hwt import HWT
def factory(model: ConcreteModel, name: str, hwt: HWT, **kwargs):
    """Attach the MILP variables and constraints for a hot water tank (HWT)
    to *model*, prefixing every component name with ``name + '_'``.

    Args:
        model: Pyomo model that already provides the time set ``model.t``,
            the step length ``model.dt`` and a big-M constant ``model.M``.
        name: unique prefix for all components registered here.
        hwt: simulation-side HWT object supplying the physical parameters.
        **kwargs: not used in this factory; accepted for signature uniformity.
    """
    # helpers to register/fetch components on the model under the name prefix
    def s(key, value):
        setattr(model, name+'_'+key, value)
    def g(key):
        return getattr(model, name+'_'+key)
    # theta: tank temperature; b_charging: binary charge/discharge selector;
    # P_pos / P_neg: one-sided charging / discharging power;
    # P_th: net (signed) thermal power
    s('theta', Var(model.t, within=NonNegativeReals))
    s('b_charging', Var(model.t, within=Binary))
    s('P_pos', Var(model.t, within=NonNegativeReals))
    s('P_neg', Var(model.t, within=NonNegativeReals))
    s('P_th', Var(model.t, within=Reals))
    # minimum and maximum temperature
    def con_temp(model, i):
        return inequality(hwt.ambient_temperature, g('theta')[i], hwt.max_temp)
    s('con_temp', Constraint(model.t, rule=con_temp))
    # either charging or discharging: big-M bound forces P_pos to 0
    # whenever b_charging is 0
    def con_charging(model, t):
        return g('P_pos')[t] <= g('b_charging')[t] * model.M
    s('con_charging', Constraint(model.t, rule=con_charging))
    # complementary big-M bound: P_neg forced to 0 while charging
    def con_discharging(model, t):
        return g('P_neg')[t] <= (1 - g('b_charging')[t]) * model.M
    s('con_discharging', Constraint(model.t, rule=con_discharging))
    # power: net thermal power is the signed sum of the one-sided parts
    def con_power(model, t):
        return g('P_th')[t] == g('P_pos')[t] - g('P_neg')[t]
    s('con_power', Constraint(model.t, rule=con_power))
    # state computation: temperature recursion linking step t to t-1
    def con_state(model, t):
        # energy stored at the start of the step: derived from the previous
        # temperature for t > 0, otherwise taken from the HWT's initial state
        if t > 0:
            stored_energy = (g('theta')[t-1] - hwt.ambient_temperature) * hwt.tank_ws_per_k
        else:
            stored_energy = hwt.stored_energy
        # net energy added during the step, efficiencies applied per direction
        dQ = model.dt * (g('P_pos')[t] * hwt.charging_efficiency - g('P_neg')[t] / hwt.discharging_efficiency)
        # NOTE(review): dt appears to be in seconds (divided by 3600 here) and
        # the /2 looks like a midpoint-style loss term — confirm against the
        # simulation-side HWT implementation
        relative_loss_term = (model.dt / 60. / 60) / 2 * hwt.relative_loss
        return g('theta')[t] == (stored_energy * (1 - relative_loss_term) + dQ) / (1 + relative_loss_term) / hwt.tank_ws_per_k + hwt.ambient_temperature
    s('con_state', Constraint(model.t, rule=con_state))
|
from waldur_core.core import WaldurExtension
class MarketplaceVMwareExtension(WaldurExtension):
    """Waldur extension descriptor for the marketplace VMware plugin."""

    @staticmethod
    def django_app():
        """Return the dotted path of the Django app this extension provides."""
        return 'waldur_mastermind.marketplace_vmware'

    @staticmethod
    def is_assembly():
        """Flag this extension as an assembly extension."""
        return True
|
__all__=['grad_reverse', 'svhn2mnist', 'syn2gtrsb', 'usps'] |
# -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import unittest
import pytest
from iconservice.base.block import Block
from iconservice.base.exception import IllegalFormatException
from iconservice.database.wal import (
_MAGIC_KEY, _FILE_VERSION, _OFFSET_VERSION, _HEADER_SIZE,
WriteAheadLogReader, WriteAheadLogWriter, WALogable, WALState
)
from iconservice.icon_constant import Revision
from tests import create_block_hash
class WALogableData(WALogable):
    """Minimal WALogable backed by a plain dict, used as a test fixture."""

    def __init__(self, data):
        # mapping of key bytes -> value bytes (or None)
        self._data = data

    def __iter__(self):
        """Yield (key, value) pairs in the underlying dict's order."""
        yield from self._data.items()
class TestWriteAheadLog(unittest.TestCase):
    """Round-trip and corruption tests for the WAL writer/reader pair."""

    def setUp(self) -> None:
        # temporary WAL file created by each test
        self.path = "./test.wal"
        # two independent key/value batches; None and b"" exercise the
        # distinction between absent and empty values
        self.log_data = [
            {
                b"a": b"apple",
                b"b": b"banana",
                b"c": None,
                b"d": b""
            },
            {
                b"1": None,
                b"2": b"2-hello",
                b"4": b"",
                b"3": b"3-world"
            }
        ]
        # random but structurally valid block header
        self.block = Block(
            block_height=random.randint(0, 1000),
            block_hash=os.urandom(32),
            prev_hash=os.urandom(32),
            timestamp=random.randint(0, 1_000_000),
            cumulative_fee=random.randint(0, 1_000_000)
        )

    def tearDown(self) -> None:
        # Best-effort cleanup: the file may not exist if a test failed early.
        # A bare "except:" would also swallow KeyboardInterrupt/SystemExit,
        # so catch only filesystem errors.
        try:
            os.remove(self.path)
        except OSError:
            pass

    def test_writer_and_reader(self):
        """Write two batches plus state transitions, then read them back."""
        revision = Revision.IISS.value
        log_count = 2
        instant_block_hash = create_block_hash()

        writer = WriteAheadLogWriter(revision, log_count, self.block, instant_block_hash)
        writer.open(self.path)
        writer.write_state(WALState.CALC_PERIOD_START_BLOCK.value, add=False)
        writer.write_walogable(WALogableData(self.log_data[0]))
        writer.write_state(WALState.WRITE_RC_DB.value, add=True)
        writer.write_walogable(WALogableData(self.log_data[1]))
        writer.write_state(WALState.WRITE_STATE_DB.value, add=True)
        state = (WALState.WRITE_RC_DB | WALState.WRITE_STATE_DB).value
        writer.write_state(state, add=False)
        writer.close()

        reader = WriteAheadLogReader()
        reader.open(self.path)
        # header fields must round-trip exactly
        assert reader.magic_key == _MAGIC_KEY
        assert reader.version == _FILE_VERSION
        assert reader.revision == revision
        assert reader.state == state
        assert reader.log_count == log_count
        assert reader.block == self.block
        assert reader.instant_block_hash == instant_block_hash

        for i in range(len(self.log_data)):
            data = {}
            for key, value in reader.get_iterator(i):
                data[key] = value
            # equal content but a distinct dict object (no aliasing)
            assert data == self.log_data[i]
            assert id(data) != id(self.log_data[i])

        reader.close()

    def test_invalid_magic_key(self):
        """A corrupted magic key must be rejected on open()."""
        revision = Revision.IISS.value
        log_count = 2
        instant_block_hash = create_block_hash()

        writer = WriteAheadLogWriter(revision, log_count, self.block, instant_block_hash)
        writer.open(self.path)
        writer.close()

        # Make the magic key invalid (overwrite the first 4 bytes)
        with open(self.path, "rb+") as f:
            ret = f.write(b"iwal")
            assert ret == 4

        reader = WriteAheadLogReader()
        with pytest.raises(IllegalFormatException):
            reader.open(self.path)

    def test_invalid_version(self):
        """An unsupported file version must be rejected on open()."""
        revision = Revision.IISS.value
        log_count = 2
        instant_block_hash = create_block_hash()

        writer = WriteAheadLogWriter(revision, log_count, self.block, instant_block_hash)
        writer.open(self.path)
        writer.close()

        # Make the version invalid
        with open(self.path, "rb+") as f:
            f.seek(_OFFSET_VERSION)
            version = 0xFFFFFFFF
            ret = f.write(version.to_bytes(4, "big"))
            assert ret == 4

        reader = WriteAheadLogReader()
        with pytest.raises(IllegalFormatException):
            reader.open(self.path)

    def test_out_of_header_size(self):
        """A file shorter than the fixed-size header must be rejected."""
        revision = Revision.IISS.value
        log_count = 2
        instant_block_hash = create_block_hash()

        writer = WriteAheadLogWriter(
            revision=revision, max_log_count=log_count, block=self.block, instant_block_hash=instant_block_hash)
        writer.open(self.path)
        writer.close()

        # Truncate into the middle of the header; use a context manager so
        # the handle is closed even if truncate() raises
        with open(self.path, "rb+") as f:
            f.truncate(_HEADER_SIZE - 4)

        reader = WriteAheadLogReader()
        with pytest.raises(IllegalFormatException):
            reader.open(self.path)
|
"""
* Copyright 2020, Maestria de Humanidades Digitales,
* Universidad de Los Andes
*
* Developed for the Msc graduation project in Digital Humanities
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# =========================================
# native python libraries
# =========================================
# import re
# import os
# import copy
# import urllib
# import requests
# =========================================
# extension python libraries
# =========================================
# from urllib.parse import urlparse
# import unicodedata
# =========================================
# developed python libraries
# =========================================
import Conf
from Lib.Utils import Err
assert Conf
assert Err
# =========================================
# Global variables
# =========================================
DEFAULT_HTML_PARSER = "html.parser"


class Topic():
    """
    this module make a request of an URL and helps translate
    data into readable information for the dataframe
    """

    # =========================================
    # class variables
    # =========================================
    # class-level defaults; __init__ shadows them with instance attributes
    url = str()
    request = None
    sbody = None
    shead = None
    dialect = DEFAULT_HTML_PARSER

    def __init__(self, *args, **kwargs):
        """
        class creator for Topic()

        Args:
            url (str): page url to recover. Defaults is empty str
            dialect (str): beautifulSoup parser dialect. Defaults
            "html.parser"

        Raises:
            exp: raise a generic exception if something goes wrong
        """
        try:
            # default object attributes
            self.url = str()
            self.dialect = DEFAULT_HTML_PARSER
            self.request = None
            self.sbody = None
            self.shead = None

            # first positional argument, when present, is the URL.
            # (Replaces the fragile `args.index(arg) == 0` check, which
            # matched the wrong element when *args contained duplicates.)
            if len(args) > 0:
                self.url = args[0]

            # keyword arguments may override the parser dialect
            if "dialect" in kwargs:
                self.dialect = kwargs.get("dialect")

        # exception handling
        except Exception as exp:
            Err.reraise(exp, "Topic: __init__")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project: Macaron O/R Mapper
# Module: Tests
"""
Testing for basic usage.
"""
import unittest, warnings
import macaron
from models import Team, Member, Song
DB_FILE = ":memory:"
class TestQueryOperation(unittest.TestCase):
    """Tests for macaron query operations: validation borders,
    many-to-many relations, and LIMIT/OFFSET slicing."""

    # fixture rows: (first_name, last_name, part, age)
    names = [
        ("Ritsu" , "Tainaka" , "Dr" , 17),
        ("Mio" , "Akiyama" , "Ba" , 17),
        ("Yui" , "Hirasawa", "Gt1", 17),
        ("Tsumugi", "Kotobuki", "Kb" , 17),
        ("Azusa" , "Nakano" , "Gt2", 16),
    ]

    def setUp(self):
        """Create a fresh in-memory database and its tables for each test."""
        macaron.macaronage(DB_FILE, lazy=True)
        macaron.create_table(Team)
        macaron.create_table(Member)
        macaron.create_table(Song)

    def tearDown(self):
        """Commit and release the connection after each test."""
        macaron.SQL_TRACE_OUT = None
        macaron.bake()
        macaron.cleanup()

    def testBorderValues(self):
        """Field validators must reject out-of-range and missing values."""
        azusa = Member.create(first_name="Azusa", last_name="Nakano", part="Gt2", age=16)
        # assertTrue replaces the deprecated assert_ alias (removed in 3.12)
        self.assertTrue(azusa)
        def _age_exceeded(): azusa.age = 19
        def _age_underrun(): azusa.age = 14
        self.assertRaises(macaron.ValidationError, _age_exceeded)
        self.assertRaises(macaron.ValidationError, _age_underrun)
        def _too_long_part_name(): azusa.part = "1234567890A"
        self.assertRaises(macaron.ValidationError, _too_long_part_name)
        def _name_is_not_set(): Member.create(first_name="Azusa")
        self.assertRaises(macaron.ValidationError, _name_is_not_set)

    def testManyToManyOperation(self):
        """Members can belong to several songs and vice versa."""
        team = Team.create(name="Houkago Tea Time")
        for name in self.names:
            team.members.append(first_name=name[0], last_name=name[1], part=name[2], age=name[3])
        song1 = Song.create(name="Utauyo!! MIRACLE")
        song2 = Song.create(name="Tenshi ni Fureta yo!")
        for m in Member.all(): song1.members.append(m)
        for m in Member.select(age=17): song2.members.append(m)
        members = song1.members
        self.assertEqual(members.count(), 5)
        members = song2.members
        self.assertEqual(members.count(), 4)
        # reverse direction: from a member back to her songs
        azusa = Member.get(first_name="Azusa")
        songs = azusa.songs
        self.assertEqual(songs.count(), 1)
        self.assertEqual(songs[0].name, "Utauyo!! MIRACLE")

    def testLimitOffset(self):
        """offset()/limit() must slice the ordered result set."""
        team = Team.create(name="Houkago Tea Time")
        for name in self.names:
            team.members.append(first_name=name[0], last_name=name[1], part=name[2], age=name[3])
        # OFFSET 2
        qs = Member.all().offset(2).order_by("id")
        self.assertEqual(qs[0].first_name, "Yui")
        self.assertEqual(qs[1].first_name, "Tsumugi")
        self.assertEqual(qs.count(), 3)
        # LIMIT 1 OFFSET 2
        qs = Member.all().offset(2).limit(1).order_by("id")
        self.assertEqual(qs[0].first_name, "Yui")
        self.assertEqual(qs.count(), 1)
|
# coding: utf-8
# paramiko
# windows ssh http://www.freesshd.com/, putty
# http://www.techrepublic.com/blog/tr-dojo/set-up-a-free-ssh-server-on-windows-7-with-freesshd/
# http://www.janaka.co.uk/2012/12/how-to-configure-freesshd-on-windows.html
#
# Win:
# https://www.youtube.com/watch?v=Zei2DPCnMl4
# https://askubuntu.com/questions/204400/ssh-public-key-no-supported-authentication-methods-available-server-sent-publ
#
# Not working:
# https://wthwdik.wordpress.com/2011/03/28/how-to-get-freesshd-public-key-authentication-to-work/
# never got public-key auth working on Windows
import paramiko
import select
import socket
# from socket import Queue
import sys
# import Queue # no module
if __name__ == '__main__':
    # https://stackoverflow.com/questions/760978/long-running-ssh-commands-in-python-paramiko-module-and-how-to-end-them
    # NOTE(review): experimental scratch script — the branches below are
    # toggled by editing the literal 0/1 conditions by hand.
    if 0:
        # disabled branch: stream output of a long-running remote command
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.connect('localhost')
        channel = client.get_transport().open_session()
        channel.exec_command("tail -f /var/log/everything/current")
        while True:
            # stop once the remote command has finished
            if channel.exit_status_ready():
                break
            # non-blocking poll of the channel (zero timeout)
            rl, wl, xl = select.select([channel], [], [], 0.0)
            if len(rl) > 0:
                # print
                # data is read but discarded (the print above is commented out)
                channel.recv(1024)
    if 1:
        # https://pymotw.com/2/select/
        # no poll() on Windows
        pass
|
import hcuppy.utils as utils
import string
class CPT:
    """Map CPT procedure codes to their CPT section information."""

    def __init__(self):
        # packaged CSV: CPT code -> {"sect": ..., "desc": ...}
        fn = "data/cpt_sections.csv"
        self.cpt2sect = utils.read_cpt_sect(fn)

    def download_data(self):
        """Download the raw CPT section data via hcuppy.utils."""
        utils.download_cpt()

    def get_cpt_section(self, x_lst):
        """Return section info for one CPT code or a list of codes.

        Accepts either a single code (returns one dict) or a list of codes
        (returns a list of dicts).  Unknown codes map to
        ``{"sect": "na", "desc": "na"}``.
        """
        output_type = "list"
        if not isinstance(x_lst, list):
            output_type = "value"
            x_lst = [x_lst]

        # dict.get with a default replaces the manual membership test
        out_default = {"sect": "na",
                       "desc": "na"}
        sect_lst = [self.cpt2sect.get(x, out_default) for x in x_lst]

        if output_type == "value":
            return sect_lst[0]
        return sect_lst
|
from django.http import HttpRequest, HttpResponse
from zerver.decorator import internal_notify_view
from zerver.lib.email_mirror import mirror_email_message
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
@internal_notify_view(False)
@has_request_variables
def email_mirror_message(
    request: HttpRequest,
    rcpt_to: str = REQ(),
    msg_base64: str = REQ(),
) -> HttpResponse:
    """Feed a base64-encoded inbound email to the email-mirror handler.

    Raises JsonableError with the handler's message when it reports an
    error status; otherwise responds with the standard success payload.
    """
    outcome = mirror_email_message(rcpt_to, msg_base64)
    if outcome["status"] != "error":
        return json_success()
    raise JsonableError(outcome["msg"])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.