content stringlengths 5 1.05M |
|---|
# coding: utf-8
"""
Jamf Pro API
## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used solely for documentation purposes. # noqa: E501
The version of the OpenAPI document: 10.25.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from jamf.configuration import Configuration
class UserMappings(object):
    """LDAP user attribute mapping model.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Review changes: the fifteen copy-pasted setter validation stanzas are
    factored into `_require` / `_require_one_of`, and `to_dict` iterates
    with plain `dict` iteration instead of `six.iteritems`.
    """

    # openapi_types: attribute name -> attribute type.
    openapi_types = {
        'object_class_limitation': 'str',
        'object_classes': 'str',
        'search_base': 'str',
        'search_scope': 'str',
        'additional_search_base': 'str',
        'user_id': 'str',
        'username': 'str',
        'real_name': 'str',
        'email_address': 'str',
        'department': 'str',
        'building': 'str',
        'room': 'str',
        'phone': 'str',
        'position': 'str',
        'user_uuid': 'str'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'object_class_limitation': 'objectClassLimitation',
        'object_classes': 'objectClasses',
        'search_base': 'searchBase',
        'search_scope': 'searchScope',
        'additional_search_base': 'additionalSearchBase',
        'user_id': 'userID',
        'username': 'username',
        'real_name': 'realName',
        'email_address': 'emailAddress',
        'department': 'department',
        'building': 'building',
        'room': 'room',
        'phone': 'phone',
        'position': 'position',
        'user_uuid': 'userUuid'
    }

    def __init__(self, object_class_limitation=None, object_classes=None, search_base=None, search_scope=None, additional_search_base='', user_id=None, username=None, real_name=None, email_address=None, department=None, building='', room='', phone='', position=None, user_uuid=None, local_vars_configuration=None):  # noqa: E501
        """UserMappings - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._object_class_limitation = None
        self._object_classes = None
        self._search_base = None
        self._search_scope = None
        self._additional_search_base = None
        self._user_id = None
        self._username = None
        self._real_name = None
        self._email_address = None
        self._department = None
        self._building = None
        self._room = None
        self._phone = None
        self._position = None
        self._user_uuid = None
        self.discriminator = None

        # Assign through the properties so client-side validation applies.
        self.object_class_limitation = object_class_limitation
        self.object_classes = object_classes
        self.search_base = search_base
        self.search_scope = search_scope
        if additional_search_base is not None:
            self.additional_search_base = additional_search_base
        self.user_id = user_id
        self.username = username
        self.real_name = real_name
        self.email_address = email_address
        self.department = department
        self.building = building
        self.room = room
        self.phone = phone
        self.position = position
        self.user_uuid = user_uuid

    def _require(self, name, value):
        """Raise ValueError when a required attribute is None.

        No-op when client-side validation is disabled. Shared by every
        required-attribute setter to avoid repeating the generated check.
        """
        if self.local_vars_configuration.client_side_validation and value is None:  # noqa: E501
            raise ValueError("Invalid value for `%s`, must not be `None`" % name)  # noqa: E501

    def _require_one_of(self, name, value, allowed_values):
        """Raise ValueError when `value` is not in `allowed_values`.

        No-op when client-side validation is disabled.
        """
        if self.local_vars_configuration.client_side_validation and value not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `{0}` ({1}), must be one of {2}"  # noqa: E501
                .format(name, value, allowed_values)
            )

    @property
    def object_class_limitation(self):
        """Gets the object_class_limitation of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._object_class_limitation

    @object_class_limitation.setter
    def object_class_limitation(self, object_class_limitation):
        """Sets the object_class_limitation of this UserMappings.

        :type object_class_limitation: str
        """
        self._require('object_class_limitation', object_class_limitation)
        self._require_one_of('object_class_limitation', object_class_limitation,
                             ["ANY_OBJECT_CLASSES", "ALL_OBJECT_CLASSES"])
        self._object_class_limitation = object_class_limitation

    @property
    def object_classes(self):
        """Gets the object_classes of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._object_classes

    @object_classes.setter
    def object_classes(self, object_classes):
        """Sets the object_classes of this UserMappings.

        :type object_classes: str
        """
        self._require('object_classes', object_classes)
        self._object_classes = object_classes

    @property
    def search_base(self):
        """Gets the search_base of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._search_base

    @search_base.setter
    def search_base(self, search_base):
        """Sets the search_base of this UserMappings.

        :type search_base: str
        """
        self._require('search_base', search_base)
        self._search_base = search_base

    @property
    def search_scope(self):
        """Gets the search_scope of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._search_scope

    @search_scope.setter
    def search_scope(self, search_scope):
        """Sets the search_scope of this UserMappings.

        :type search_scope: str
        """
        self._require('search_scope', search_scope)
        self._require_one_of('search_scope', search_scope,
                             ["ALL_SUBTREES", "FIRST_LEVEL_ONLY"])
        self._search_scope = search_scope

    @property
    def additional_search_base(self):
        """Gets the additional_search_base of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._additional_search_base

    @additional_search_base.setter
    def additional_search_base(self, additional_search_base):
        """Sets the additional_search_base of this UserMappings.

        Optional attribute: None is accepted.
        :type additional_search_base: str
        """
        self._additional_search_base = additional_search_base

    @property
    def user_id(self):
        """Gets the user_id of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this UserMappings.

        :type user_id: str
        """
        self._require('user_id', user_id)
        self._user_id = user_id

    @property
    def username(self):
        """Gets the username of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._username

    @username.setter
    def username(self, username):
        """Sets the username of this UserMappings.

        :type username: str
        """
        self._require('username', username)
        self._username = username

    @property
    def real_name(self):
        """Gets the real_name of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._real_name

    @real_name.setter
    def real_name(self, real_name):
        """Sets the real_name of this UserMappings.

        :type real_name: str
        """
        self._require('real_name', real_name)
        self._real_name = real_name

    @property
    def email_address(self):
        """Gets the email_address of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._email_address

    @email_address.setter
    def email_address(self, email_address):
        """Sets the email_address of this UserMappings.

        :type email_address: str
        """
        self._require('email_address', email_address)
        self._email_address = email_address

    @property
    def department(self):
        """Gets the department of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._department

    @department.setter
    def department(self, department):
        """Sets the department of this UserMappings.

        :type department: str
        """
        self._require('department', department)
        self._department = department

    @property
    def building(self):
        """Gets the building of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._building

    @building.setter
    def building(self, building):
        """Sets the building of this UserMappings.

        :type building: str
        """
        self._require('building', building)
        self._building = building

    @property
    def room(self):
        """Gets the room of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._room

    @room.setter
    def room(self, room):
        """Sets the room of this UserMappings.

        :type room: str
        """
        self._require('room', room)
        self._room = room

    @property
    def phone(self):
        """Gets the phone of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._phone

    @phone.setter
    def phone(self, phone):
        """Sets the phone of this UserMappings.

        :type phone: str
        """
        self._require('phone', phone)
        self._phone = phone

    @property
    def position(self):
        """Gets the position of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._position

    @position.setter
    def position(self, position):
        """Sets the position of this UserMappings.

        :type position: str
        """
        self._require('position', position)
        self._position = position

    @property
    def user_uuid(self):
        """Gets the user_uuid of this UserMappings.  # noqa: E501

        :rtype: str
        """
        return self._user_uuid

    @user_uuid.setter
    def user_uuid(self, user_uuid):
        """Sets the user_uuid of this UserMappings.

        :type user_uuid: str
        """
        self._require('user_uuid', user_uuid)
        self._user_uuid = user_uuid

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Plain dict iteration replaces six.iteritems; behavior is identical
        # and drops the six dependency from this block.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UserMappings):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, UserMappings):
            return True
        return self.to_dict() != other.to_dict()
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sort_quick_singly_linked_list_implement.py
# python
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by Chyi Yaqing on 02/19/19 09:24.
# Copyright © 2019. Chyi Yaqing.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
QuickSort on Singly Linked List
The important thing about this implementation is that it changes pointers
rather than swapping data, and its time complexity is the same as the
implementation for a Doubly Linked List.
"""
# Node class
class Node:
    """One element of a singly linked list: a payload plus a successor link."""

    def __init__(self, data):
        """Store the payload; a fresh node has no successor yet."""
        self.data = data
        self.next = None
# Singly Linked list class contains a Node object
class SinglyLinkedList:
    """A singly linked list that tracks both head and tail nodes.

    Fixes over the original:
      * `append`, `insertAfter`, `deleteNode` and `reverse` now keep
        `self.tail` consistent (previously only `push` maintained it, so
        `quickSort(slist.head, slist.tail)` was wrong after any other
        mutation);
      * `push` detects emptiness via `self.head is None` instead of an
        O(n) `getCount()` call;
      * `append` runs in O(1) using the maintained tail pointer;
      * `detectLoop` returns False explicitly when no cycle exists
        (the original fell off the end and returned None).
    """

    def __init__(self):
        """Create an empty list."""
        self.head = None  # first node, or None when empty
        self.tail = None  # last node, or None when empty

    def push(self, new_data):
        """Insert a new node holding `new_data` at the beginning."""
        new_node = Node(new_data)
        new_node.next = self.head
        if self.head is None:
            # Empty list: the new node is also the last node.
            self.tail = new_node
        self.head = new_node

    def insertAfter(self, prev_node, new_data):
        """Insert a new node holding `new_data` right after `prev_node`."""
        if prev_node is None:
            print("The given previous node must in LinkedList.")
            return
        new_node = Node(new_data)
        new_node.next = prev_node.next
        prev_node.next = new_node
        if self.tail is prev_node:
            # Inserting after the old tail makes the new node the tail.
            self.tail = new_node

    def append(self, new_data):
        """Append a new node holding `new_data` at the end in O(1)."""
        new_node = Node(new_data)
        if self.head is None:
            self.head = new_node
            self.tail = new_node
            return
        self.tail.next = new_node
        self.tail = new_node

    def deleteNode(self, key):
        """Delete the first node whose data equals `key`, if present."""
        temp = self.head
        # Head holds the key: unlink it directly.
        if temp is not None and temp.data == key:
            self.head = temp.next
            if self.tail is temp:
                self.tail = None  # list is now empty
            return
        # Walk the list keeping the predecessor so we can relink past temp.
        prev = None
        while temp is not None and temp.data != key:
            prev = temp
            temp = temp.next
        if temp is None:
            return  # key not present
        prev.next = temp.next
        if self.tail is temp:
            self.tail = prev  # deleted the last node

    def search(self, x):
        """Return True if `x` is present in the list, else False."""
        current = self.head
        while current is not None:
            if current.data == x:
                return True
            current = current.next
        return False

    def reverse(self):
        """Reverse the list in place, updating both head and tail."""
        prev = None
        current = self.head
        self.tail = self.head  # old head becomes the new tail
        while current is not None:
            nxt = current.next
            current.next = prev
            prev = current
            current = nxt
        self.head = prev

    def detectLoop(self):
        """Return True if the list contains a cycle (hash-set method)."""
        seen = set()
        temp = self.head
        while temp:
            # Seeing the same node object twice proves a cycle.
            if temp in seen:
                return True
            seen.add(temp)
            temp = temp.next
        return False

    def detectLoopFloyd(self):
        """Print 'Found Loop' if a cycle exists (Floyd's cycle-finding)."""
        slow_p = self.head
        fast_p = self.head
        while slow_p and fast_p and fast_p.next:
            slow_p = slow_p.next
            fast_p = fast_p.next.next
            if slow_p == fast_p:
                print("Found Loop")
                return

    def getCount(self):
        """Return the number of nodes in the list (iterative count)."""
        count = 0
        temp = self.head
        while temp:
            count += 1
            temp = temp.next
        return count

    def middle(self):
        """Print the middle element (slow/fast pointer technique)."""
        slow_p = self.head
        fast_p = self.head
        if self.head is not None:
            while fast_p is not None and fast_p.next is not None:
                fast_p = fast_p.next.next
                slow_p = slow_p.next
            print("The middle element is [{}]\n".format(slow_p.data))
        else:
            print("the linked list is None, so no middle node")

    def printList(self):
        """Print the list as 'HEADER ->  a ->  b ... None' without newline."""
        temp = self.head
        print("HEADER", end='')
        while temp:
            print(' -> ', temp.data, end='')
            temp = temp.next
        print(" None", end='')
# Find the before
def find_before(first, last, search_node):
    """Return the node within [first, last] whose `next` is `search_node`.

    Returns None when no such predecessor exists in the span.
    """
    node = first
    stop = last.next  # one past the end of the span
    while node is not stop:
        if node.next == search_node:
            return node
        node = node.next
    return None
def swap(A, B):
    """Exchange the data payloads of nodes A and B (links untouched)."""
    A.data, B.data = B.data, A.data
def partition(head, end):
    """Lomuto-style partition of the sublist [head, end] by value.

    Uses `end`'s value as the pivot. Values (not nodes) are moved: every
    value smaller than the pivot is swapped into the prefix of the sublist,
    then the pivot value is placed immediately after that prefix.
    Returns the node that now holds the pivot value.
    """
    boundary = head  # first node of the ">= pivot" region
    walker = head
    while walker is not end:
        if walker.data < end.data:
            swap(boundary, walker)
            boundary = boundary.next
        walker = walker.next
    # Drop the pivot value onto the boundary node.
    swap(end, boundary)
    return boundary
def quickSort(head, tail):
    """Sort the sublist [head, tail] in place by value (quicksort).

    Fixes over the original:
      * base case — the original `if head.next is not tail` skipped
        two-element sublists entirely, leaving them unsorted;
      * bounds — when the pivot lands at `head` or `tail` the original
        recursed past the sublist (e.g. `quickSort(head, None)`), walking
        beyond the span; empty sides are now skipped explicitly.
    """
    # Sublists of length 0 or 1 are already sorted.
    if head is None or tail is None or head is tail:
        return
    m = partition(head, tail)
    # Right side: values >= pivot (empty when the pivot ended up at tail).
    if m is not tail:
        quickSort(m.next, tail)
    # Left side: values < pivot (empty when the pivot ended up at head).
    if m is not head:
        quickSort(head, find_before(head, tail, m))
# Driver program to test above functions
if __name__ == '__main__':
    slist = SinglyLinkedList()
    # push() prepends, so the resulting list is 30 -> 3 -> 4 -> 20 -> 5.
    slist.push(5)
    slist.push(20)
    slist.push(4)
    slist.push(3)
    slist.push(30)
    print("Original Singly Linked List")
    slist.printList()
    # Sort the whole list; push() keeps slist.tail pointing at the last node.
    quickSort(slist.head, slist.tail)
    print("\n\nSorted Singly Linked List")
    slist.printList()
|
from Harbir_101917050 import main
# main('file','file2') |
# Copyright (c) 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import six
from sahara.utils import files
# job execution status (as reported by the execution engine)
JOB_STATUS_DONEWITHERROR = 'DONEWITHERROR'
JOB_STATUS_FAILED = 'FAILED'
JOB_STATUS_KILLED = 'KILLED'
JOB_STATUS_PENDING = 'PENDING'
JOB_STATUS_RUNNING = 'RUNNING'
JOB_STATUS_SUCCEEDED = 'SUCCEEDED'
JOB_STATUS_TOBEKILLED = 'TOBEKILLED'
# statuses for terminated jobs (no further state transitions expected)
JOB_STATUSES_TERMINATED = [
    JOB_STATUS_DONEWITHERROR,
    JOB_STATUS_FAILED,
    JOB_STATUS_KILLED,
    JOB_STATUS_SUCCEEDED
]
# job type separator character (splits "Type.Subtype" strings)
JOB_TYPE_SEP = '.'
# job sub types available
JOB_SUBTYPE_STREAMING = 'Streaming'
JOB_SUBTYPE_NONE = ''
# job types available
JOB_TYPE_HIVE = 'Hive'
JOB_TYPE_JAVA = 'Java'
JOB_TYPE_MAPREDUCE = 'MapReduce'
JOB_TYPE_SPARK = 'Spark'
JOB_TYPE_STORM = 'Storm'
# composite type, e.g. "MapReduce.Streaming"
JOB_TYPE_MAPREDUCE_STREAMING = (JOB_TYPE_MAPREDUCE + JOB_TYPE_SEP +
                                JOB_SUBTYPE_STREAMING)
JOB_TYPE_PIG = 'Pig'
JOB_TYPE_SHELL = 'Shell'
# job type groupings available
JOB_TYPES_ALL = [
    JOB_TYPE_HIVE,
    JOB_TYPE_JAVA,
    JOB_TYPE_MAPREDUCE,
    JOB_TYPE_MAPREDUCE_STREAMING,
    JOB_TYPE_PIG,
    JOB_TYPE_SHELL,
    JOB_TYPE_SPARK,
    JOB_TYPE_STORM
]
# job configuration keys that toggle the EDP adapters
ADAPT_FOR_OOZIE = 'edp.java.adapt_for_oozie'
ADAPT_SPARK_FOR_SWIFT = 'edp.spark.adapt_for_swift'
def split_job_type(job_type):
    '''Split a job type string into a type and subtype

    The split is done on the first '.'. A subtype will
    always be returned, even if it is empty.
    '''
    # str.partition always yields three parts, so the subtype defaults to ''.
    jtype, _, jsubtype = job_type.partition(JOB_TYPE_SEP)
    return [jtype, jsubtype]
def compare_job_type(job_type, *args, **kwargs):
    '''Compare a job type against a list of job types

    :param job_type: The job type being compared
    :param *args: A list of types to compare against
    :param strict: Passed as a keyword arg. Default is False.
                   If strict is False, job_type will be compared
                   with and without its subtype indicator.
    :returns: True if job_type is present in the list, False otherwise
    '''
    strict = kwargs.get('strict', False)
    if job_type in args:
        return True
    # An exact match failed; unless asked for a loose match on a
    # compound type, that is the final answer.
    if strict or JOB_TYPE_SEP not in job_type:
        return False
    base_type = split_job_type(job_type)[0]
    return base_type in args
def get_hive_shared_conf_path(hdfs_user):
    """Return the HDFS path of the shared hive-site.xml for `hdfs_user`."""
    return "/user/{0}/conf/hive-site.xml".format(hdfs_user)
def is_adapt_for_oozie_enabled(configs):
    """Return the `edp.java.adapt_for_oozie` config value (False if unset)."""
    return configs.get(ADAPT_FOR_OOZIE, False)
def is_adapt_spark_for_swift_enabled(configs):
    """Return the `edp.spark.adapt_for_swift` config value (False if unset)."""
    return configs.get(ADAPT_SPARK_FOR_SWIFT, False)
def get_builtin_binaries(job, configs):
    """Return the builtin wrapper jars to ship with the job, if any.

    Java jobs configured for the Oozie adapter get the edp-main-wrapper
    jar under a unique name; every other case ships nothing.
    """
    if job.type != JOB_TYPE_JAVA or not is_adapt_for_oozie_enabled(configs):
        return []
    jar_path = 'service/edp/resources/edp-main-wrapper.jar'
    # A uuid suffix keeps wrapper names unique per execution.
    unique_name = 'builtin-%s.jar' % six.text_type(uuid.uuid4())
    return [{'raw': files.get_file_text(jar_path),
             'name': unique_name}]
|
#!/usr/bin/env python
"""This module defines the entry of console commend.
"""
import sys
from cogdata.arguments import get_args
def main():
    """Console entry point: parse CLI arguments and dispatch the subcommand."""
    parsed = get_args()
    if parsed is None:
        # Nothing to do (e.g. help was printed or parsing declined).
        return
    parsed.func(parsed)


if __name__ == '__main__':
    main()
|
# Contains the code for managing psychiatrist post comments.
# Imports --------------------------------------------------------------------------------
from datetime import datetime
from flask_login import current_user
from flaskr.mood_tracker.forms import MoodReview
from flaskr import db
from flaskr.mood_tracker.models import PatientFeelings
from flaskr.register.models import Patient
# Functions ------------------------------------------------------------------------------
def get_my_patients():
    """Return the usernames of every patient assigned to the current user."""
    rows = db.session.query(Patient.username).filter_by(
        psychiatrist_id=current_user.bacp_number).all()
    # Each row is a one-element tuple like ('joshua',); unpack to plain strings.
    return [username for (username,) in rows]
def get_my_flagged():
    """Return usernames of the current user's patients flagged for urgent help."""
    rows = (db.session.query(Patient.username)
            .filter_by(psychiatrist_id=current_user.bacp_number)
            .filter_by(requires_urgent_help=1)
            .all())
    # Each row is a one-element tuple like ('joshua',); unpack to plain strings.
    return [username for (username,) in rows]
def get_my_moods():  # Get all moods.
    """Return every PatientFeelings row for the current user's patients."""
    my_patients = get_my_patients()
    # NOTE(review): this filters on Patient.username without joining Patient
    # to PatientFeelings, which relies on an implicit cross join; confirm the
    # intended filter is PatientFeelings.patient_id.in_(my_patients).
    my_patient_moods = db.session.query(PatientFeelings).filter(Patient.username.in_(my_patients)).all()
    # Returns PatientFeelings objects, which can then be referenced later on in python with .feeling_id.
    return my_patient_moods
def get_my_flagged_moods_and_accounts():
    """Return (flagged moods, matching Patient records) for urgent-help patients.

    The PatientFeelings objects can be referenced later with .feeling_id.
    """
    flagged_usernames = get_my_flagged()
    flagged_moods = db.session.query(PatientFeelings).filter(
        PatientFeelings.patient_id.in_(flagged_usernames)).all()
    patient_records = []
    for mood in flagged_moods:
        # The query returns a list; index 0 is the single matching Patient.
        matches = db.session.query(Patient).filter(
            Patient.username == mood.patient_id).all()
        patient_records.append(matches[0])
    return flagged_moods, patient_records
def get_moods_not_replied():  # Get all moods not replied to.
    """Return every PatientFeelings entry of the user's patients that has
    no psychiatrist comment yet.

    Bug fix: the original appended the whole per-patient result list once
    per entry (`list_of_moods.append(patient_feelings_sorted)`), producing
    nested duplicate lists; each individual entry is now appended once.
    """
    my_patients = get_my_patients()  # All of the user's patients.
    list_of_moods = []
    for patient in my_patients:
        # All of this patient's entries that have no comment yet.
        pending = db.session.query(PatientFeelings).filter_by(patient_id=patient). \
            filter_by(psychiatrist_comment=None).all()
        for feelings in pending:
            if feelings:  # Skip empty rows, matching the original intent.
                list_of_moods.append(feelings)
    # Returns PatientFeelings objects, referenced later with .feeling_id.
    return list_of_moods
def get_details_from_username(username):
    """Return an unexecuted query for Patient rows matching `username`.

    NOTE: callers receive a Query object, not results; they must call
    .first()/.all() on it themselves.
    """
    return db.session.query(Patient).filter_by(username=username)
"""
Didn't have enough time to fully implement this functionality.
def get_patient_mood(patient_id, limit):
patient_mood = db.session.query(PatientFeelings).
filter_by(PatientFeelings.patient_id == patient_id).limit(limit).all()
return patient_mood
def get_patient_mood_by_date(patient, limit, date):
patient_mood = get_patient_mood(patient, limit)
mood_by_date = patient_mood.filter_by(PatientFeelings.date_id == date).all()
return mood_by_date
# Methods --------------------------------------------------------------------------------
def psychiatrist_comment(post_id):
# Forms --------------------------------------------------
psychiatrist_mood_review = MoodReview() # Initialises a new instance of our Mood Review Form.
# Functions ----------------------------------------------
if psychiatrist_mood_review.validate_on_submit():
# Queries ----------------------------------------------
# Find the corresponding post ID from our table, and store the values.
update_field = db.PatientFeelings.query.filter_by(feelings_id=post_id).first()
# Get the user ID corresponding to this post.
patient_id = update_field.username
# Find the corresponding user, and set their 'in danger' box to safe.
update_safety = db.Patient.query.filter_by(username=patient_id).first
update_field.psychiatrist_comment = psychiatrist_mood_review.psychiatrist_comment
update_field.date_psychiatrist_updated = datetime.utcnow().date()
update_safety.requires_urgent_help = False
db.session.commit()
# Jinja Template must render form=psychiatrist_mood_review
""" |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Elastigroup(pulumi.CustomResource):
availability_zones: pulumi.Output[list]
"""
List of availability zones for the group.
"""
backend_services: pulumi.Output[list]
"""
Describes the backend service configurations.
"""
description: pulumi.Output[str]
"""
The region your GCP group will be created in.
"""
desired_capacity: pulumi.Output[int]
"""
The desired number of instances the group should have at any time.
"""
disks: pulumi.Output[list]
draining_timeout: pulumi.Output[int]
"""
Time (seconds) the instance is allowed to run after it is detached from the group. This is to allow the instance time to drain all the current TCP connections before terminating it.
"""
fallback_to_ondemand: pulumi.Output[bool]
gpu: pulumi.Output[list]
"""
Defines the GPU configuration.
"""
health_check_grace_period: pulumi.Output[int]
"""
Period of time (seconds) to wait for VM to reach healthiness before monitoring for unhealthiness.
"""
instance_types_customs: pulumi.Output[list]
"""
Defines a set of custom instance types. Required if instance_types_preemptible and instance_types_on_demand are not set.
"""
instance_types_ondemand: pulumi.Output[str]
instance_types_preemptibles: pulumi.Output[list]
"""
The preemptible VMs instance type. To maximize cost savings and market availability, select as many types as possible. Required if instance_types_on_demand is not set.
"""
ip_forwarding: pulumi.Output[bool]
labels: pulumi.Output[list]
"""
Array of objects with key-value pairs.
"""
max_size: pulumi.Output[int]
"""
The maximum number of instances the group should have at any time.
"""
metadatas: pulumi.Output[list]
"""
Array of objects with key-value pairs.
"""
min_size: pulumi.Output[int]
"""
The minimum number of instances the group should have at any time.
"""
name: pulumi.Output[str]
"""
Name of this access configuration.
"""
network_interfaces: pulumi.Output[list]
"""
Array of objects representing the network configuration for the elastigroup.
"""
ondemand_count: pulumi.Output[int]
preemptible_percentage: pulumi.Output[int]
"""
Percentage of Preemptible VMs to spin up from the "desired_capacity".
"""
scaling_down_policies: pulumi.Output[list]
"""
Contains scaling policies for scaling the Elastigroup down.
"""
scaling_up_policies: pulumi.Output[list]
"""
Contains scaling policies for scaling the Elastigroup up.
"""
service_account: pulumi.Output[str]
"""
The email of the service account in which the group instances will be launched.
"""
startup_script: pulumi.Output[str]
"""
Create and run your own startup scripts on your virtual machines to perform automated tasks every time your instance boots up.
"""
subnets: pulumi.Output[list]
"""
A list of regions and subnets.
"""
tags: pulumi.Output[list]
"""
Tags to mark created instances.
"""
def __init__(__self__, resource_name, opts=None, availability_zones=None, backend_services=None, description=None, desired_capacity=None, disks=None, draining_timeout=None, fallback_to_ondemand=None, gpu=None, health_check_grace_period=None, instance_types_customs=None, instance_types_ondemand=None, instance_types_preemptibles=None, ip_forwarding=None, labels=None, max_size=None, metadatas=None, min_size=None, name=None, network_interfaces=None, ondemand_count=None, preemptible_percentage=None, scaling_down_policies=None, scaling_up_policies=None, service_account=None, startup_script=None, subnets=None, tags=None, __name__=None, __opts__=None):
    """
    Provides a Spotinst elastigroup GCP resource.
    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[list] availability_zones: List of availability zones for the group.
    :param pulumi.Input[list] backend_services: Describes the backend service configurations.
    :param pulumi.Input[str] description: The region your GCP group will be created in.
    :param pulumi.Input[int] desired_capacity: The desired number of instances the group should have at any time.
    :param pulumi.Input[list] disks: Disk configurations for the group instances (not documented upstream -- verify against the provider schema).
    :param pulumi.Input[int] draining_timeout: Time (seconds) the instance is allowed to run after it is detached from the group. This is to allow the instance time to drain all the current TCP connections before terminating it.
    :param pulumi.Input[bool] fallback_to_ondemand: Presumably whether to fall back to on-demand VMs when preemptible capacity is unavailable -- verify against the provider schema.
    :param pulumi.Input[list] gpu: Defines the GPU configuration.
    :param pulumi.Input[int] health_check_grace_period: Period of time (seconds) to wait for VM to reach healthiness before monitoring for unhealthiness.
    :param pulumi.Input[list] instance_types_customs: Defines a set of custom instance types. Required if instance_types_preemptible and instance_types_on_demand are not set.
    :param pulumi.Input[str] instance_types_ondemand: Presumably the on-demand VM instance type -- verify against the provider schema.
    :param pulumi.Input[list] instance_types_preemptibles: The preemptible VMs instance type. To maximize cost savings and market availability, select as many types as possible. Required if instance_types_on_demand is not set.
    :param pulumi.Input[bool] ip_forwarding: Presumably enables GCP IP forwarding on the instances -- verify against the provider schema.
    :param pulumi.Input[list] labels: Array of objects with key-value pairs.
    :param pulumi.Input[int] max_size: The maximum number of instances the group should have at any time.
    :param pulumi.Input[list] metadatas: Array of objects with key-value pairs.
    :param pulumi.Input[int] min_size: The minimum number of instances the group should have at any time.
    :param pulumi.Input[str] name: Name of this access configuration.
    :param pulumi.Input[list] network_interfaces: Array of objects representing the network configuration for the elastigroup.
    :param pulumi.Input[int] ondemand_count: Presumably the number of on-demand instances to maintain -- verify against the provider schema.
    :param pulumi.Input[int] preemptible_percentage: Percentage of Preemptible VMs to spin up from the "desired_capacity".
    :param pulumi.Input[list] scaling_down_policies: Contains scaling policies for scaling the Elastigroup down.
    :param pulumi.Input[list] scaling_up_policies: Contains scaling policies for scaling the Elastigroup up.
    :param pulumi.Input[str] service_account: The email of the service account in which the group instances will be launched.
    :param pulumi.Input[str] startup_script: Create and run your own startup scripts on your virtual machines to perform automated tasks every time your instance boots up.
    :param pulumi.Input[list] subnets: A list of regions and subnets.
    :param pulumi.Input[list] tags: Tags to mark created instances.
    """
    # Legacy keyword aliases, kept for backward compatibility with old callers.
    if __name__ is not None:
        warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
        resource_name = __name__
    if __opts__ is not None:
        warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
        opts = __opts__
    if not resource_name:
        raise TypeError('Missing resource name argument (for URN creation)')
    if not isinstance(resource_name, str):
        raise TypeError('Expected resource name to be a string')
    if opts and not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    __props__ = dict()
    __props__['availability_zones'] = availability_zones
    __props__['backend_services'] = backend_services
    __props__['description'] = description
    # Only desired_capacity and name are validated as required in this constructor.
    if desired_capacity is None:
        raise TypeError('Missing required property desired_capacity')
    __props__['desired_capacity'] = desired_capacity
    __props__['disks'] = disks
    __props__['draining_timeout'] = draining_timeout
    __props__['fallback_to_ondemand'] = fallback_to_ondemand
    __props__['gpu'] = gpu
    __props__['health_check_grace_period'] = health_check_grace_period
    __props__['instance_types_customs'] = instance_types_customs
    __props__['instance_types_ondemand'] = instance_types_ondemand
    __props__['instance_types_preemptibles'] = instance_types_preemptibles
    __props__['ip_forwarding'] = ip_forwarding
    __props__['labels'] = labels
    __props__['max_size'] = max_size
    __props__['metadatas'] = metadatas
    __props__['min_size'] = min_size
    if name is None:
        raise TypeError('Missing required property name')
    __props__['name'] = name
    __props__['network_interfaces'] = network_interfaces
    __props__['ondemand_count'] = ondemand_count
    __props__['preemptible_percentage'] = preemptible_percentage
    __props__['scaling_down_policies'] = scaling_down_policies
    __props__['scaling_up_policies'] = scaling_up_policies
    __props__['service_account'] = service_account
    __props__['startup_script'] = startup_script
    __props__['subnets'] = subnets
    __props__['tags'] = tags
    super(Elastigroup, __self__).__init__(
        'spotinst:gcp/elastigroup:Elastigroup',
        resource_name,
        __props__,
        opts)
def translate_output_property(self, prop):
    """Map a camelCase API property name to its snake_case SDK name.

    Falls back to the name unchanged when no table entry exists.
    """
    snake = tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake or prop
def translate_input_property(self, prop):
    """Map a snake_case SDK property name to its camelCase API name.

    Falls back to the name unchanged when no table entry exists.
    """
    camel = tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel or prop
|
import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
from utils.utils import weights_init_kaiming, weights_init_classifier
class ClassBlock(nn.Module):
    """Bottleneck + classifier head: Linear -> [BN] -> [LeakyReLU] -> [Dropout] -> Linear.

    When ``return_f`` is True, ``forward`` returns ``[logits, embedding]``
    (the embedding taken just before the classifier); otherwise it returns
    the logits alone.
    """
    def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f=False):
        super(ClassBlock, self).__init__()
        self.return_f = return_f
        layers = []
        if linear:
            layers.append(nn.Linear(input_dim, num_bottleneck))
        else:
            # Without the bottleneck projection the head works on the raw input width.
            num_bottleneck = input_dim
        if bnorm:
            layers.append(nn.BatchNorm1d(num_bottleneck))
        if relu:
            layers.append(nn.LeakyReLU(0.1))
        if droprate > 0:
            layers.append(nn.Dropout(p=droprate))
        bottleneck = nn.Sequential(*layers)
        bottleneck.apply(weights_init_kaiming)
        head = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        head.apply(weights_init_classifier)
        self.add_block = bottleneck
        self.classifier = head
    def forward(self, x):
        x = self.add_block(x)
        if not self.return_f:
            return self.classifier(x)
        feature = x
        logits = self.classifier(feature)
        return [logits, feature]
class Resnet50_ft(nn.Module):
    """ResNet-50 backbone with a ClassBlock head for classification / re-ID.

    :param class_num: number of output classes (default 751).
    :param droprate: dropout rate passed to the ClassBlock head.
    :param stride: 2 for the stock backbone; 1 removes the last downsampling
        step so layer4 keeps a larger feature map.
    :param circle: forwarded to ClassBlock as ``return_f`` (also return the
        pre-classifier embedding).
    :param ibn: use the IBN-a variant of ResNet-50 from torch.hub instead of
        the torchvision model.
    """
    def __init__(self, class_num=751, droprate=0.5, stride=2, circle=False, ibn=False):
        super(Resnet50_ft, self).__init__()
        # Fix: the previous code always downloaded the torchvision ResNet-50
        # and then discarded it when ibn was set ("if ibn == True" was also
        # an anti-idiom); load only the model that is actually used.
        if ibn:
            model_ft = torch.hub.load('XingangPan/IBN-Net', 'resnet50_ibn_a', pretrained=True)
        else:
            model_ft = models.resnet50(pretrained=True)
        # stride=1 keeps layer4's spatial resolution by neutralizing its
        # first block's downsampling convs.
        if stride == 1:
            model_ft.layer4[0].downsample[0].stride = (1,1)
            model_ft.layer4[0].conv2.stride = (1,1)
        # avg pooling to global pooling
        model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.model = model_ft
        self.circle = circle
        self.classifier = ClassBlock(2048, class_num, droprate, return_f = circle)
    def forward(self, x):
        # Run the backbone stage by stage, then flatten and classify.
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = self.model.avgpool(x)
        x = x.view(x.size(0), x.size(1))
        x = self.classifier(x)
        return x
if __name__ == '__main__':
    # Smoke test: build the model and check input/output shapes.
    net = Resnet50_ft(751, stride=1, ibn=True)
    # net.classifier = nn.Sequential()
    # print(net)
    # Fix: torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4)
    # and torch.FloatTensor(8, 3, 224, 224) yields *uninitialized* memory;
    # use randn for a well-defined dummy batch.
    dummy = torch.randn(8, 3, 224, 224)
    print(dummy.shape)
    output = net(dummy)
    print(output.shape)
|
#!/usr/bin/env python
'''
Copyright (c) 2020 RIKEN
All Rights Reserved
See file LICENSE for details.
'''
import log,traceback
class load:
    """Pipeline parameter container: defaults plus ONT-specific overrides.

    Reads hard-coded defaults, applies long-read (ONT) overrides taken from
    the parsed CLI *args*, logs every resulting attribute, and exits the
    whole process on any failure.
    """
    def __init__(self, args):
        log.logger.debug('started')
        try:
            # default
            self.genome_cov_thresholds=0.05 # Defining high coverage viruses relies greatly on this parameter
            self.ave_depth_of_mapped_region_threshold=3 # Defining high coverage viruses relies greatly on this parameter
            self.hisat2_mismatch_penalties='2,1'
            self.min_seq_len=20
            self.bedgraph_bin=1
            self.reconst_minimum_depth=1
            # Nanopore BAM input uses a deeper reconstruction default,
            # optionally overridden by -ONT_recon_min_depth.
            if args.ONT_bamin is True:
                self.reconst_minimum_depth=5
                if args.ONT_recon_min_depth is not None:
                    if isinstance(args.ONT_recon_min_depth, int) is False:
                        log.logger.error('No integer was specified with -ONT_recon_min_depth flag.')
                        exit(1)
                    self.reconst_minimum_depth= int(args.ONT_recon_min_depth)
                    # NOTE(review): both %s placeholders receive the same value;
                    # the first was probably meant to be the flag name -- confirm.
                    log.logger.info('%s was specified with -ONT_recon_min_depth flag. It will use %s.' % (args.ONT_recon_min_depth, args.ONT_recon_min_depth))
            self.ont_hhv6_ratio_threshold=2
            self.gzip_compresslevel=1
            self.metaspades_kmer='21,33,55'
            self.metaspades_memory=4
            self.quick_check_read_num=1000000
            # Dump every attribute set above so runs are reproducible from logs.
            params_for_debug=[]
            for k,v in self.__dict__.items():
                params_for_debug.append('%s=%s' % (k, str(v)))
            log.logger.debug('parameters:\n'+ '\n'.join(params_for_debug))
        except:
            log.logger.error('\n'+ traceback.format_exc())
            exit(1)
|
import math
import networkx
import numpy
from matplotlib import pyplot as plt
from networkx import DiGraph
from networkx import nx_agraph
def route_len(g: DiGraph):
    """Return the summed 'cost' attribute over every edge of *g*."""
    costs = [attrs['cost'] for _, _, attrs in g.edges(data = True)]
    return numpy.sum(costs)
def closest_neighbor(g: DiGraph, v, forbidden: set):
    """Return the cheapest outgoing neighbor of *v* not in *forbidden*.

    Ties keep the first neighbor encountered; returns None when every
    neighbor is forbidden (or *v* has none).
    """
    best = None
    best_cost = math.inf
    for candidate in g.neighbors(v):
        if candidate in forbidden:
            continue
        cost = g.edges[v, candidate]['cost']
        if cost < best_cost:
            best_cost, best = cost, candidate
    return best
def draw_route_graph_geo(graph: DiGraph, file: str):
    """Plot *graph* at its 'x'/'y' node coordinates and save the figure to *file*."""
    xs = list(networkx.get_node_attributes(graph, 'x').values())
    ys = list(networkx.get_node_attributes(graph, 'y').values())
    # Pair each node with its (x, y) position, relying on attribute-dict
    # insertion order matching graph.nodes (as the original did).
    positions = {node: pos for node, pos in zip(graph.nodes, zip(xs, ys))}
    plt.clf()
    networkx.draw_networkx(graph, positions, with_labels = True)
    plt.savefig(file)
def draw_route_graph(graph: DiGraph, file: str):
    """Render *graph* via Graphviz 'dot': edges labeled with their cost,
    nodes labeled as "<name>: <demand>". Writes the drawing to *file*."""
    drawable = nx_agraph.to_agraph(graph)
    for src, dst, attrs in graph.edges(data = True):
        drawable.get_edge(src, dst).attr['label'] = attrs['cost']
    for node, attrs in graph.nodes(data = True):
        gv_node = drawable.get_node(node)
        demand = attrs['demand']
        gv_node.attr['label'] = f'{node}: {demand}'
    drawable.layout('dot')
    drawable.draw(file)
|
import re
from unicodedata import category
def letter_only(text):
    """Reduce *text* to word characters, apostrophes and spaces.

    Every character outside ``[\\w']`` is first replaced by a space, then any
    character still outside ``[\\w' ]`` is removed.

    Fix: the patterns are now raw strings -- ``"\\w"`` inside a normal string
    literal is an invalid escape sequence (DeprecationWarning today, a
    SyntaxError in future Python versions).
    """
    return re.sub(r"[^\w' ]", "",
                  re.sub(r"[^\w']", " ", text))
def normalize(text):
    """Lowercase *text* and strip non-word punctuation via letter_only."""
    return letter_only(text.lower())
def split(text):
    """Tokenize *text* on runs of whitespace; leading/trailing runs are ignored."""
    tokens = text.split()
    return tokens
def count(text):
    """Return a token -> frequency dict for the iterable of tokens *text*.

    Uses ``dict.get`` with a default instead of a membership test followed
    by two indexed accesses (one lookup per token instead of three); an
    empty iterable yields an empty dict.
    """
    words = {}
    for word in text:
        words[word] = words.get(word, 0) + 1
    return words
# Script entry: print the ten most frequent words of hamlet.txt.
with open("hamlet.txt", encoding="UTF-8") as file:
    hamlet = file.read()
text = normalize(hamlet)
wordstream = split(text)
counts = count(wordstream)
# sorted ascending by frequency, so the last ten items are the top ten
print(sorted(counts.items(), key=lambda x: x[1])[-10:])
|
import argparse
from argparse import RawTextHelpFormatter
from dataclasses import dataclass
import sys
queue = []
def rr(n, timeslice):
    """Round-robin scheduling over the module-level ``queue``.

    Cycles through the ``n`` processes, giving each unfinished one up to
    ``timeslice`` units of CPU, until all complete.  Returns
    ``(total_wait, total_turn)`` summed over all processes.

    NOTE(review): arrival times are never consulted here -- the simulation
    behaves as if every process were available at time 0.  Confirm that
    this is the intended simplification.
    """
    total_wait = total_turn = complete = time = 0
    while complete != n:
        for i in range(n):
            if not queue[i].isCompleted:
                # Process finishes within this slice...
                if queue[i]._burst <= timeslice:
                    time += queue[i]._burst
                    queue[i]._burst = 0
                # ...or consumes the full slice and keeps a remainder.
                else:
                    queue[i]._burst -= timeslice
                    time += timeslice
                if queue[i]._burst == 0:
                    queue[i].isCompleted = True
                    complete += 1
                    queue[i].completion = time
                    queue[i].turnaround = queue[i].completion - queue[i].arrival
                    # waiting = turnaround - original burst (_burst held the remainder)
                    queue[i].waiting = queue[i].turnaround - queue[i].burst
                    total_turn += queue[i].turnaround
                    total_wait += queue[i].waiting
    return total_wait, total_turn
def computePreemptive(n, priority = False):
    """Preemptive scheduling over the module-level ``queue``.

    With ``priority`` False this is Shortest Remaining Time First (smallest
    remaining ``_burst`` wins); with ``priority`` True it is preemptive
    priority scheduling where the *largest* numeric ``priority`` wins.
    Ties are broken in favor of the earlier arrival.  The clock advances
    one unit per iteration, so idle gaps before late arrivals are handled.
    Returns ``(total_wait, total_turn)`` summed over all processes.
    """
    global queue
    completed = time = total_turn = total_wait = 0
    while completed != n:
        idx = -1
        # Sentinels: priorities grow upward from -1; remaining bursts
        # shrink downward from sys.maxsize.
        if priority:
            low = -1
        else:
            low = sys.maxsize
        for i in range(n):
            # Only processes that have arrived and are unfinished compete.
            if not queue[i].isCompleted and time >= queue[i].arrival:
                if priority:
                    if low < queue[i].priority:
                        low = queue[i].priority
                        idx = i
                    # NOTE(review): after the update above this condition is
                    # always true for the just-selected process (idx == i),
                    # making the arrival tie-break a no-op in that case --
                    # harmless, but probably intended as an elif.
                    if low == queue[i].priority:
                        if queue[i].arrival < queue[idx].arrival:
                            low = queue[i].priority
                            idx = i
                else:
                    if low > queue[i]._burst:
                        low = queue[i]._burst
                        idx = i
                    if low == queue[i]._burst:
                        if queue[i].arrival < queue[idx].arrival:
                            low = queue[i]._burst
                            idx = i
        time += 1
        # idx == -1 means nothing has arrived yet: the CPU idles one tick.
        if idx != -1:
            queue[idx]._burst -= 1
            if queue[idx]._burst == 0:
                queue[idx].completion = time
                queue[idx].turnaround = queue[idx].completion - queue[idx].arrival
                # waiting = turnaround - original burst (_burst held the remainder)
                queue[idx].waiting = queue[idx].turnaround - queue[idx].burst
                total_wait += queue[idx].waiting
                total_turn += queue[idx].turnaround
                completed += 1
                queue[idx].isCompleted = True
    return total_wait, total_turn
def computeNonPreemptive(n, prio = False):
    """Non-preemptive scheduling (SJF, or priority when ``prio`` is True).

    Drains the arrival-sorted module-level ``queue`` through a ready list
    ``temp``; the process at ``temp[0]`` runs to completion, after which
    the remaining ready processes are re-sorted.  Rebinds ``queue`` to the
    completion-ordered result and returns ``(total_wait, total_turn)``.

    NOTE(review): exactly one process is admitted from ``queue`` per time
    tick regardless of its actual arrival stamp, so arrival times are only
    honored indirectly.  Also note the sort is *ascending* on ``priority``
    (smallest number first) while computePreemptive treats the largest
    number as highest priority -- confirm which convention is intended.
    """
    global queue
    time = total_turn = total_wait = 0
    temp = [queue.pop(0)]
    res = []
    while len(res) != n:
        temp[0]._burst -= 1
        time += 1
        # Admit the next waiting process (at most one per tick).
        if len(queue) > 0:
            temp.append(queue.pop(0))
        if temp[0]._burst <= 0:
            temp[0].isCompleted = True
            temp[0].completion = time
            # abs() masks any negative intermediates -- see NOTE above.
            temp[0].turnaround = abs(temp[0].completion - temp[0].arrival)
            temp[0].waiting = abs(temp[0].turnaround - temp[0].burst)
            total_wait += temp[0].waiting
            total_turn += temp[0].turnaround
            res.append(temp.pop(0))
            # Choose the next job: shortest burst, or smallest priority value.
            if prio:
                temp = sorted(temp, key=lambda l: l.priority)
            else:
                temp = sorted(temp, key=lambda l: l.burst)
    queue = res
    return total_wait, total_turn
def fcfs(n):
    """First-come-first-served over the arrival-sorted module-level ``queue``.

    Fills in completion, turnaround and waiting time for each of the ``n``
    processes and returns ``(total_wait, total_turn)``.

    Fix: the previous version computed ``completion[i] = completion[i-1] +
    burst[i]`` unconditionally, which is wrong whenever the CPU sits idle
    because process i arrives *after* process i-1 finishes; the ``abs()``
    calls then silently masked the resulting negative waiting times.  Each
    process now starts at ``max(previous completion, own arrival)``.
    """
    # First process: the CPU starts when it arrives.
    queue[0].completion = queue[0].burst + queue[0].arrival
    total_turn = queue[0].turnaround = queue[0].completion - queue[0].arrival
    total_wait = queue[0].waiting = queue[0].turnaround - queue[0].burst
    queue[0].isCompleted = True
    queue[0]._burst = 0
    for i in range(1, n):
        # Start when the CPU frees up or the process arrives, whichever is later.
        start = max(queue[i - 1].completion, queue[i].arrival)
        queue[i].completion = start + queue[i].burst
        queue[i].turnaround = queue[i].completion - queue[i].arrival
        queue[i].waiting = queue[i].turnaround - queue[i].burst
        total_wait += queue[i].waiting
        total_turn += queue[i].turnaround
        queue[i].isCompleted = True
        queue[i]._burst = 0
    return total_wait, total_turn
def main():
    """Interactive driver: read the processes, run the scheduler chosen by
    ``args.func``, then print per-process and average statistics.
    """
    global queue
    numberOfProcess = int(input("Enter number of process(es): "))
    for i in range(numberOfProcess):
        arrival = int(input("Enter the arrival time: "))
        burst = int(input("Enter the burst time: "))
        # _burst carries the mutable "remaining burst" copy; burst stays pristine.
        process = Process(i, arrival, burst, _burst=burst)
        if args.func == "prio" or args.func == "nprio":
            process.priority = int(input("Enter the priority: "))
        queue.append(process)
    #sort by arrival
    queue = sorted(queue, key=lambda l: l.arrival)
    # NOTE(review): the argparse help text advertises "pprio" but the
    # dispatch here accepts "prio" -- confirm which spelling is intended.
    if args.func == "sjf":
        wait, turn = computeNonPreemptive(numberOfProcess)
    elif args.func == "nprio":
        wait, turn = computeNonPreemptive(numberOfProcess, True)
    elif args.func == "srtf":
        wait, turn = computePreemptive(numberOfProcess)
    elif args.func == "prio":
        wait, turn = computePreemptive(numberOfProcess, True)
    elif args.func == "rr":
        timeslice = int(input("Enter the time quantum of the system: "))
        wait, turn = rr(numberOfProcess, timeslice)
    else:
        # Any unrecognized value silently falls back to FCFS (the default).
        wait, turn = fcfs(numberOfProcess)
    print("Process ID\tArrival Time\tBurst Time\tWaiting Time\tTurnaround Time")
    for i in range(numberOfProcess):
        print(f"{queue[i].P}\t\t{queue[i].arrival}\t\t{queue[i].burst}\t\t{queue[i].waiting}\t\t{queue[i].turnaround}")
    print("Average Waiting Time: {:.2f} ms".format(wait / numberOfProcess))
    print("Average Turnaround Time: {:.2f} ms".format(turn / numberOfProcess))
    # Debug dump of the final dataclass records.
    for i in queue:
        print(i)
@dataclass
class Process:
    """One schedulable process plus its accumulated scheduling statistics."""
    P: int  # process id (creation order)
    arrival: int  # arrival time
    burst: int  # original CPU burst length (never mutated)
    priority: int = 0  # only read by the prio/nprio algorithms
    completion: int = 0  # time at which the process finished
    waiting: int = 0  # turnaround - burst
    turnaround: int = 0  # completion - arrival
    isCompleted: bool = False  # set once a scheduler finishes the process
    _burst: int = 0  # remaining burst; mutated during simulation
# Command-line interface: -f/--func selects the scheduling algorithm;
# anything unrecognized falls back to FCFS inside main().
parser = argparse.ArgumentParser(
    prog="cpu process scheduler",
    description="Choose cpu scheduling algorithm",
    formatter_class=RawTextHelpFormatter
)
parser.add_argument(
    "-f", "--func",
    type=str,
    default="fcfs",
    # Fix: the help text previously advertised "pprio", but main() dispatches
    # on "prio" (see the args.func comparisons); also added the missing space
    # in the Round Robin line.
    help="\n\n\"fcfs\" - First-come, First-served\n"+
         "\"sjf\" - Shortest Job First\n"+
         "\"nprio\" - Non Preemptive Priority\n" +
         "\"srtf\" - Shortest Remaining Time First\n" +
         "\"prio\" - Preemptive Priority\n" +
         "\"rr\" - Round Robin\n")
args = parser.parse_args()
if __name__ == "__main__":
    main()
# %% Importing libraries
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import time
import os
import random
import copy
import loss_landscapes
import loss_landscapes.metrics
from mpl_toolkits.mplot3d import axes3d, Axes3D # ignore warning, we need this
from sklearn.decomposition import PCA
from CNN_functions import files_tensor, getListOfFiles, batch_generator, string_tensor, \
sec_max, weights_init, plot_confusion_matrix, weights_to_list, list_to_weights
import pandas as pd
import seaborn as sb
from scipy.special import softmax
# %% ----- CUDA initialization ------------------------------------------------------------------------------------------
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# NOTE: I did not soft code CUDA, so if CUDA is not available, one needs to go through the script manually and disable it
# (simply ctrl+F and comment out the lines with "cuda")
# %% ----- Delete old net -----------------------------------------------------------------------------------------------
if 'net' in locals():
del net
# %% ----- Set Parameters and Folders -----------------------------------------------------------------------------------
# Place where output folders are created (does not have to be desktop):
DT = r'[enter path]'
# Location of training data:
dirName = r'[enter path]'
# Location of fooling data:
dataset_fool_dir = r'[enter path]'
# NOTE: it can be advantageous to set this dir in the relevant code block below, so that one can re-execute only that
# code block with various fooling datasets
BS = 50 # batch size
epochs = 12 # number of epochs for training
fig_dpi = 150 # dpi for saved figures
fig_font_size = 12 # font size for figures
random_initialization = True # set False to use saved_weights from saved_weights dir in the next line
saved_weights = r'D:\ddavidse\Desktop\5 class network runs\network runs - background - full data\Network_run_16\initial_weights.pth'
set_random_seed = False # set True to use the seed value in the next line
seed_value = 784410
# %% ----- Configure dataset split ---------------------------------------------------------------------------------------
ValFrac = 1/6 # set fraction of total dataset used for validation
TestFrac = 1/6 # set fraction of total dataset used for testing
# Note: look at the size of test and val sets in run_info.txt and adjust these settings if necessary
# %% ----- Select features -----------------------------------------------------------------------------------------------
confusion_flag = True # make True to output confusion matrices
batchnorm_flag = True # make True to use batch normalization
misclassified_images_flag = False # make True to output misclassified images
misclassified_outputs_flag = False # make True to output misclassified net output values to run info.txt
loss_landscape_flag = False # make True to create a loss landscape based on random planes
PCA_flag = False # make True to create a loss landscape based on PCA
MAV_flag = False # make True to output MAV stuff
baseline_flag = False # make True to output baseline stuff
# %% ----- Loss landscapes package settings (used if loss_landscape_flag is True) --------------------------------------------------
STEPS = 40 # resolution of random loss landscape, higher = more detail
# %% ----- PCA settings (used if PCA_flag is True) ---------------------------------------------------------------------------------
FilterSteps = 40 # resolution for PCA loss landscape
filter_normalization_flag = True # make True to use filter normalization for PCA loss landscape
distance_multiplier = 2 # distance multiplier for filter normalization, should be roughly between 1 and 5
bowl = False # make True for alternative visualization of convex loss landscape
contour_center = False # make True to output test plots with center and min of contour
# %% ----- Create output folder ------------------------------------------------------------------------------------------
os.chdir(DT)
mfname = 'Network_runs'
if not os.path.exists(mfname):
os.mkdir(mfname)
fdir = '{}\\{}'.format(DT,mfname)
counter = 0
tv = 0
while tv == 0:
counter += 1
fname = '{}\\Network_run_{}'.format(fdir, counter)
if not os.path.exists(fname):
os.mkdir(fname)
tv = 1
# %% ----- detecting number of images per class --------------------------------------------------------------------------
os.chdir(dirName)
DirListTop = os.listdir()
names = DirListTop
N = []
for x in names:
os.chdir(x)
DirListSub = os.listdir()
N.append(len(DirListSub))
os.chdir(dirName)
NC = np.cumsum(N)
# %% ----- detecting file size -------------------------------------------------------------------------------------------
os.chdir(names[0])
filelist = os.listdir()
testfile = filelist[0]
testimage = sio.loadmat(testfile)
testimage2 = list(testimage.values())
testimage3 = testimage2[3]
input_size = len(testimage3)
# %% ----- Obtaining and storing data ------------------------------------------------------------------------------------
# Data
listOfFiles = getListOfFiles(dirName)
listOfImages = files_tensor(listOfFiles)
# Labels
label = np.zeros(sum(N))
labels = [x for x in range(len(names))]
for i in range(0,NC[0]):
label[i] = labels[0]
for k in range(1,len(N)):
for i in range(NC[k-1],NC[k]):
label[i] = labels[k]
# Data and labels
shuffle_list = list(zip(listOfImages, label))
# %% ----- Creating sets with equal class distribution -------------------------------------------------------------------
if set_random_seed == True:
SeedVal = seed_value
else:
SeedVal = random.randrange(1,1000000)
random.seed(SeedVal)
zipZ = [[] for x in range(len(N))]
zip_train = [[] for x in range(len(N))]
zip_val = [[] for x in range(len(N))]
zip_test = [[] for x in range(len(N))]
zipZ[0] = shuffle_list[0:NC[0]]
for k in range(1,len(N)):
zipZ[k] = shuffle_list[NC[k-1]:NC[k]]
for k in range(len(N)):
random.shuffle(zipZ[k])
TrainFrac = 1 - ValFrac - TestFrac
for k in range(len(N)):
zip_train[k] = zipZ[k][0:int(TrainFrac*N[k])]
zip_val[k] = zipZ[k][int(TrainFrac*N[k]):int((TrainFrac+ValFrac)*N[k])]
zip_test[k] = zipZ[k][int((TrainFrac+ValFrac)*N[k]):N[k]]
train = [x for s in zip_train for x in s]
val = [x for s in zip_val for x in s]
test = [x for s in zip_test for x in s]
files1, labels1 = zip(*test)
test_set = [files1, labels1]
files2, labels2 = zip(*train)
train_set = [files2, labels2]
files3, labels3 = zip(*val)
val_set = [files3, labels3]
# %% ----- Adjusting sets to fit in batches (optional code, not important) -----------------------------------------------
diff_val = BS - (len(val_set[0]) % BS);
if diff_val == 2:
c_images = []
c_labels = []
c_images.append(train_set[0][0])
c_labels.append(train_set[1][0])
c_images.append(train_set[0][len(train_set[0]) - 1])
c_labels.append(train_set[1][len(train_set[1]) - 1])
c_images = tuple(c_images)
c_labels = tuple(c_labels)
val_set[0] = val_set[0] + c_images
val_set[1] = val_set[1] + c_labels
train_set[0] = train_set[0][1:len(train_set[0])-1]
train_set[1] = train_set[1][1:len(train_set[1])-1]
elif diff_val == 1:
c_image = train_set[0][len(train_set[0]) - 1]
c_label = train_set[1][len(train_set[1]) - 1]
t0list = list(val_set[0])
t0list.append(c_image)
val_set[0] = tuple(t0list)
t1list = list(val_set[1])
t1list.append(c_label)
val_set[1] = tuple(t1list)
train_set[0] = train_set[0][0:len(train_set[0])-2]
train_set[1] = train_set[1][0:len(train_set[1])-2]
diff_test = BS - (len(test_set[0]) % BS);
if diff_test == 2:
c_images = []
c_labels = []
c_images.append(train_set[0][0])
c_labels.append(train_set[1][0])
c_images.append(train_set[0][len(train_set[0]) - 1])
c_labels.append(train_set[1][len(train_set[1]) - 1])
c_images = tuple(c_images)
c_labels = tuple(c_labels)
test_set[0] = test_set[0] + c_images
test_set[1] = test_set[1] + c_labels
train_set[0] = train_set[0][1:len(train_set[0])-1]
train_set[1] = train_set[1][1:len(train_set[1])-1]
elif diff_test == 1:
c_image = train_set[0][len(train_set[0]) - 1]
c_label = train_set[1][len(train_set[1]) - 1]
t0list = list(test_set[0])
t0list.append(c_image)
test_set[0] = tuple(t0list)
t1list = list(test_set[1])
t1list.append(c_label)
test_set[1] = tuple(t1list)
train_set[0] = train_set[0][0:len(train_set[0])-1]
train_set[1] = train_set[1][0:len(train_set[1])-1]
# NOTE(review): this last adjustment looks like a copy-paste remnant of the
# two blocks above -- "diff_train" is computed from *test_set*, the copied
# element is read from *train_set* at an index derived from test_set's
# length, and the net effect is to append that train element to train_set
# while dropping the last *test* element.  Confirm the intended behavior.
diff_train = len(test_set[0]) % BS
if diff_train < 5:
    c_image = train_set[0][len(test_set[0]) - 1]
    c_label = train_set[1][len(test_set[1]) - 1]
    t0list = list(train_set[0])
    t0list.append(c_image)
    train_set[0] = tuple(t0list)
    t1list = list(train_set[1])
    t1list.append(c_label)
    train_set[1] = tuple(t1list)
    test_set[0] = test_set[0][0:len(test_set[0])-1]
    test_set[1] = test_set[1][0:len(test_set[1])-1]
# %% ----- Batch generator and loaders ----------------------------------------------------------------------------------
train_loader = batch_generator (BS, train_set)
test_loader = batch_generator (BS, test_set)
val_loader = batch_generator (BS, val_set)
# %% ----- Soft coding input size ----------------------------------------------------------------------------------------
# Spatial size after the two stride-2 max-pool layers: each pool halves the
# size, rounding down.  Floor division replaces the even/odd branching of
# the previous version (for non-negative n, (n - 1) / 2 == n // 2 when n is
# odd) and keeps Size_1 an int instead of a float.
Size_1 = input_size // 2
Size_2 = Size_1 // 2
# %% ----- Defining the neural net --------------------------------------------------------------------------------------
#note: nn.Conv2D(in, out, kernel, stride=1, padding)
#note: nn.MaxPool2d(kernel, stride, padding)
#note: nn.Conv2D(in, out, kernel, stride=1, padding)
#note: nn.MaxPool2d(kernel, stride, padding)
class BN_Net(nn.Module):
    """Small CNN with batch normalization after each conv and linear stage.

    Expects (batch, 1, H, W) single-channel input; two conv+pool stages
    shrink the spatial size to the module-level ``Size_2``.

    Generalized: the number of output classes is now a constructor
    parameter (default 5, matching the previously hard-coded value), so
    the same architecture can be reused for other datasets.
    """
    def __init__(self, num_classes=5):
        super(BN_Net, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1,5,5,1,2),
            nn.MaxPool2d(2,2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(5),
            nn.Conv2d(5,8,5,1,2),
            nn.MaxPool2d(2,2),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8)
        )
        self.classifier = nn.Sequential(
            nn.Linear(8*Size_2*Size_2, 120),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(120),
            nn.Linear(120,84),
            nn.ReLU(inplace=True),
            nn.BatchNorm1d(84),
            nn.Linear(84,num_classes)
        )
    def forward(self, x):
        # Flatten the conv feature maps before the fully connected classifier.
        x = self.features(x)
        x = x.view(-1, 8*Size_2*Size_2)
        x = self.classifier(x)
        return x
class Net(nn.Module):
    """Same small CNN as BN_Net but without batch normalization.

    Expects (batch, 1, H, W) single-channel input; two conv+pool stages
    shrink the spatial size to the module-level ``Size_2``.

    Generalized: the number of output classes is now a constructor
    parameter (default 5, matching the previously hard-coded value).
    """
    def __init__(self, num_classes=5):
        super(Net, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1,5,5,1,2),
            nn.MaxPool2d(2,2),
            nn.ReLU(inplace=True),
            nn.Conv2d(5,8,5,1,2),
            nn.MaxPool2d(2,2),
            nn.ReLU(inplace=True)
        )
        self.classifier = nn.Sequential(
            nn.Linear(8*Size_2*Size_2, 120),
            nn.ReLU(inplace=True),
            nn.Linear(120,84),
            nn.ReLU(inplace=True),
            nn.Linear(84,num_classes)
        )
    def forward(self, x):
        # Flatten the conv feature maps before the fully connected classifier.
        x = self.features(x)
        x = x.view(-1, 8*Size_2*Size_2)
        x = self.classifier(x)
        return x
# %% ----- Initialize or load weights -----------------------------------------------------------------------------
if batchnorm_flag:
net = BN_Net()
else:
net = Net()
net.cuda()
if random_initialization:
net.apply(weights_init)
else:
checkpoint = torch.load(saved_weights)
net.load_state_dict(checkpoint['model_state_dict'])
# %% ----- Loss and optimizer ---------------------------------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(),lr=0.001, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False)
#optimizer = optim.RMSprop(net.parameters(), lr=0.01, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
#optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False)
model_initial = copy.deepcopy(net)
# %% ----- Training the net ---------------------------------------------------------------------------------------------
start = time.time()
# Per-epoch history of *summed* batch losses / correct counts -- see the
# NOTE inside the loop.
running_loss_history = []
running_corrects_history = []
val_running_loss_history = []
val_running_corrects_history = []
# One flattened weight snapshot per *batch* when PCA_flag is set.
StateVecList = []
for e in range(epochs):
    print('\nepoch :', (e+1))
    running_loss = 0.0
    running_corrects = 0.0
    val_running_loss = 0.0
    val_running_corrects = 0.0
    # batch_generator is exhausted after one pass, so fresh loaders each epoch.
    train_loader = batch_generator(BS, train_set)
    val_loader = batch_generator(BS, val_set)
    train_amount = 0.0
    val_amount = 0.0
    for inputs, labels in train_loader:
        labels = string_tensor(labels)
        inputs = torch.stack(inputs)
        # Add the channel dimension: (BS, H, W) -> (BS, 1, H, W).
        inputs = torch.unsqueeze(inputs, 1)
        outputs = net(inputs)
        loss = criterion(outputs, labels.long())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        _, preds = torch.max(outputs, 1)
        running_loss += loss.item()
        running_corrects += torch.sum(preds == labels.long().data)
        train_amount += BS
        if PCA_flag:
            StateVecList.append(weights_to_list(net))
    # NOTE(review): validation runs without net.eval(), so BatchNorm layers
    # keep using (and updating) batch statistics here -- confirm intended.
    with torch.no_grad():
        for val_inputs, val_labels in val_loader:
            val_inputs = torch.stack(val_inputs)
            val_inputs = torch.unsqueeze(val_inputs, 1)
            val_labels = string_tensor(val_labels)
            val_outputs = net(val_inputs)
            val_loss = criterion(val_outputs, val_labels.long())
            _, val_preds = torch.max(val_outputs, 1)
            val_running_loss += val_loss.item()
            val_running_corrects += torch.sum(val_preds == val_labels.long().data)
            val_amount += BS
    # NOTE(review): epoch_loss/epoch_acc are sums over batches, not means,
    # and train_amount/val_amount are accumulated but never used --
    # presumably a per-sample average was intended at some point; confirm.
    epoch_loss = running_loss
    epoch_acc = running_corrects
    running_loss_history.append(epoch_loss)
    running_corrects_history.append(epoch_acc)
    val_epoch_loss = val_running_loss
    val_epoch_acc = val_running_corrects
    val_running_loss_history.append(val_epoch_loss)
    val_running_corrects_history.append(val_epoch_acc)
    print('\ntraining loss: {:.4f} '.format(epoch_loss))
    print('validation loss: {:.4f}'.format(val_epoch_loss))
    # Drain queued CUDA work so the wall-clock timing below is accurate.
    torch.cuda.synchronize()
end = time.time()
print('\n-----------------------------------------------------------------------')
print('\nTime of training: {:d} s'.format(round((end - start))))
# %% ----- Prediction and training stats and graphs --------------------------------------------------------------------
plt.rcParams.update({'font.size': fig_font_size})
correct = [0,0,0]
total = [0,0,0]
testsize = [0,0,0]
AV_all = [[],[],[],[],[]]
with torch.no_grad():
for data in test_loader:
images_0_0, labels_0 = data
images_0_1 = torch.stack(images_0_0)
images_0_2 = torch.unsqueeze(images_0_1, 1)
outputs_0 = net(images_0_2)
_, predicted_0 = torch.max(outputs_0.data, 1)
total[0] += string_tensor(labels_0).size(0)
testsize[0] += 1
correct_index_0 = predicted_0 == string_tensor(labels_0).long()
correct[0] += correct_index_0.sum().item()
if MAV_flag:
for x in range(len(outputs_0)):
if correct_index_0[x]:
AV_all[predicted_0[x]].append(outputs_0[x])
train_loader = batch_generator(BS, train_set)
val_loader = batch_generator(BS, val_set)
for data in train_loader:
images_1_0, labels_1 = data
images_1_1 = torch.stack(images_1_0)
images_1_2 = torch.unsqueeze(images_1_1, 1)
outputs_1 = net(images_1_2)
_, predicted_1 = torch.max(outputs_1.data, 1)
total[1] += string_tensor(labels_1).size(0)
testsize[1] += 1
correct_index_1 = predicted_1 == string_tensor(labels_1).long()
correct[1] += correct_index_1.sum().item()
if MAV_flag:
for x in range(len(outputs_1)):
if correct_index_1[x]:
AV_all[predicted_1[x]].append(outputs_1[x])
for data in val_loader:
images_2_0, labels_2 = data
images_2_1 = torch.stack(images_2_0)
images_2_2 = torch.unsqueeze(images_2_1, 1)
outputs_2 = net(images_2_2)
_, predicted_2 = torch.max(outputs_2.data, 1)
total[2] += string_tensor(labels_2).size(0)
testsize[2] += 1
correct_index_2 = predicted_2 == string_tensor(labels_2).long()
correct[2] += correct_index_2.sum().item()
if MAV_flag:
for x in range(len(outputs_2)):
if correct_index_2[x]:
AV_all[predicted_2[x]].append(outputs_2[x])
if MAV_flag:
MAV = np.empty((5,5))
for i in range(5):
AV_array = np.array([list(x.cpu().numpy()) for x in AV_all[i]])
MAV[i] = np.mean(AV_array, axis=0)
"""
================================================
================================================
===== SECONDARY DATASET LOOPS FOR MAV
================================================
================================================
"""
AV_dist_correct = []
AV_dist_wrong = []
if MAV_flag:
train_loader = batch_generator(BS, train_set)
val_loader = batch_generator(BS, val_set)
test_loader = batch_generator(BS, test_set)
with torch.no_grad():
for data in test_loader:
images_0_0, labels_0 = data
images_0_1 = torch.stack(images_0_0)
images_0_2 = torch.unsqueeze(images_0_1, 1)
outputs_0 = net(images_0_2)
_, predicted_0 = torch.max(outputs_0.data, 1)
correct_index_0 = predicted_0 == string_tensor(labels_0).long()
"""
for x in range(len(outputs_0)):
avdiff = outputs_0[x].cpu().numpy() - MAV[predicted_0[x]]
avdist = np.linalg.norm(avdiff)
if correct_index_0[x]:
AV_dist_correct.append(avdist)
else:
AV_dist_wrong.append(avdist)
"""
for data in train_loader:
images_1_0, labels_1 = data
images_1_1 = torch.stack(images_1_0)
images_1_2 = torch.unsqueeze(images_1_1, 1)
outputs_1 = net(images_1_2)
_, predicted_1 = torch.max(outputs_1.data, 1)
correct_index_1 = predicted_1 == string_tensor(labels_1).long()
for x in range(len(outputs_1)):
avdiff = outputs_1[x].cpu().numpy() - MAV[predicted_1[x]]
avdist = np.linalg.norm(avdiff)
if correct_index_1[x]:
AV_dist_correct.append(avdist)
else:
AV_dist_wrong.append(avdist)
for data in val_loader:
images_2_0, labels_2 = data
images_2_1 = torch.stack(images_2_0)
images_2_2 = torch.unsqueeze(images_2_1, 1)
outputs_2 = net(images_2_2)
_, predicted_2 = torch.max(outputs_2.data, 1)
correct_index_2 = predicted_2 == string_tensor(labels_2).long()
for x in range(len(outputs_0)):
avdiff = outputs_2[x].cpu().numpy() - MAV[predicted_2[x]]
avdist = np.linalg.norm(avdiff)
if correct_index_2[x]:
AV_dist_correct.append(avdist)
else:
AV_dist_wrong.append(avdist)
# Summary statistics of the two distance-to-MAV distributions.
AVCA = np.array(AV_dist_correct)   # distances of correctly classified samples
AVC_max = np.max(AVCA)
AVC_min = np.min(AVCA)
AVC_avg = np.mean(AVCA)
AVC_std = np.std(AVCA)
AVWA = np.array(AV_dist_wrong)     # distances of misclassified samples
AVW_max = np.max(AVWA)
AVW_min = np.min(AVWA)
AVW_avg = np.mean(AVWA)
AVW_std = np.std(AVWA)
# Re-run the test set and count samples in the "high confidence" region:
# distance to the predicted class MAV below the smallest distance ever seen
# for a wrong prediction (AVW_min).
test_loader = batch_generator(BS, test_set)
with torch.no_grad():
    ImagesChecked = 0
    ImagesCheckedCorrect = 0
    for data in test_loader:
        images_3_0, labels_3 = data
        images_3_1 = torch.stack(images_3_0)
        images_3_2 = torch.unsqueeze(images_3_1, 1)
        outputs_3 = net(images_3_2)
        _, predicted_3 = torch.max(outputs_3.data, 1)
        correct_index_3 = predicted_3 == string_tensor(labels_3).long()
        for x in range(len(outputs_3)):
            avdiff = outputs_3[x].cpu().numpy() - MAV[predicted_3[x]]
            avdist = np.linalg.norm(avdiff)
            if avdist < AVW_min:
            #if avdist < 2.5:
                ImagesChecked += 1
                if correct_index_3[x]:
                    ImagesCheckedCorrect += 1
# Strip plot of both distance distributions with a red line at the
# high-confidence threshold AVW_min.
df1 = pd.DataFrame({'1':AVCA})
df2 = pd.DataFrame({'2':AVWA})
df = pd.concat([df1, df2], ignore_index=True, axis=1)
df.columns = ['correct','incorrect']
MAV_dist_plot = plt.figure(figsize=(9,7))
plt.grid(1, which='major')
plt.grid(1, which='minor', color='k', linestyle='-', alpha=0.08)
plt.minorticks_on()
sbplot = sb.stripplot(data=df)
plt.ylabel('distance to MAV')
x = plt.gca().axes.get_xlim()
plt.plot(x, len(x)*[AVW_min],'r')
# %%
"""
================================================
================================================
===== FOOLING DATASET
================================================
================================================
"""
if MAV_flag:
    thresh_flag = True
    thresh = 4.7  # hand-picked distance-to-MAV rejection threshold
    #dataset_fool_dir = r'D:\ddavidse\Desktop\converted_elephants'
    #dataset_fool_dir = r'D:\ddavidse\Desktop\data_mirrored_100x100'
    # Walk the fooling dataset directory (one subfolder per class) and count
    # the images in each class.
    os.chdir(dataset_fool_dir)
    DirListTop_fool = os.listdir()
    names_fool = DirListTop_fool
    N_fool = []
    for x in names_fool:
        os.chdir(x)
        DirListSub_fool = os.listdir()
        N_fool.append(len(DirListSub_fool))
        os.chdir(dataset_fool_dir)
    NC_fool = np.cumsum(N_fool)
    listOfFiles_fool = getListOfFiles(dataset_fool_dir)
    listOfImages_fool = files_tensor(listOfFiles_fool)
    # Build the label vector: images are ordered per class, so class k spans
    # indices NC_fool[k-1]..NC_fool[k].
    label_fool = np.zeros(sum(N_fool))
    labels_fool = [x for x in range(len(names_fool))]
    for i in range(0,NC_fool[0]):
        label_fool[i] = labels_fool[0]
    for k in range(1,len(N_fool)):
        for i in range(NC_fool[k-1],NC_fool[k]):
            label_fool[i] = labels_fool[k]
    dataset_fool = [listOfImages_fool, label_fool]
    list_1 = AV_dist_correct
    list_2 = AV_dist_wrong
    # Distance-to-MAV for every fooling image (no correct/wrong split — the
    # fooling set is out-of-distribution by construction).
    data_loader = batch_generator(BS, dataset_fool)
    dist_test = []
    with torch.no_grad():
        for data in data_loader:
            images, labels = data
            images = torch.stack(images)
            images = torch.unsqueeze(images, 1)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            correct_index = predicted == string_tensor(labels).long()
            for x in range(len(outputs)):
                avdiff = outputs[x].cpu().numpy() - MAV[predicted[x]]
                avdist = np.linalg.norm(avdiff)
                dist_test.append(avdist)
    # Strip plot comparing original-set distances with fooling-set distances.
    df0 = pd.DataFrame({'0':list_1 + list_2})
    df1 = pd.DataFrame({'1':list_1})
    df2 = pd.DataFrame({'2':list_2})
    df3 = pd.DataFrame({'3':dist_test})
    df = pd.concat([df0, df3], ignore_index=True, axis=1)
    df.columns = ['original set','fooling set']
    MAV_dist_plot = plt.figure(figsize=(9,7))
    plt.grid(1, which='major')
    plt.grid(1, which='minor', color='k', linestyle='-', alpha=0.08)
    plt.minorticks_on()
    sbplot = sb.stripplot(data=df, jitter=0.15)
    plt.ylabel('distance to MAV')
    if thresh_flag:
        # Apply the threshold: keep only samples closer to their predicted
        # MAV than `thresh`, then report accuracy and rejection rates.
        x = plt.gca().axes.get_xlim()
        plt.plot(x, len(x)*[thresh],'r')
        list_1_thr = [x for x in list_1 if x < thresh]
        list_2_thr = [x for x in list_2 if x < thresh]
        dist_test_thr = [x for x in dist_test if x < thresh]
        L1C = len(list_1)
        L1W = len(list_2)
        L1T = len(dist_test)
        L2C = len(list_1_thr)
        L2W = len(list_2_thr)
        L2T = len(dist_test_thr)
        A1 = round(100 * L1C / (L1C + L1W), 1)                # accuracy before thresholding
        A2 = round(100 * L2C / (L2C + L2W), 1)                # accuracy after thresholding
        P1 = round(100 * (1 - (L2C + L2W) / (L1C + L1W)), 1)  # % of original set rejected
        P2 = round(100 * (1 - L2T / L1T), 1)                  # % of fooling set rejected
        print('\n-------------------------------------------------------------------')
        print('\nAccuracy on the original dataset: \t\t{} %'.format(A1))
        print('\nThreshold: {}'.format(thresh))
        print('\nAccuracy on thresholded original dataset: \t{} %'.format(A2))
        print('\nPart of the original dataset thrown away: \t{} %'.format(P1))
        print('Part of the secondary dataset thrown away: \t{} %'.format(P2))
        print('\n-------------------------------------------------------------------')
        # Save the comparison plot and a text summary to the run folder.
        os.chdir(fname)
        path = '{}\\MAV_distance_dist.png'.format(fname)
        MAV_dist_plot.savefig(path, dpi=fig_dpi)
        path = '{}\\MAV_distance_dist.svg'.format(fname)
        MAV_dist_plot.savefig(path, dpi=fig_dpi)
        f = open('MAV results.txt','w+')
        f.write('--------------------------------------------------------------------------------------')
        f.write('\n\nAccuracy on the original dataset: \t\t{} %'.format(A1))
        f.write('\nAccuracy on thresholded original dataset: \t{} %'.format(A2))
        f.write('\n\nThreshold: {}'.format(thresh))
        f.write('\n\nPart of the original dataset thrown away: \t{} %'.format(P1))
        f.write('\nPart of the secondary dataset thrown away: \t{} %'.format(P2))
        f.write('\n\n--------------------------------------------------------------------------------------')
        f.close()
# %%
"""
================================================
================================================
===== SECONDARY DATASET LOOPS FOR BASELINE
================================================
================================================
"""
if baseline_flag:
    # Softmax-baseline uncertainty: 100 * (1 - max softmax probability) for
    # every sample in the test, train and validation sets, bucketed by
    # whether the prediction was correct.
    train_loader = batch_generator(BS, train_set)
    val_loader = batch_generator(BS, val_set)
    test_loader = batch_generator(BS, test_set)
    unc_correct = []
    unc_wrong = []
    b_time_1 = time.time()
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images = torch.stack(images)
            images = torch.unsqueeze(images, 1)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            correct_index = predicted == string_tensor(labels).long()
            for x in range(len(outputs)):
                probs = softmax(outputs[x].cpu().numpy())
                prob = np.max(probs)
                uncert = 100*(1 - prob)
                if correct_index[x]:
                    unc_correct.append(uncert)
                else:
                    unc_wrong.append(uncert)
        for data in train_loader:
            images, labels = data
            images = torch.stack(images)
            images = torch.unsqueeze(images, 1)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            correct_index = predicted == string_tensor(labels).long()
            for x in range(len(outputs)):
                probs = softmax(outputs[x].cpu().numpy())
                prob = np.max(probs)
                uncert = 100*(1 - prob)
                if correct_index[x]:
                    unc_correct.append(uncert)
                else:
                    unc_wrong.append(uncert)
        for data in val_loader:
            images, labels = data
            images = torch.stack(images)
            images = torch.unsqueeze(images, 1)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            correct_index = predicted == string_tensor(labels).long()
            for x in range(len(outputs)):
                probs = softmax(outputs[x].cpu().numpy())
                prob = np.max(probs)
                uncert = 100*(1 - prob)
                if correct_index[x]:
                    unc_correct.append(uncert)
                else:
                    unc_wrong.append(uncert)
    # Report wall-clock time for the three passes.
    b_time_2 = time.time()
    b_time_d = b_time_2 - b_time_1
    b_time_d_r = round(b_time_d,1)
    print('time to pass entire dataset through network: {} s'.format(b_time_d_r))
    # Strip plot of the two uncertainty distributions.
    df1 = pd.DataFrame({'1':unc_correct})
    df2 = pd.DataFrame({'2':unc_wrong})
    df = pd.concat([df1, df2], ignore_index=True, axis=1)
    df.columns = ['correct','wrong']
    baseline_plot = plt.figure(figsize=(9,7))
    plt.grid(1, which='major')
    plt.grid(1, which='minor', color='k', linestyle='-', alpha=0.08)
    plt.minorticks_on()
    sbplot = sb.stripplot(data=df, jitter=.15)
    plt.ylabel('uncertainty in %')
    plt.title('baseline uncertainty plot')
# Headline accuracy numbers: the first test run (index 0) and the whole set.
total_tot = sum(total)
correct_tot = sum(correct)
testsize_tot = sum(testsize)
print('\nAccuracy of the network on the test images: {:.1f} %'.format((100 * correct[0] / total[0])))
print('Test result obtained from {} images coming from {} batches'.format(total[0], testsize[0]))
print('\nAccuracy of the network on the total set of all images: {:.1f} %'.format((100 * correct_tot / total_tot)))
print('Test result obtained from {} images coming from {} batches'.format(total_tot, testsize_tot))
if MAV_flag:
    # High-confidence subset statistics computed in the MAV section above.
    print('\nAmount of images in high confidence set: {}'.format(ImagesChecked))
    print('This is {:.1f}% of the test set'.format(100*ImagesChecked / total[0]))
    print('\nAmount of correctly classified images in high confidence set: {}'.format(ImagesCheckedCorrect))
    print('Accuracy on high confidence set: {:.1f} %'.format(100*ImagesCheckedCorrect / ImagesChecked))
print('\nAmount of correct predictions in test 1: {}'.format(correct[0]))
# Training vs validation loss curves.
val_plot = plt.figure(figsize=(9,7))
plt.grid(1, which='major')
plt.grid(1, which='minor', color='k', linestyle='-', alpha=0.08)
plt.minorticks_on()
plt.plot(running_loss_history, 'r',label='training loss')
plt.plot(val_running_loss_history,'b', label='validation loss')
plt.autoscale(enable=True, axis='x', tight=True)
plt.legend()
v_len = len(val_set[0])
t_len = len(train_set[0])
# Training vs validation accuracy curves, converted to percentages.
acc_plot = plt.figure(figsize=(9,7))
plt.grid(1, which='major')
plt.grid(1, which='minor', color='k', linestyle='-', alpha=0.08)
plt.minorticks_on()
train_corrected = [100*float(x)/(train_amount) for x in running_corrects_history]
plt.plot(train_corrected,'r', label='training accuracy [%]')
val_corrected = [100*float(x)/(val_amount) for x in val_running_corrects_history]
plt.plot(val_corrected,'b', label='validation accuracy [%]')
plt.autoscale(enable=True, axis='x', tight=True)
plt.legend()
# %% ----- confusion matrix --------------------------------------------------------------------------------------------
# code inspiration from https://deeplizard.com/learn/video/0LhiS6yu2qQ
if confusion_flag:
    # Run the whole test set through the network once more, collecting all
    # predictions and labels for the confusion matrix and (optionally) the
    # misclassified outputs/images.
    test_loader = batch_generator (BS, test_set)
    all_preds = torch.tensor([])
    all_labels = torch.tensor([])
    misc_preds_t = []
    misc_labels_t = []
    misc_img_t = []
    misc_outputs_t = []
    maxdif_t = []
    for inputs, labels in test_loader:
        inputs = torch.stack(inputs)
        inputs = torch.unsqueeze(inputs, 1)
        labels = string_tensor(labels)
        all_labels = torch.cat((all_labels, labels),dim=0)
        outputs = net(inputs)
        all_preds = torch.cat((all_preds, outputs),dim=0)
        imlist = list(inputs)
        outputs1 = outputs.argmax(dim=1)
        vec_equal = outputs1 == labels
        if misclassified_outputs_flag:
            # Keep the raw output vector of every misclassified sample plus
            # the margin between its two largest entries.
            misc_outputs_0 = outputs[~vec_equal]
            misc_outputs_1 = misc_outputs_0.cpu().detach().numpy().tolist()
            misc_outputs = [list(map(lambda x: round(x,1), i)) for i in misc_outputs_1]
            misc_outputs_t += misc_outputs
            maxdif = [max(x) - sec_max(x) for x in misc_outputs_1]
            maxdif_t += maxdif
        if misclassified_images_flag:
            # BUG FIX: iterate the actual batch length instead of BS — the
            # final batch may be smaller than BS, which raised IndexError.
            misc_ind = [x for x in range(len(vec_equal)) if not vec_equal[x]]
            misc_img = [imlist[x] for x in misc_ind]
            misc_img_t += misc_img
            misc_labels = [labels[x] for x in misc_ind]
            misc_labels_t += misc_labels
            misc_preds = [outputs1[x] for x in misc_ind]
            misc_preds_t += misc_preds
    # Integer tensors of true labels (AL3) and predicted labels (TP3).
    AL = all_labels
    # FIX: the deprecated alias np.int was removed in NumPy 1.24 — use the
    # builtin int as the dtype instead.
    AL2 = np.array(list(AL), dtype=int)
    AL3 = torch.tensor(AL2)
    TP = all_preds.argmax(dim=1)
    TP2 = np.array(list(TP), dtype=int)
    TP3 = torch.tensor(TP2)
    ConfAcc = sum(TP3 == AL3).item()
    print('Amount of correct predictions in test 2: {}'.format(ConfAcc))
    print('\n-----------------------------------------------------------------------')
    # Tally (true, predicted) pairs into the confusion matrix.
    stacked = torch.stack((AL3, TP3), dim=1)
    cmt = torch.zeros(len(N),len(N), dtype=torch.int64)
    for p in stacked:
        tl, pl = p.tolist()
        cmt[tl, pl] = cmt[tl, pl] + 1
    cmt2 = cmt.cpu()
    # Figure size scales with the number of classes.
    if len(names) == 3:
        conf_fig_size = (8,8)
    elif len(names) == 4:
        conf_fig_size = (9,9)
    else:
        conf_fig_size = (10,10)
    # Raw counts, row-normalized and fully-normalized matrices.
    conf_fig = plt.figure(figsize = conf_fig_size)
    plot_confusion_matrix(cmt2, names)
    conf_fig_2 = plt.figure(figsize = conf_fig_size)
    plot_confusion_matrix(cmt2, names, normalize='row', percentage=True)
    conf_fig_3 = plt.figure(figsize = conf_fig_size)
    plot_confusion_matrix(cmt2, names, normalize='full', percentage=True)
# %% ----- loss landscape ----------------------------------------------------------------------------------------------
# source: https://github.com/marcellodebernardi/loss-landscapes/blob/master/examples/core-features.ipynb
if loss_landscape_flag:
    start = time.time()
    train_loader = batch_generator (BS, train_set)
    # Use a single training batch as the data for the loss metric.
    x, y = iter(train_loader).__next__()
    x = torch.stack(x)
    x = torch.unsqueeze(x, 1)
    y = torch.tensor(y).long()
    metric = loss_landscapes.metrics.Loss(criterion, x, y)
    # Evaluate the loss on a random 2D plane through the trained weights:
    # a STEPS x STEPS grid with filter-normalized directions.
    LCP = loss_landscapes.random_plane(net, metric, 100, STEPS, normalization='filter', deepcopy_model=True)
    torch.cuda.synchronize()
    end = time.time()
    print('\nTime of calculating loss landscape: {:d} s'.format(round((end - start))))
    # Contour plot of the sampled plane.
    loss_con = plt.figure()
    plt.contour(LCP, levels=50)
    plt.title('Loss Contours around Trained Model')
    plt.show()
    # 3D surface plot, default view.
    loss_surf_1 = fig = plt.figure(figsize=(9,7))
    ax = plt.axes(projection='3d')
    X = np.array([[j for j in range(STEPS)] for i in range(STEPS)])
    Y = np.array([[i for _ in range(STEPS)] for i in range(STEPS)])
    ax.plot_surface(X, Y, LCP, rstride=1, cstride=1, cmap='coolwarm', edgecolor='none')
    ax.set_title('Surface Plot of Loss Landscape')
    ax.set_xlabel(r'$\theta$', fontsize=18, labelpad=10)
    ax.set_ylabel(r"$\theta '$", fontsize=18, labelpad=10)
    ax.set_zlabel('Loss', fontsize=18, labelpad=10, rotation=90)
    # Same surface from a rotated viewpoint.
    loss_surf_2 = fig = plt.figure(figsize=(9,7))
    ax = plt.axes(projection='3d')
    ax.plot_surface(X, Y, LCP, rstride=1, cstride=1, cmap='coolwarm', edgecolor='none')
    #ax.set_title('Surface Plot of Loss Landscape')
    ax.view_init(30, 45)
    ax.set_xlabel(r'$\theta$', fontsize=18, labelpad=10)
    ax.set_ylabel(r"$\theta '$", fontsize=18, labelpad=10)
    ax.set_zlabel('Loss', fontsize=18, labelpad=10, rotation=90)
# %% ----- PCA ----------------------------------------------------------------------------------------------------------
if PCA_flag:
    PCA_time_start = time.time()
    # StateVecList holds one flattened weight vector per epoch; find the two
    # main PCA directions of the optimization trajectory.
    StateVecArray = np.array(StateVecList)
    TrainedNetVector = StateVecArray[epochs-1]
    pca = PCA(n_components=2)
    PC = pca.fit_transform(StateVecArray)
    # Normalize each principal-component coordinate column to unit length.
    PC_norm = []
    for i in range(len(PC[0])):
        col = PC[:,i]
        col_fixed = col / np.linalg.norm(col)
        PC_norm.append(col_fixed)
    # Build the two direction vectors in weight space by projecting every
    # individual weight's trajectory onto the normalized component columns.
    SVA_1 = []
    for i in range(len(StateVecArray[0])):
        weightvec = StateVecArray[:,i]
        SVA_1.append(np.dot(weightvec, PC_norm[0]))
    SVA_2 = []
    for i in range(len(StateVecArray[0])):
        weightvec = StateVecArray[:,i]
        SVA_2.append(np.dot(weightvec, PC_norm[1]))
    SVA_1 = np.array(SVA_1)
    SVA_2 = np.array(SVA_2)
    if filter_normalization_flag:
        # Filter normalization: rescale each parameter-tensor segment of the
        # direction vectors to the norm of the corresponding segment of the
        # trained network's weight vector.
        A = net.state_dict()
        S = [x.size() for x in A.values()]
        SL = [list(x) for x in S]
        L = len(SL)
        Sizes = []
        WeightTensors = []
        ite = 0
        for x in range(L):
            ite += 1
            xsize = 1
            for number in SL[x]:
                xsize = xsize * number
            Sizes.append(xsize)
        Sizes_C = np.cumsum(Sizes)
        # First segment handled separately (0..Sizes_C[0]); the remaining
        # segments are rescaled in the loop below.
        Seg0 = TrainedNetVector[0:Sizes_C[0]]
        Seg0_Norm = np.linalg.norm(Seg0)
        Seg1 = SVA_1[0:Sizes_C[0]]
        Seg1_Norm = np.linalg.norm(Seg1)
        Seg2 = SVA_2[0:Sizes_C[0]]
        Seg2_Norm = np.linalg.norm(Seg2)
        SVA_1[0:Sizes_C[0]] = SVA_1[0:Sizes_C[0]] * Seg0_Norm / Seg1_Norm
        SVA_2[0:Sizes_C[0]] = SVA_2[0:Sizes_C[0]] * Seg0_Norm / Seg2_Norm
        # Sanity check: after rescaling, all three first-segment norms match.
        TestNorm0 = np.linalg.norm(TrainedNetVector[0:Sizes_C[0]])
        TestNorm1 = np.linalg.norm(SVA_1[0:Sizes_C[0]])
        TestNorm2 = np.linalg.norm(SVA_2[0:Sizes_C[0]])
        print('\nTestNorm of first filter from core: \t\t{}'.format(TestNorm0))
        print('TestNorm of first filter from PCA vector 1: \t{}'.format(TestNorm1))
        print('TestNorm of first filter from PCA vector 2: \t{}'.format(TestNorm2))
        for x in range(len(Sizes_C) - 1):
            Seg0 = TrainedNetVector[Sizes_C[x]:Sizes_C[x+1]]
            Seg0_Norm = np.linalg.norm(Seg0)
            Seg1 = SVA_1[Sizes_C[x]:Sizes_C[x+1]]
            Seg1_Norm = np.linalg.norm(Seg1)
            Seg2 = SVA_2[Sizes_C[x]:Sizes_C[x+1]]
            Seg2_Norm = np.linalg.norm(Seg2)
            SVA_1[Sizes_C[x]:Sizes_C[x+1]] = SVA_1[Sizes_C[x]:Sizes_C[x+1]] * Seg0_Norm / Seg1_Norm
            SVA_2[Sizes_C[x]:Sizes_C[x+1]] = SVA_2[Sizes_C[x]:Sizes_C[x+1]] * Seg0_Norm / Seg2_Norm
    # Evaluate the validation loss on a (FilterSteps+1)^2 grid around the
    # trained weights, spanned by the two PCA directions.
    RIstep = int(round(FilterSteps/2))
    loss_array = np.zeros([FilterSteps+1,FilterSteps+1])
    for i in range(-RIstep, RIstep+1):
        for j in range(-RIstep, RIstep+1):
            NetAdd_1 = distance_multiplier*(i/RIstep)*SVA_1
            NetAdd_2 = distance_multiplier*(j/RIstep)*SVA_2
            NetWeights = TrainedNetVector + NetAdd_1 + NetAdd_2
            net_updated = list_to_weights(NetWeights, net, True)
            net_updated.cuda()
            val_running_loss = 0.0
            val_loader = batch_generator (BS, val_set)
            with torch.no_grad():
                for val_inputs, val_labels in val_loader:
                    val_inputs = torch.stack(val_inputs)
                    val_inputs = torch.unsqueeze(val_inputs, 1)
                    val_labels = string_tensor(val_labels)
                    val_outputs = net_updated(val_inputs)
                    val_loss = criterion(val_outputs, val_labels.long())
                    val_running_loss += val_loss.item()
            loss_array[i+RIstep, j+RIstep] = val_running_loss
    # The first grid column is dropped below (loss_array_cor), hence the
    # asymmetric x range.
    x = np.arange(-RIstep+1, RIstep+1)
    y = np.arange(-RIstep, RIstep+1)
    X,Y = np.meshgrid(x,y)
    loss_array_cor = loss_array[:,1:]
    if bowl:
        # Clamp outliers using the loss at four mid-edge probe points.
        # NOTE(review): values LARGER than bowlmin are clamped DOWN to
        # bowlmin and bowlmax is never used — verify this is intentional.
        tv0 = loss_array[round(FilterSteps/2) - 1, FilterSteps - 1]
        tv1 = loss_array[round(FilterSteps/2) - 1, 0]
        tv2 = loss_array[0, round(FilterSteps/2) - 1]
        tv3 = loss_array[FilterSteps - 1, round(FilterSteps/2) - 1]
        bowlmax = max([tv0, tv1, tv2, tv3])
        bowlmin = min([tv0, tv1, tv2, tv3])
        for vi in range(FilterSteps):
            for vj in range(FilterSteps):
                if loss_array[vi,vj] > bowlmin:
                    loss_array[vi,vj] = bowlmin
    # Surface plot, default view.
    PCA_fig_1 = plt.figure(figsize=(10,7))
    ax = PCA_fig_1.gca(projection='3d')
    PCA_surf = ax.plot_surface(X,Y, loss_array_cor, cmap='coolwarm')
    ax.set_xlabel(r'X', fontsize=20)
    ax.set_ylabel(r'Y', fontsize=20)
    ax.set_zlabel(r'Z', fontsize=20)
    # Surface plot, rotated view.
    PCA_fig_2 = plt.figure(figsize=(10,7))
    ax = PCA_fig_2.gca(projection='3d')
    PCA_surf_2 = ax.plot_surface(X,Y, loss_array_cor, cmap='coolwarm')
    ax.set_xlabel(r'X', fontsize=20)
    ax.set_ylabel(r'Y', fontsize=20)
    ax.set_zlabel(r'Z', fontsize=20)
    ax.view_init(30, 45)
    # Contour plot with optional centre cross-hairs and minimum marker.
    PCA_fig_3 = plt.figure(figsize=(10,7))
    PCA_cont = plt.contour(X,Y, loss_array_cor, 100)
    ax = PCA_fig_3.gca()
    ax.set_xlabel(r'X', fontsize=20)
    ax.set_ylabel(r'Y', fontsize=20)
    if contour_center:
        hlinex = [-19,20]
        hliney = [0,0]
        plt.plot(hlinex, hliney,'k')
        vlinex = [0,0]
        vliney = [-20,20]
        plt.plot(vlinex, vliney,'k')
        # Mark the grid minimum (corner artefact at [0][0] patched first).
        loss_array_cor[0][0] = loss_array_cor[0][1]
        LMIN = np.min(loss_array_cor)
        LMINLOC = np.argmin(loss_array_cor)
        RowLen = len(loss_array_cor[0])
        min_remain = LMINLOC % RowLen
        min_y = (LMINLOC - min_remain) / RowLen
        min_x = min_remain
        # NOTE(review): the fixed -20 offsets assume FilterSteps == 40 — confirm.
        plt.plot(min_x - 20, min_y - 20, 'rx', markersize=15)
    PCA_time_end = time.time()
    PCA_time = PCA_time_end - PCA_time_start
    print('\nPCA time: {} s'.format(round(PCA_time,1)))
# %% ----- Saving results -----------------------------------------------------------------------------------------------
# Everything below writes the run's artefacts (weights, plots, text reports)
# into the run folder `fname`.
os.chdir(fname)
if MAV_flag:
    # MAVs and both distance distributions as MATLAB files + a stats summary.
    mavpath1 = '{}\\MAV.mat'.format(fname)
    mavpath2 = '{}\\MAV_dist_cor.mat'.format(fname)
    mavpath3 = '{}\\MAV_dist_inc.mat'.format(fname)
    sio.savemat(mavpath1, {'data':MAV})
    sio.savemat(mavpath2, {'data':AV_dist_correct})
    sio.savemat(mavpath3, {'data':AV_dist_wrong})
    f = open('AV stats.txt','w+')
    f.write('Correct predictions:')
    f.write('\n\nAV_max = {:.2f}'.format(AVC_max))
    f.write('\nAV_min = {:.2f}'.format(AVC_min))
    f.write('\nAV_avg = {:.2f}'.format(AVC_avg))
    f.write('\nAV_std = {:.2f}'.format(AVC_std))
    f.write('\n\nWrong predictions:')
    f.write('\n\nAV_max = {:.2f}'.format(AVW_max))
    f.write('\nAV_min = {:.2f}'.format(AVW_min))
    f.write('\nAV_avg = {:.2f}'.format(AVW_avg))
    f.write('\nAV_std = {:.2f}'.format(AVW_std))
    f.close()
# Human-readable run report: architecture, data split sizes, hyperparameters
# and the headline accuracy numbers.
f = open('run info.txt','w+')
A = str(net)
f.write('-----------------------------------------------------------------------------------------\n\n')
f.write(A)
f.write('\n\n-----------------------------------------------------------------------------------------')
f.write('\n\nData used:\n')
for i in range(len(N)):
    f.write('\nClass {}: {} images'.format(names[i], N[i]))
f.write('\n\nTrain set size: {} images'.format(len(train_set[0])))
f.write('\nVal set length: {} images'.format(len(val_set[0])))
f.write('\nTest set size: \t{} images'.format(len(test_set[0])))
f.write('\n\nTotal size of dataset: {} images'.format(sum(N)))
f.write('\n\n-----------------------------------------------------------------------------------------')
f.write('\n\ninput_size = {}'.format(input_size))
f.write('\nBS = {}'.format(BS))
f.write('\nepochs = {}'.format(epochs))
f.write('\nSTEPS = {}'.format(STEPS))
f.write('\nSeedVal = {}'.format(SeedVal))
f.write('\n\nLoss function: {}'.format(criterion))
f.write('\n\nOptimizer: \n{}'.format(optimizer))
f.write('\n\n-----------------------------------------------------------------------------------------')
f.write('\n\nAccuracy of the network on the test images: {:.1f} %'.format((100 * correct[0] / total[0])))
f.write('\nTest result obtained from {} images coming from {} batches'.format(total[0], testsize[0]))
f.write('\n\nAccuracy of the network on the total set of all images: {:.1f} %'.format((100 * correct_tot / total_tot)))
f.write('\nTest result obtained from {} images coming from {} batches'.format(total_tot, testsize_tot))
if MAV_flag:
    f.write('\n\nAmount of images in high confidence set: {}'.format(ImagesChecked))
    f.write('\nThis is {:.1f}% of the test set'.format(100*ImagesChecked / total[0]))
    f.write('\n\nAmount of correctly classified images in high confidence set: {}'.format(ImagesCheckedCorrect))
    f.write('\nAccuracy on high confidence set: {:.1f} %'.format(100*ImagesCheckedCorrect / ImagesChecked))
if PCA_flag:
    f.write('\n\n-----------------------------------------------------------------------------------------')
    f.write('\n\nPCA - filter normalization: {}'.format(filter_normalization_flag))
    f.write('\nPCA - distance multiplier: {}'.format(distance_multiplier))
    f.write('\nPCA - FilterSteps: {}'.format(FilterSteps))
if misclassified_outputs_flag:
    f.write('\n\n-----------------------------------------------------------------------------------------')
    f.write('\n\nMisclassified net outputs: ')
    f.write('\n')
    maxdif_t2 = [round(x,1) for x in maxdif_t]
    for x in range(len(misc_outputs_t)):
        f.write('\n{} \t- maxdif = {}'.format(misc_outputs_t[x], maxdif_t2[x]))
f.write('\n\n-----------------------------------------------------------------------------------------')
f.close()
# Final and initial model weights.
cwd = os.getcwd()
torch.save({'model_state_dict': net.state_dict()
            }, r'{}\\final_weights.pth'.format(cwd))
torch.save({'model_state_dict': model_initial.state_dict()
            }, r'{}\\initial_weights.pth'.format(cwd))
# Loss and accuracy curves (PNG + SVG each).
path = '{}\\loss_curve.png'.format(cwd)
val_plot.savefig(path, dpi=fig_dpi)
path = '{}\\loss_curve.svg'.format(cwd)
val_plot.savefig(path, dpi=fig_dpi)
path = '{}\\accuracy_curve.png'.format(cwd)
acc_plot.savefig(path, dpi=fig_dpi)
path = '{}\\accuracy_curve.svg'.format(cwd)
acc_plot.savefig(path, dpi=fig_dpi)
if confusion_flag:
    # Raw, row-normalized and fully-normalized confusion matrices.
    path = '{}\\confusion_matrix.png'.format(cwd)
    conf_fig.savefig(path, dpi=fig_dpi)
    path = '{}\\confusion_matrix.svg'.format(cwd)
    conf_fig.savefig(path, dpi=fig_dpi)
    path = '{}\\confusion_matrix_normalize_row.png'.format(cwd)
    conf_fig_2.savefig(path, dpi=fig_dpi)
    path = '{}\\confusion_matrix_normalize_row.svg'.format(cwd)
    conf_fig_2.savefig(path, dpi=fig_dpi)
    path = '{}\\confusion_matrix_normalize_full.png'.format(cwd)
    conf_fig_3.savefig(path, dpi=fig_dpi)
    path = '{}\\confusion_matrix_normalize_full.svg'.format(cwd)
    conf_fig_3.savefig(path, dpi=fig_dpi)
if PCA_flag:
    path = '{}\\PCA_loss_landscape_1.png'.format(cwd)
    PCA_fig_1.savefig(path, dpi=fig_dpi)
    path = '{}\\PCA_loss_landscape_1.svg'.format(cwd)
    PCA_fig_1.savefig(path, dpi=fig_dpi)
    path = '{}\\PCA_loss_landscape_2.png'.format(cwd)
    PCA_fig_2.savefig(path, dpi=fig_dpi)
    path = '{}\\PCA_loss_landscape_2.svg'.format(cwd)
    PCA_fig_2.savefig(path, dpi=fig_dpi)
    path = '{}\\PCA_loss_contour.png'.format(cwd)
    PCA_fig_3.savefig(path, dpi=fig_dpi)
    path = '{}\\PCA_loss_contour.svg'.format(cwd)
    PCA_fig_3.savefig(path, dpi=fig_dpi)
if loss_landscape_flag:
    path = '{}\\loss_surface_contour.png'.format(cwd)
    loss_con.savefig(path, dpi=fig_dpi)
    path = '{}\\loss_surface_contour.svg'.format(cwd)
    loss_con.savefig(path, dpi=fig_dpi)
    path = '{}\\loss_surface_angle_1.png'.format(cwd)
    loss_surf_1.savefig(path, dpi=fig_dpi)
    path = '{}\\loss_surface_angle_1.svg'.format(cwd)
    loss_surf_1.savefig(path, dpi=fig_dpi)
    path = '{}\\loss_surface_angle_2.png'.format(cwd)
    loss_surf_2.savefig(path, dpi=fig_dpi)
    path = '{}\\loss_surface_angle_2.svg'.format(cwd)
    loss_surf_2.savefig(path, dpi=fig_dpi)
if MAV_flag:
    path = '{}\\MAV_distance_dist.png'.format(cwd)
    MAV_dist_plot.savefig(path, dpi=fig_dpi)
    path = '{}\\MAV_distance_dist.svg'.format(cwd)
    MAV_dist_plot.savefig(path, dpi=fig_dpi)
if misclassified_images_flag:
    # One PNG per misclassified test image, named after its true and
    # predicted classes.
    mcfname = 'misclassified images'
    os.mkdir(mcfname)
    mcfdir = '{}\\{}'.format(fname, mcfname)
    os.chdir(mcfdir)
    for i in range(len(misc_preds_t)):
        imagets0 = misc_img_t[i][0]
        imagets = imagets0.cpu()
        predts = int(misc_preds_t[i])
        predname = names[predts]
        labts = int(misc_labels_t[i])
        labname = names[labts]
        fstag = '{} - label {}, classified as {}'.format(i, labname, predname)
        sf = plt.figure(figsize=(6,6))
        plt.imshow(imagets, cmap='jet')
        path = '{}\\{}.png'.format(mcfdir, fstag)
        sf.savefig(path)
if baseline_flag:
    path = '{}\\baseline_uncertainty_plot.png'.format(cwd)
    baseline_plot.savefig(path, dpi=fig_dpi)
    path = '{}\\baseline_uncertainty_plot.svg'.format(cwd)
    baseline_plot.savefig(path, dpi=fig_dpi)
    sio.savemat('unc_correct.mat', {'data':unc_correct})
    sio.savemat('unc_wrong.mat', {'data':unc_wrong})
# Return to the top-level folder and open the run folder in Explorer.
os.chdir(DT)
os.startfile(fname)
|
from bestiary.models import Monster, GameItem
from data_log import models
from .test_log_views import BaseLogTest
class SummonLogTests(BaseLogTest):
    """Verify that SummonUnit log payloads create the expected SummonLog rows."""

    fixtures = ['test_summon_monsters', 'test_game_items']

    def _check_logs(self, expected_count, item_category, item_com2us_id, monster_com2us_id):
        # Shared assertions: log count, the item consumed, and the monster summoned.
        self.assertEqual(models.SummonLog.objects.count(), expected_count)
        expected_item = GameItem.objects.get(category=item_category, com2us_id=item_com2us_id)
        expected_monster = Monster.objects.get(com2us_id=monster_com2us_id)
        for log in models.SummonLog.objects.all():
            self.assertEqual(log.item, expected_item)
            self.assertEqual(log.monster, expected_monster)

    def test_summon_1_with_unknown_scroll(self):
        self._do_log('SummonUnit/scroll_unknown_qty1.json')
        self._check_logs(1, GameItem.CATEGORY_SUMMON_SCROLL, 1, 13103)

    def test_summon_10_with_unknown_scroll(self):
        self._do_log('SummonUnit/scroll_unknown_qty10.json')
        self._check_logs(10, GameItem.CATEGORY_SUMMON_SCROLL, 1, 13103)

    def test_summon_1_with_social_points(self):
        self._do_log('SummonUnit/currency_social_qty1.json')
        self._check_logs(1, GameItem.CATEGORY_CURRENCY, 2, 13103)

    def test_summon_10_with_social_points(self):
        self._do_log('SummonUnit/currency_social_qty10.json')
        self._check_logs(10, GameItem.CATEGORY_CURRENCY, 2, 13103)

    def test_summon_with_mystical_scroll(self):
        self._do_log('SummonUnit/scroll_mystical.json')
        self._check_logs(1, GameItem.CATEGORY_SUMMON_SCROLL, 2, 14102)

    def test_summon_with_crystals(self):
        self._do_log('SummonUnit/currency_crystals.json')
        self._check_logs(1, GameItem.CATEGORY_CURRENCY, 1, 14102)

    def test_summon_with_exclusive_stones(self):
        self._do_log('SummonUnit/scroll_exclusive.json')
        self._check_logs(1, GameItem.CATEGORY_SUMMON_SCROLL, 8, 14102)

    def test_blessing(self):
        # A blessing summon is logged in two steps: the partial summon first...
        self._do_log('SummonUnit/scroll_mystical_blessing_pop.json')
        self.assertEqual(models.SummonLog.objects.count(), 1)
        log = models.SummonLog.objects.first()
        self.assertEqual(log.item, GameItem.objects.get(category=GameItem.CATEGORY_SUMMON_SCROLL, com2us_id=2))
        # ...then the blessing-choice confirmation fills in the monster.
        self._do_log('ConfirmSummonChoice/blessing_selection.json')
        self.assertEqual(models.SummonLog.objects.count(), 1)
        log.refresh_from_db()
        self.assertEqual(log.monster, Monster.objects.get(com2us_id=13103))
|
from os import path
import lmdb
import lz4framed
import numpy as np
import pyarrow as pa
import six
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from utils.image_utils import blur_image
class LMDB(Dataset):
    """Dataset backed by an LMDB store whose values are lz4framed-compressed,
    pyarrow-serialized records of (image bytes, label[, mask bytes])."""

    def __init__(self, db_path, transform=None, target_transform=None, use_mask=False):
        self.db_path = db_path
        self.env = lmdb.open(
            db_path,
            subdir=path.isdir(db_path),
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )
        # Dataset-level metadata is kept under reserved keys in the store.
        with self.env.begin(write=False) as txn:
            self.length = pa.deserialize(lz4framed.decompress(txn.get(b"__len__")))
            self.keys = pa.deserialize(lz4framed.decompress(txn.get(b"__keys__")))
            self.classnum = pa.deserialize(
                lz4framed.decompress(txn.get(b"__classnum__"))
            )
        self.transform = transform
        self.target_transform = target_transform
        self.use_mask = use_mask

    def _read_record(self, index):
        """Fetch and decode the serialized record stored at *index*."""
        with self.env.begin(write=False) as txn:
            raw = txn.get(self.keys[index])
        return pa.deserialize(lz4framed.decompress(raw))

    @staticmethod
    def _bytes_to_pil(data):
        """Wrap raw encoded image bytes in a PIL image (mode unchanged)."""
        buf = six.BytesIO()
        buf.write(data)
        buf.seek(0)
        return Image.open(buf)

    def __getitem__(self, index):
        record = self._read_record(index)
        img = self._bytes_to_pil(record[0]).convert("RGB")
        if self.use_mask:
            # Blur the image where the mask indicates, then return to PIL.
            mask = self._bytes_to_pil(record[2])
            blurred = blur_image(np.asarray(img).copy(), np.asarray(mask).copy())
            img = Image.fromarray(blurred)
        target = record[1]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target, self.classnum)
        return img, target

    def _get_label(self, index):
        """Return only the label of the record at *index*."""
        return self._read_record(index)[1]

    def __len__(self):
        return self.length

    def get_targets(self):
        """Collect every sample's label into a numpy array."""
        return np.asarray([self._get_label(i) for i in range(self.length)])
class LMDBDataLoader(DataLoader):
    """DataLoader over an LMDB dataset with the standard 112x112 pipeline:
    resize, horizontal flip during training, normalize to [-1, 1]."""

    def __init__(self, config, lmdb_path, train=True, use_mask=False, meta_train=False):
        flip_prob = 0.5 if train else 0
        pipeline = transforms.Compose(
            [
                transforms.Resize((112, 112)),
                transforms.RandomHorizontalFlip(flip_prob),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )
        # Age labels are re-encoded as ordinal one-hot vectors; every other
        # attribute keeps its raw label.
        target_transform = (
            self.transform_ages_to_one_hot_ordinal
            if config.attribute == "age"
            else None
        )
        self._dataset = LMDB(lmdb_path, pipeline, target_transform, use_mask)
        # Meta-training consumes the entire dataset as a single batch.
        batch_size = len(self._dataset) if meta_train else config.batch_size
        super(LMDBDataLoader, self).__init__(
            self._dataset,
            batch_size=batch_size,
            shuffle=train,
            pin_memory=config.pin_memory,
            num_workers=config.workers,
            drop_last=train,
        )

    def class_num(self):
        """Number of classes stored in the underlying dataset."""
        return self._dataset.classnum

    def transform_ages_to_one_hot_ordinal(self, target, classes):
        """Ordinal encoding: ones for indices below *target*, zeros after."""
        encoded = np.zeros(shape=classes)
        encoded[:target] = 1
        return encoded.astype("float32")
|
import functools
import re
from typing import Optional
from sqlalchemy.orm import Session
from itunesdb.web import models
from itunesdb.web import schemas
_genre_name_splitter = re.compile("(.+): ([^:]+)$")
##########
# Genres #
##########
@functools.singledispatch
def get_genre(genre_id: int, db: Session) -> models.Genre:
    """Look a genre up by primary key (str overload looks up by full name)."""
    query = db.query(models.Genre)
    return query.filter(models.Genre.id == genre_id).first()
@get_genre.register
def _(full_name: str, db: Session) -> models.Genre:
    """str overload of get_genre: look the genre up by its full name."""
    query = db.query(models.Genre)
    return query.filter(models.Genre.full_name == full_name).first()
def get_genres(root_genres_only: bool, db: Session) -> list[models.Genre]:
    """Return all genres, or only those without a parent."""
    query = db.query(models.Genre)
    if root_genres_only:
        # noinspection PyComparisonWithNone,PyPep8
        query = query.filter(models.Genre.parent_id == None)
    return query.all()
def create_genre(genre: schemas.GenreCreate, db: Session) -> models.Genre:
    """Persist a genre, resolving its parent from a "Parent: Name" full name.

    Raises ValueError if the parent genre is missing, or if a parent_id was
    supplied alongside a full name that already encodes one.
    """
    if not isinstance(genre, schemas.GenreCreateInternal):
        genre = schemas.GenreCreateInternal(full_name=genre.full_name)
    match = _genre_name_splitter.match(genre.full_name)
    if match is None:
        # Root genre: the short name is the full name.
        genre.name = genre.full_name
    else:
        genre.name = match.group(2)
        parent_genre_full_name = match.group(1)
        if genre.parent_id is not None:
            raise ValueError("parent_id specified on root genre!")
        # noinspection PyTypeChecker
        parent_genre = get_genre(parent_genre_full_name, db)
        if parent_genre is None:
            raise ValueError(
                f"parent genre {parent_genre_full_name} does not exist!"
            )
        genre.parent_id = parent_genre.id
    db_genre = models.Genre(**genre.dict())
    db.add(db_genre)
    db.commit()
    db.refresh(db_genre)
    return db_genre
def create_genre_recursively(genre: schemas.GenreCreate, db: Session) -> models.Genre:
    """Create a genre, creating any missing ancestors first.

    Idempotent: if the genre already exists it is returned unchanged.
    """
    if not isinstance(genre, schemas.GenreCreateInternal):
        genre = schemas.GenreCreateInternal(full_name=genre.full_name)
    # Already present? Return the existing row instead of creating a duplicate.
    # noinspection PyTypeChecker
    if (db_genre := get_genre(genre.full_name, db)) is not None:
        return db_genre
    if (match := _genre_name_splitter.match(genre.full_name)) is not None:
        genre.name = match.group(2)
        parent_full_name = match.group(1)
        # Recurse to ensure the whole ancestor chain exists.
        parent_genre = schemas.GenreCreate(full_name=parent_full_name)
        db_parent_genre = create_genre_recursively(parent_genre, db)
        genre.parent_id = db_parent_genre.id
    else:
        # Root genre: no parent, name is the full name.
        genre.parent_id = None
        genre.name = genre.full_name
    return create_genre(genre, db)
##########
# Albums #
##########
def create_album(album: schemas.AlbumCreate, db: Session) -> models.Album:
    """Persist a new album and return the refreshed ORM object."""
    row = models.Album(**album.dict())
    db.add(row)
    db.commit()
    db.refresh(row)
    return row
def get_albums(genre_id: Optional[int], db: Session) -> list[models.Album]:
    """List albums, optionally filtered to a single genre."""
    query = db.query(models.Album)
    if genre_id is not None:
        query = query.filter(models.Album.genre_id == genre_id)
    return query.all()
def get_album(album_id: int, db: Session) -> models.Album:
    """Fetch one album by primary key; returns None when absent."""
    album_query = db.query(models.Album).filter(models.Album.id == album_id)
    return album_query.first()
##########
# Tracks #
##########
def create_track(track: schemas.TrackCreate, db: Session) -> models.Track:
    """Persist a new track and return the refreshed ORM object."""
    row = models.Track(**track.dict())
    db.add(row)
    db.commit()
    db.refresh(row)
    return row
def get_tracks(album_id: int, db: Session) -> list[models.Track]:
    """List every track belonging to the given album."""
    track_query = db.query(models.Track)
    return track_query.filter(models.Track.album_id == album_id).all()
|
def controllo_input(n):
    """Validate the user-supplied number (given as a string).

    Raises:
        TypeError: fewer than 4 characters (kept as TypeError for
            backward compatibility with the original behaviour).
        ValueError: non-digit characters, which would otherwise crash
            later inside ``int()`` during the Kaprekar iteration.
    """
    if len(n) < 4:
        raise TypeError("Errore: il numero deve avere minimo 4 cifre")
    if not n.isdigit():
        # New guard: reject inputs such as "12a4" early with a clear error.
        raise ValueError("Errore: il numero deve contenere solo cifre")
def ordina_crescente(n):
    """Return the characters of *n* joined in ascending order."""
    cifre_ordinate = sorted(n)
    return "".join(cifre_ordinate)
def ordina_decrescente(n):
    """Return the characters of *n* joined in descending order."""
    return "".join(sorted(n, reverse=True))
def costante_kaprekar(n):
    """Run Kaprekar's routine until 6174 (or 0 for repdigits) is reached.

    Fix: each value is zero-padded to 4 digits before sorting. Without
    this, an intermediate result such as 999 (from 1000) was treated as a
    3-digit number and the routine stalled at 0 instead of continuing as
    0999 -> 9990 - 0999 = 8991 -> ... -> 6174.

    Returns the number of iterations used, or None if the routine did not
    converge within the cap (callers that ignored the previous implicit
    None return are unaffected).
    """
    iterazioni_max_kaprekar = 7
    for i in range(iterazioni_max_kaprekar):
        cifre = str(n).zfill(4)  # keep leading zeros: 999 -> "0999"
        kaprekar = int(ordina_decrescente(cifre)) - int(ordina_crescente(cifre))
        print("Iterazioni:", i+1, ">>", int(ordina_decrescente(cifre)), "-", int(ordina_crescente(cifre)), "=", kaprekar)
        n = kaprekar
        if n == 6174 or n == 0:
            print("Numero di iterazioni:", i+1)
            return i + 1
    return None
def main():
    """Entry point: read a number from stdin and run Kaprekar's routine."""
    print("Costante di Kaprekar\n")
    numero = input(">> ")
    controllo_input(numero)
    costante_kaprekar(numero)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
""" VGG Obstructed face mask plugin
Architecture and Pre-Trained Model based on...
On Face Segmentation, Face Swapping, and Face Perception
https://arxiv.org/abs/1704.06729
Source Implementation...
https://github.com/YuvalNirkin/face_segmentation
Model file sourced from...
https://github.com/YuvalNirkin/face_segmentation/releases/download/1.0/face_seg_fcn8s.zip
Caffe model re-implemented in Keras by Kyle Vrooman
"""
import numpy as np
from lib.model.session import KSession
from ._base import Masker, logger
class Mask(Masker):
    """ Neural network to process face image into a segmentation mask of the face """
    def __init__(self, **kwargs):
        # Identifiers used by the plugin loader to locate/download the weights file.
        git_model_id = 5
        model_filename = "Nirkin_500_softmax_v1.h5"
        super().__init__(git_model_id=git_model_id, model_filename=model_filename, **kwargs)
        self.name = "VGG Obstructed"
        self.input_size = 500  # model expects 500x500 input
        # VRAM figures (MB) consumed by the scheduler; presumably measured
        # empirically for this model — confirm before reuse elsewhere.
        self.vram = 3936
        self.vram_warnings = 1088  # at BS 1. OOMs at higher batch sizes
        self.vram_per_batch = 304
        self.batchsize = self.config["batch-size"]

    def init_model(self):
        """Load the Keras model, append a softmax head and warm it up."""
        self.model = KSession(self.name, self.model_path,
                              model_kwargs=dict(), allow_growth=self.config["allow_growth"])
        self.model.load_model()
        self.model.append_softmax_activation(layer_index=-1)
        # Dummy forward pass so the graph is built before real batches arrive.
        placeholder = np.zeros((self.batchsize, self.input_size, self.input_size, 3),
                               dtype="float32")
        self.model.predict(placeholder)

    def process_input(self, batch):
        """ Compile the detected faces for prediction """
        # Keep only the first 3 channels, then zero-centre each image by
        # subtracting its own per-channel mean.
        input_ = [face.feed_face[..., :3] for face in batch["detected_faces"]]
        batch["feed"] = input_ - np.mean(input_, axis=(1, 2))[:, None, None, :]
        logger.trace("feed shape: %s", batch["feed"].shape)
        return batch

    def predict(self, batch):
        """ Run model to get predictions """
        predictions = self.model.predict(batch["feed"])
        # Invert channel 0 (1 - p); channel 0 appears to be the non-face /
        # background probability — TODO confirm against the model's class order.
        batch["prediction"] = predictions[..., 0] * -1.0 + 1.0
        return batch

    def process_output(self, batch):
        """ Compile found faces for output """
        return batch
|
from investing_algorithm_framework.core.mixins import BinanceDataProviderMixin
from investing_algorithm_framework.core.data_providers.data_provider import \
DataProvider
class BinanceDataProvider(BinanceDataProviderMixin, DataProvider):
    """Data provider backed by Binance; all behaviour comes from the mixin."""
    pass
|
import re
import typing
from pathlib import Path
initialNumberRe = re.compile("^(\\d+)-(.+)$")
def parsePatchName(s: str) -> typing.Optional[typing.Tuple[int, str]]:
    """Split a patch name like ``0001-fix-foo`` into ``(1, "fix-foo")``.

    Returns None (implicit fall-through) when the name has no leading
    number — the annotation now reflects that.
    """
    res = initialNumberRe.match(s)
    if res:
        return int(res.group(1)), res.group(2)
def getPatchFilesInADir(dirP: Path):
    """Yield every regular ``*.patch`` file directly inside *dirP*."""
    for candidate in dirP.glob("*.patch"):
        if not candidate.is_file():
            continue  # skip directories that happen to match the pattern
        yield candidate
|
"""Xiaomi mija weather sensor device."""
import logging
from zigpy import quirks
from zigpy.profiles import zha
from zigpy.quirks.xiaomi import TemperatureHumiditySensor
from zigpy.zcl.clusters.general import (
AnalogInput, Groups, Identify, MultistateInput, Ota, Scenes)
from zigpy.zcl.clusters.measurement import (
RelativeHumidity, TemperatureMeasurement)
from zhaquirks.xiaomi import (
BasicCluster, PowerConfigurationCluster, XiaomiCustomDevice)
# Xiaomi-specific ZHA device types, one per endpoint of the sensor.
TEMPERATURE_HUMIDITY_DEVICE_TYPE = 0x5F01
TEMPERATURE_HUMIDITY_DEVICE_TYPE2 = 0x5F02
TEMPERATURE_HUMIDITY_DEVICE_TYPE3 = 0x5F03
XIAOMI_CLUSTER_ID = 0xFFFF  # manufacturer-specific cluster id
_LOGGER = logging.getLogger(__name__)
# Remove the stock zigpy handler for this device so the quirk below takes over.
if TemperatureHumiditySensor in quirks._DEVICE_REGISTRY:
    quirks._DEVICE_REGISTRY.remove(TemperatureHumiditySensor)
class Weather(XiaomiCustomDevice):
    """Xiaomi mija weather sensor device."""

    # signature: what the physical device actually reports over the air.
    signature = {
        # <SimpleDescriptor endpoint=1 profile=260 device_type=24321
        # device_version=1
        # input_clusters=[0, 3, 25, 65535, 18]
        # output_clusters=[0, 4, 3, 5, 25, 65535, 18]>
        1: {
            'manufacturer': 'LUMI',
            'model': 'lumi.sensor_ht',
            'profile_id': zha.PROFILE_ID,
            'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE,
            'input_clusters': [
                BasicCluster.cluster_id,
                Identify.cluster_id,
                XIAOMI_CLUSTER_ID,
                Ota.cluster_id,
                MultistateInput.cluster_id
            ],
            'output_clusters': [
                BasicCluster.cluster_id,
                Groups.cluster_id,
                Identify.cluster_id,
                Scenes.cluster_id,
                Ota.cluster_id,
                XIAOMI_CLUSTER_ID,
                MultistateInput.cluster_id
            ],
        },
        # <SimpleDescriptor endpoint=2 profile=260 device_type=24322
        # device_version=1
        # input_clusters=[3, 18]
        # output_clusters=[4, 3, 5, 18]>
        2: {
            'profile_id': zha.PROFILE_ID,
            'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE2,
            'input_clusters': [
                Identify.cluster_id,
                MultistateInput.cluster_id,
            ],
            'output_clusters': [
                Groups.cluster_id,
                Identify.cluster_id,
                Scenes.cluster_id,
                MultistateInput.cluster_id
            ],
        },
        # <SimpleDescriptor endpoint=3 profile=260 device_type=24323
        # device_version=1
        # input_clusters=[3, 12]
        # output_clusters=[4, 3, 5, 12]>
        3: {
            'profile_id': zha.PROFILE_ID,
            'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE3,
            'input_clusters': [
                Identify.cluster_id,
                AnalogInput.cluster_id
            ],
            'output_clusters': [
                Groups.cluster_id,
                Identify.cluster_id,
                Scenes.cluster_id,
                AnalogInput.cluster_id
            ],
        },
    }

    # replacement: how ZHA should treat the device. Endpoint 1 swaps in the
    # custom Basic/PowerConfiguration cluster *classes* (not ids) and adds the
    # temperature and humidity measurement clusters the stock descriptor lacks.
    replacement = {
        'endpoints': {
            1: {
                'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE2,
                'input_clusters': [
                    BasicCluster,
                    PowerConfigurationCluster,
                    Identify.cluster_id,
                    TemperatureMeasurement.cluster_id,
                    RelativeHumidity.cluster_id,
                    XIAOMI_CLUSTER_ID,
                    Ota.cluster_id
                ],
                'output_clusters': [
                    BasicCluster.cluster_id,
                    Groups.cluster_id,
                    Identify.cluster_id,
                    Scenes.cluster_id,
                    Ota.cluster_id,
                    XIAOMI_CLUSTER_ID,
                    MultistateInput.cluster_id
                ],
            },
            2: {
                'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE2,
                'input_clusters': [
                    Identify.cluster_id
                ],
                'output_clusters': [
                    Groups.cluster_id,
                    Identify.cluster_id,
                    Scenes.cluster_id,
                    MultistateInput.cluster_id
                ],
            },
            # <SimpleDescriptor endpoint=3 profile=260 device_type=24323
            # device_version=1
            # input_clusters=[3, 12]
            # output_clusters=[4, 3, 5, 12]>
            3: {
                'device_type': TEMPERATURE_HUMIDITY_DEVICE_TYPE3,
                'input_clusters': [
                    Identify.cluster_id
                ],
                'output_clusters': [
                    Groups.cluster_id,
                    Identify.cluster_id,
                    Scenes.cluster_id,
                    AnalogInput.cluster_id
                ],
            },
        },
    }
|
from abc import ABCMeta, abstractmethod
import six
from .iter_dispatch import iter_
from .base import BaseItertool
@six.add_metaclass(ABCMeta)
class BaseFilter(BaseItertool):
    """Common base for the predicate-driven itertools clones below.

    Stores the predicate and wraps the sequence via ``iter_``; concrete
    subclasses implement ``__next__``.
    """
    def __init__(self, pred, seq):
        self._predicate = pred
        self._iter = iter_(seq)

    @abstractmethod
    def __next__(self):
        pass
class ifilter(BaseFilter):
    """ifilter(function or None, iterable) --> ifilter object

    Return an iterator yielding those items of iterable for which
    function(item) is true. If function is None, return the items that are
    true.
    """
    def _keep(self, value):
        # A None predicate means "keep truthy items".
        if self._predicate is None:
            return bool(value)
        return self._predicate(value)

    def __next__(self):
        # Advance until an item passes; StopIteration from the underlying
        # iterator propagates naturally when the sequence is exhausted.
        candidate = next(self._iter)
        while not self._keep(candidate):
            candidate = next(self._iter)
        return candidate
class ifilterfalse(ifilter):
    """ifilterfalse(function or None, sequence) --> ifilterfalse object

    Return those items of sequence for which function(item) is false.
    If function is None, return the items that are false.
    """
    def _keep(self, value):
        # Simply invert the parent's keep decision.
        keep = super(ifilterfalse, self)._keep(value)
        return not keep
class takewhile(BaseFilter):
    """takewhile(predicate, iterable) --> takewhile object

    Return successive entries from an iterable as long as the
    predicate evaluates to true for each entry.
    """
    def __next__(self):
        item = next(self._iter)
        if self._predicate(item):
            return item
        # First failing item terminates the iterator for good.
        raise StopIteration
class dropwhile(takewhile):
    """dropwhile(predicate, iterable) --> dropwhile object

    Drop items from the iterable while predicate(item) is true.
    Afterwards, return every element until the iterable is exhausted.
    """
    def __next__(self):
        value = next(self._iter)
        # The lazily-created `_started` flag records that the initial
        # dropping phase is over; afterwards items pass through untouched.
        if not getattr(self, '_started', False):
            while self._predicate(value):
                value = next(self._iter)
            self._started = True
        return value
|
from pseas.instance_selection.instance_selection import InstanceSelection
from typing import Tuple, List, Optional
import numpy as np
class DiscriminationBased(InstanceSelection):
    """
    Discrimination based method based on dominance of algorithms.

    Parameter:
    ----------
    - rho: the domination ratio score = #{ time(algo)/time(best algo) <= rho } / expected_time
    """

    def __init__(self, rho: float) -> None:
        super().__init__()
        self._rho: float = rho

    def ready(self, filled_perf: np.ndarray, perf_mask: np.ndarray, **kwargs) -> None:
        """Pre-compute a discrimination score per instance.

        NOTE(review): the class docstring says "<= rho" but the code counts
        times strictly *greater* than rho * best — confirm the intended
        direction before changing either; behaviour is kept as-is here.
        """
        self._scores = np.zeros((filled_perf.shape[0]))
        for instance in range(self._scores.shape[0]):
            if np.any(perf_mask[instance]):
                times = filled_perf[instance, perf_mask[instance]]
                loc = np.median(times)
                # Broadcasting makes the former np.repeat of the scalar
                # threshold redundant; result is identical.
                self._scores[instance] = np.count_nonzero(times > self._rho * np.min(times)) / loc
            else:
                # No observed runs for this instance: mark least informative.
                self._scores[instance] = -1

    def feed(self, state: Tuple[List[Optional[float]], List[float]]) -> None:
        """Select the not-yet-run instance with the highest score."""
        not_run_mask: np.ndarray = np.array([time is None for time in state[0]])
        # Already-run instances get a sentinel far below any real score.
        others = np.ones_like(self._scores) * -100
        others[not_run_mask] = self._scores[not_run_mask]
        # Cast so the declared `int` contract of choose_instance holds
        # (np.argmax returns a numpy integer).
        self._next: int = int(np.argmax(others))

    def choose_instance(self) -> int:
        return self._next

    def name(self) -> str:
        return f"{self._rho:.2f}-discrimination-based"

    def clone(self) -> 'DiscriminationBased':
        return DiscriminationBased(self._rho)
|
# Generated by Django 3.1.5 on 2021-01-12 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the UniversityDetail table on top of the app's initial migration.

    dependencies = [
        ('WebApp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='UniversityDetail',
            fields=[
                # uni_id is the explicit primary key (no auto "id" column).
                ('uni_id', models.IntegerField(primary_key=True, serialize=False)),
                ('uni_name', models.CharField(max_length=250)),
                ('uni_city', models.CharField(max_length=100)),
                ('uni_type', models.CharField(max_length=100)),
                ('uni_rank', models.IntegerField()),
                ('uni_studentNumber', models.IntegerField()),
                ('uni_intStudents', models.IntegerField()),
            ],
        ),
    ]
|
# For convenience, import collection and numpy
# into the "pyradiomics" namespace
import collections # noqa: F401
import inspect
import logging
import os
import pkgutil
import sys
import tempfile
import numpy # noqa: F401
from six.moves import urllib
from . import imageoperations
def deprecated(func):
    """
    Decorator function to mark functions as deprecated. This is used to ensure deprecated feature functions are not
    added to the enabled features list when enabling 'all' features.
    """
    # Tag the function in place; no wrapper is needed since behaviour is unchanged.
    setattr(func, '_is_deprecated', True)
    return func
def setVerbosity(level):
    """
    Change the amount of information PyRadiomics should print out during extraction. The lower the level, the more
    information is printed to the output (stderr).

    ``level`` uses Python logging levels: 60 quiet, 50 CRITICAL only, 40 ERROR+,
    30 WARNING+, 20 INFO+, 10 DEBUG (everything). Values outside [10, 60] are
    clamped.

    Only the stderr handler level is changed; the radiomics logger itself is
    only *lowered* when necessary and never raised back (so once verbosity hit
    DEBUG, the logger stays at DEBUG). This assumes the handler installed at
    toolbox initialization is still attached.
    """
    global logger, handler
    # Clamp to the supported range: 10 (DEBUG) .. 60 (quiet mode).
    bounded = min(60, max(10, level))
    handler.setLevel(bounded)
    if handler.level < logger.level:  # reduce level of logger if necessary
        logger.setLevel(bounded)
def getFeatureClasses():
    """
    Iterates over all modules of the radiomics package using pkgutil and subsequently imports those modules.

    Return a dictionary of all modules containing featureClasses, with modulename as key, abstract
    class object of the featureClass as value. Assumes only one featureClass per module

    This is achieved by inspect.getmembers. Modules are added if it contains a member that is a class,
    with name starting with 'Radiomics' and is inherited from :py:class:`radiomics.base.RadiomicsFeaturesBase`.

    This iteration only runs once (at initialization of toolbox), subsequent calls return the dictionary created by the
    first call.
    """
    global _featureClasses
    if _featureClasses is None:  # On first call, enumerate possible feature classes and import PyRadiomics modules
        _featureClasses = {}
        for _, mod, _ in pkgutil.iter_modules([os.path.dirname(__file__)]):
            if str(mod).startswith('_'):  # Skip loading of 'private' classes, these don't contain feature classes
                continue
            __import__('radiomics.' + mod)
            module = sys.modules['radiomics.' + mod]
            attributes = inspect.getmembers(module, inspect.isclass)
            for a in attributes:
                if a[0].startswith('Radiomics'):
                    # Walk the MRO (excluding the class itself) so only classes that
                    # inherit from RadiomicsFeaturesBase are registered.
                    for parentClass in inspect.getmro(a[1])[1:]:
                        if parentClass.__name__ == 'RadiomicsFeaturesBase':
                            _featureClasses[mod] = a[1]
                            break
    return _featureClasses
def getImageTypes():
    """
    Returns a list of possible image types (i.e. the possible filters and the "Original", unfiltered image type). This
    function finds the image types dynamically by matching the signature ("get<imageType>Image") against functions defined
    in :ref:`imageoperations <radiomics-imageoperations-label>`. Returns a list containing available image type names
    (<imageType> part of the corresponding function name).

    This iteration only occurs once, at initialization of the toolbox. Found results are stored and returned on subsequent
    calls.
    """
    global _imageTypes
    if _imageTypes is None:  # On first call, enumerate possible input image types (original and any filters)
        prefix, suffix = 'get', 'Image'
        _imageTypes = [name[len(prefix):-len(suffix)] for name in dir(imageoperations)
                       if name.startswith(prefix) and name.endswith(suffix)]
    return _imageTypes
def getTestCase(testCase, dataDirectory=None):
    """
    This function provides an image and mask for testing PyRadiomics. One of seven test cases can be selected:

    - brain1
    - brain2
    - breast1
    - lung1
    - lung2
    - test_wavelet_64x64x64
    - test_wavelet_37x37x37

    Checks if the test case (consisting of an image and mask file with signature <testCase>_image.nrrd and
    <testCase>_label.nrrd, respectively) is available in the ``dataDirectory``. If not available, the testCase is
    downloaded from the GitHub repository and stored in the ``dataDirectory``. Also creates the ``dataDirectory`` if
    necessary.

    If no ``dataDirectory`` has been specified, PyRadiomics will use a temporary directory: <TEMPDIR>/pyradiomics/data.

    If the test case has been found or downloaded successfully, this function returns a tuple of two strings:
    ``(path/to/image.nrrd, path/to/mask.nrrd)``. In case of an error ``(None, None)`` is returned.

    .. note::
      To get the testcase with the corresponding single-slice label, append "_2D" to the testCase.
    """
    global logger, testCases
    label2D = False
    testCase = testCase.lower()
    # A "_2d" suffix selects the single-slice label variant of the same case.
    if testCase.endswith('_2d'):
        label2D = True
        testCase = testCase[:-3]
    if testCase not in testCases:
        raise ValueError('Testcase "%s" not recognized!' % testCase)
    logger.debug('Getting test case %s', testCase)
    if dataDirectory is None:
        dataDirectory = os.path.join(tempfile.gettempdir(), 'pyradiomics', 'data')
        logger.debug('No data directory specified, using temporary directory "%s"', dataDirectory)
    im_name = '%s_image.nrrd' % testCase
    ma_name = '%s_label%s.nrrd' % (testCase, '_2D' if label2D else '')

    def get_or_download(fname):
        # Return the local path of fname, downloading it from GitHub if absent.
        target = os.path.join(dataDirectory, fname)
        if os.path.exists(target):
            logger.debug('File %s already downloaded', fname)
            return target
        # Test case file not found, so try to download it
        logger.info("Test case file %s not available locally, downloading from github...", fname)
        # First check if the folder is available
        if not os.path.isdir(dataDirectory):
            logger.debug('Creating data directory: %s', dataDirectory)
            os.makedirs(dataDirectory)
        # Download the test case files (image and label)
        url = r'https://github.com/Radiomics/pyradiomics/releases/download/v1.0/%s' % fname
        logger.debug('Retrieving file at %s', url)
        _, headers = urllib.request.urlretrieve(url, target)
        # urlretrieve does not raise on HTTP 404; check the response headers.
        if headers.get('status', '') == '404 Not Found':
            raise ValueError('Unable to download image file at %s!', url)
        logger.info('File %s downloaded', fname)
        return target

    logger.debug('Getting Image file')
    imageFile = get_or_download(im_name)
    logger.debug('Getting Mask file')
    maskFile = get_or_download(ma_name)
    return imageFile, maskFile
def getParameterValidationFiles():
    """
    Return the file locations needed to validate a parameter file with
    ``PyKwalify.core``: a tuple ``(schema_file, custom_funcs_file)`` where the
    first element is the schema and the second the python script with custom
    validation functions.
    """
    schema_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'schemas'))
    return (os.path.join(schema_dir, 'paramSchema.yaml'),
            os.path.join(schema_dir, 'schemaFuncs.py'))
class _DummyProgressReporter(object):
"""
This class represents the dummy Progress reporter and is used for where progress reporting is implemented, but not
enabled (when the progressReporter is not set or verbosity level > INFO).
PyRadiomics expects that the _getProgressReporter function returns an object that takes an iterable and 'desc' keyword
argument at initialization. Furthermore, it should be iterable, where it iterates over the iterable passed at
initialization and it should be used in a 'with' statement.
In this class, the __iter__ function redirects to the __iter__ function of the iterable passed at initialization.
The __enter__ and __exit__ functions enable usage in a 'with' statement
"""
def __init__(self, iterable=None, desc='', total=None):
self.desc = desc # A description is not required, but is provided by PyRadiomics
self.iterable = iterable # Iterable is required
def __iter__(self):
return self.iterable.__iter__() # Just iterate over the iterable passed at initialization
def __enter__(self):
return self # The __enter__ function should return itself
def __exit__(self, exc_type, exc_value, tb):
pass # Nothing needs to be closed or handled, so just specify 'pass'
def update(self, n=1):
pass # Nothing needs to be updated, so just specify 'pass'
def getProgressReporter(*args, **kwargs):
    """
    This function returns an instance of the progressReporter, if it is set and the logging level is defined at level INFO
    or DEBUG. In all other cases a dummy progress reporter is returned.

    To enable progress reporting, the progressReporter variable should be set to a class object (NOT an instance), which
    fits the following signature:

    1. Accepts an iterable as the first positional argument and a keyword argument ('desc') specifying a label to display
    2. Can be used in a 'with' statement (i.e. exposes a __enter__ and __exit__ function)
    3. Is iterable (i.e. at least specifies an __iter__ function, which iterates over the iterable passed at
       initialization).

    It is also possible to create your own progress reporter. To achieve this, additionally specify a function `__next__`,
    and have the `__iter__` function return `self`. The `__next__` function takes no arguments and returns a call to the
    `__next__` function of the iterable (i.e. `return self.iterable.__next__()`). Any prints/progress reporting calls can
    then be inserted in this function prior to the return statement.
    """
    global handler, progressReporter
    # Only report real progress when the stderr handler is at INFO or below.
    if progressReporter is not None and logging.NOTSET < handler.level <= logging.INFO:
        return progressReporter(*args, **kwargs)
    else:
        return _DummyProgressReporter(*args, **kwargs)
# Progress reporter class (not instance); None disables progress reporting.
progressReporter = None

# 1. Set up logging
debugging = True
logger = logging.getLogger(__name__)  # 'radiomics'
logger.setLevel(logging.INFO)  # Set default level of logger to INFO to reflect most common setting for a log file

# Set up a handler to print out to stderr (controlled by setVerbosity())
handler = logging.StreamHandler()
# formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s", "%Y-%m-%d %H:%M")  # Alternative format
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
# force level=WARNING for stderr handler, in case logging default is set differently (issue 102)
setVerbosity(logging.WARNING)

# 2. Define the available test cases
testCases = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2', 'test_wavelet_64x64x64', 'test_wavelet_37x37x37')

# 3. Attempt to load and enable the C extensions.
cMatrices = None  # set cMatrices to None to prevent an import error in the feature classes.
cShape = None
try:
    from radiomics import _cmatrices as cMatrices  # noqa: F401
    from radiomics import _cshape as cShape  # noqa: F401
except ImportError as e:
    # Distinguish "running from source without building" from a genuinely broken install.
    if os.path.isdir(os.path.join(os.path.dirname(__file__), '..', 'data')):
        # It looks like PyRadiomics is run from source (in which case "setup.py develop" must have been run)
        logger.critical('Apparently running from root, but unable to load C extensions... '
                        'Did you run "python setup.py build_ext --inplace"?')
        raise Exception('Apparently running from root, but unable to load C extensions... '
                        'Did you run "python setup.py build_ext --inplace"?')
    else:
        logger.critical('Error loading C extensions', exc_info=True)
        raise e

# 4. Enumerate implemented feature classes and input image types available in PyRadiomics
_featureClasses = None
_imageTypes = None
getFeatureClasses()
getImageTypes()

# 5. Set the version using the versioneer scripts
from ._version import get_versions  # noqa: I202

__version__ = get_versions()['version']
del get_versions
|
"""
Arquivos com funções utilizadas na Criptogafia de César
Autor: Thiago Souza
"""
# Função para Criptografar a mensagem
def criptografar(mensagem, chave):
    """Encrypt *mensagem* with a Caesar cipher of shift *chave*.

    Case is preserved, characters outside a-z/A-Z pass through unchanged,
    and a negative shift decrypts (Python's modulo keeps the index valid).
    """
    # Lowercase alphabet built from the ASCII table (codes 97-122).
    alfabeto = [chr(i) for i in range(97, 123)]
    tam_alfabeto = len(alfabeto)
    cifrada = []
    for caracter in mensagem:
        nova_letra = caracter
        # Only shift characters that belong to the alphabet.
        if caracter.lower() in alfabeto:
            indice = alfabeto.index(caracter.lower())
            nova_letra = alfabeto[(indice + chave) % tam_alfabeto]
            if caracter.isupper():
                nova_letra = nova_letra.upper()
        cifrada.append(nova_letra)
    # Single join instead of quadratic string concatenation.
    return "".join(cifrada)
# Função para descriptografar a mensagem
# Decrypt by applying the cipher with the negated shift.
def descriptografar(mensagem, chave):
    """Decrypt *mensagem* encrypted with Caesar shift *chave*."""
    return criptografar(mensagem, -chave)
# Função para escrever em um arquivo a mensagem cifrada
def cifrardoc(fonte, destino, chave):
    """Read *fonte*, encrypt its contents with shift *chave*, write *destino*."""
    # Context managers guarantee the handles are closed even on error.
    with open(fonte, 'r') as arquivo:
        msg = arquivo.read()
    cifrada = criptografar(msg, chave)
    # Write the encrypted message to the destination file.
    with open(destino, "w") as arquivo:
        arquivo.write(cifrada)
    print("Arquivo de mensagem cifrada executada com sucesso!")
# Função para escrever em um arquivo a mensagem decifrada
def decifrardoc(fonte, destino, chave):
    """Read *fonte*, decrypt its contents with shift *chave*, write *destino*."""
    # Context managers guarantee the handles are closed even on error.
    with open(fonte, 'r') as arquivo:
        msg = arquivo.read()
    decifrada = descriptografar(msg, chave)
    # Write the decrypted message to the destination file.
    with open(destino, "w") as arquivo:
        arquivo.write(decifrada)
    # Fixed copy-paste from cifrardoc: this is the *decrypted* file message.
    print("Arquivo de mensagem decifrada executada com sucesso!")
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from dpp_nets.my_torch.linalg import custom_decomp, custom_inverse
from dpp_nets.my_torch.DPP import DPP, AllInOne
from dpp_nets.my_torch.utilities import compute_baseline
from itertools import accumulate
class NetBaseline(nn.Module):
    """Mean-of-words baseline: averages the non-padding word embeddings of
    each set and feeds the result through a 3-layer MLP."""

    def __init__(self, embd_dim, hidden_dim, target_dim):
        super(NetBaseline, self).__init__()
        self.embd_dim = embd_dim
        self.hidden_dim = hidden_dim
        self.target_dim = target_dim
        # Prediction MLP: embd -> hidden -> hidden -> target
        self.pred_layer1 = nn.Linear(embd_dim, hidden_dim)
        self.pred_layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.pred_layer3 = nn.Linear(hidden_dim, target_dim)
        self.pred_net = nn.Sequential(self.pred_layer1, nn.ReLU(),
                                      self.pred_layer2, nn.ReLU(),
                                      self.pred_layer3)

    def forward(self, words):
        # words: (batch, max_set_size, embd_dim); all-zero rows are padding.
        batch_size, max_set_size, embd_dim = words.size()
        summed = words.sum(1)
        # Count real (non-zero) words per instance; expand for the division.
        counts = Variable(
            words.data.sum(2, keepdim=True).abs().sign().sum(1).expand_as(summed))
        averaged = summed / counts
        return self.pred_net(averaged)
class SetNetBaseline(nn.Module):
    """
    Works with different set sizes, i.e. it does masking!
    """
    def __init__(self, embd_dim, hidden_dim, enc_dim, target_dim):
        super(SetNetBaseline, self).__init__()
        self.embd_dim = embd_dim
        self.hidden_dim = hidden_dim
        self.enc_dim = enc_dim
        self.target_dim = target_dim
        # Encodes each word into a different vector
        self.enc_layer1 = nn.Linear(embd_dim, hidden_dim)
        self.enc_layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.enc_layer3 = nn.Linear(hidden_dim, enc_dim)
        self.enc_net = nn.Sequential(self.enc_layer1, nn.ReLU(), self.enc_layer2, nn.ReLU(), self.enc_layer3)
        # Uses the sum of the encoded vectors to make a final prediction
        self.pred_layer1 = nn.Linear(enc_dim, hidden_dim)
        self.pred_layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.pred_layer3 = nn.Linear(hidden_dim, target_dim)
        self.pred_net = nn.Sequential(self.pred_layer1, nn.ReLU(), self.pred_layer2, nn.ReLU(), self.pred_layer3)

    def forward(self, words):
        """
        words is a 3D tensor with dimension: batch_size x max_set_size x embd_dim
        All-zero word rows are treated as padding throughout.
        """
        embd_dim = self.embd_dim
        hidden_dim = self.hidden_dim
        enc_dim = self.enc_dim
        target_dim = self.target_dim
        batch_size, max_set_size, embd_dim = words.size()
        # Unpacking to send through encoder network
        # Register indices of individual instances in batch for reconstruction
        # (s_ix/e_ix are start/end offsets of each instance in the flattened view).
        lengths = words.data.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)
        s_ix = list(lengths.squeeze().cumsum(0).long() - lengths.squeeze().long())
        e_ix = list(lengths.squeeze().cumsum(0).long())
        # Filter out zero words
        mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()
        words = words.masked_select(Variable(mask)).view(-1, embd_dim)
        # Send through encoder network
        enc_words = self.enc_net(words)
        # Compilation of encoded words for each instance in sample
        # Produce summed representation (code) for each instance in batch using encoded words:
        codes = []
        for i, (s, e) in enumerate(zip(s_ix, e_ix)):
            # Mean over this instance's (non-padding) encoded words.
            code = enc_words[s:e].mean(0, keepdim=True)
            codes.append(code)
        codes = torch.stack(codes).squeeze(1)
        assert batch_size == codes.size(0)
        assert enc_dim == codes.size(1)
        # Produce predictions using codes
        pred = self.pred_net(codes)
        return pred
class AttentionBaseline(nn.Module):
    """
    Works with different set sizes, i.e. it does masking!
    """
    def __init__(self, embd_dim, hidden_dim, target_dim):
        super(AttentionBaseline, self).__init__()
        self.embd_dim = embd_dim
        self.hidden_dim = hidden_dim
        self.target_dim = target_dim
        # Attention Network: scores each word against the set-mean context.
        self.attention_layer = nn.Sequential(nn.Linear(2 * embd_dim, hidden_dim), nn.Tanh())
        self.v = nn.Parameter(torch.randn(hidden_dim, 1))
        # Uses the sum of the encoded vectors to make a final prediction
        self.pred_layer1 = nn.Linear(embd_dim, hidden_dim)
        self.pred_layer2 = nn.Linear(hidden_dim, hidden_dim)
        self.pred_layer3 = nn.Linear(hidden_dim, target_dim)
        self.pred_net = nn.Sequential(self.pred_layer1, nn.ReLU(), self.pred_layer2, nn.ReLU(), self.pred_layer3)
        # Introspection state, refreshed/extended on every forward pass.
        # NOTE(review): self.attention is appended to but never cleared, so it
        # grows across forward calls — confirm this accumulation is intended.
        self.s_ix = []
        self.e_ix = []
        self.attention = []

    def forward(self, words):
        """
        words is a 3D tensor with dimension: batch_size x max_set_size x embd_dim
        All-zero word rows are treated as padding.
        """
        embd_dim = self.embd_dim
        hidden_dim = self.hidden_dim
        target_dim = self.target_dim
        batch_size, max_set_size, embd_dim = words.size()
        # Create context: per-instance mean of (non-padding) words, broadcast
        # back over every word position.
        lengths = words.sum(2, keepdim=True).abs().sign().sum(1, keepdim=True)
        context = (words.sum(1, keepdim=True) / lengths.expand_as(words.sum(1, keepdim=True))).expand_as(words)
        # Filter out zero words
        mask = words.data.sum(2, keepdim=True).abs().sign().expand_as(words).byte()
        words = words.masked_select(Variable(mask)).view(-1, embd_dim)
        context = context.masked_select(Variable(mask)).view(-1, embd_dim)
        # Concatenate and compute attention
        batch_x = torch.cat([words, context], dim=1)
        attention_unnorm = self.attention_layer(batch_x).mm(self.v)
        # Start/end offsets of each instance inside the flattened word list.
        self.s_ix = list(lengths.squeeze().cumsum(0).long().data - lengths.squeeze().long().data)
        self.e_ix = list(lengths.squeeze().cumsum(0).long().data)
        # Apply attention: softmax-normalise per instance, then weight-sum words.
        reps = []
        for i, (s, e) in enumerate(zip(self.s_ix, self.e_ix)):
            attention = (nn.Softmax()(attention_unnorm[s:e].t())).t()
            self.attention.append(attention.data)
            rep = (attention * words[s:e]).sum(0)
            reps.append(rep)
        weighted_words = torch.stack(reps)
        assert weighted_words.size(0) == batch_size
        pred = self.pred_net(weighted_words)
        return pred
|
import json
import os
import copy
from models import DownloadSpec
DEFAULT_SOURCES_FILE_PATH = os.path.join(os.path.dirname(__file__), 'sources.json')
DEFAULT_ROUTING_FILE_PATH = os.path.join(os.path.dirname(__file__), 'agents_routing.json')
class SourcesManager:
    """Loads source and agent-routing configuration from JSON and answers
    lookups for supported sources and the agent serving a download spec."""

    def __init__(self, sources_file_path=DEFAULT_SOURCES_FILE_PATH, routing_file_path=DEFAULT_ROUTING_FILE_PATH):
        # Raw layout: source -> 'operatingSystems' -> os -> 'architectures'
        # -> arch -> {..., 'agent': <agent_key>}.
        with open(sources_file_path, 'r') as f:
            self.__sources = json.load(f)
        with open(routing_file_path, 'r') as f:
            self.__routing = json.load(f)
        # Public copy of the sources with the internal 'agent' keys stripped.
        self.__supported_sources = copy.deepcopy(self.__sources)
        for source in self.__supported_sources.values():
            for operating_system in source['operatingSystems'].values():
                for arch in operating_system['architectures'].values():
                    del arch['agent']

    def get_host_port_for_agent(self, agent_key):
        """Return (host, port) for an agent; KeyError for unknown keys."""
        agent = self.__routing[agent_key]
        return agent['host'], agent['port']

    def get_supported_sources(self):
        # Deep copy so callers cannot mutate internal state.
        return copy.deepcopy(self.__supported_sources)

    def get_agent_key_for_spec(self, spec: DownloadSpec):
        """Resolve which agent serves the given (source, os, architecture).

        Raises ValueError with a level-specific message for anything
        unsupported.
        """
        if spec.source not in self.__sources:
            raise ValueError(f'Source "{spec.source}" is not supported')
        operating_systems = self.__sources[spec.source]['operatingSystems']
        if spec.os not in operating_systems:
            raise ValueError(f'Operating system "{spec.os}" is not supported')
        architectures = operating_systems[spec.os]['architectures']
        if spec.architecture not in architectures:
            raise ValueError(f'Architecture "{spec.architecture}" is not supported')
        agent_key = architectures[spec.architecture]['agent']
        return agent_key
|
#!/usr/bin/python
"""
Main Workspace Run Script
"""
import os
import sys
import logging
import coloredlogs
import json
import math
import glob
import yaml
import yamale
import scripts.functions as func
from copy import copy
from subprocess import run, call
### Enable logging
# Configure the root logger early so every subsequent startup step can report
# progress to stdout (captured by `docker logs`).
logging.basicConfig(
    format="%(asctime)s [%(levelname)s] %(message)s",
    level=logging.INFO,
    stream=sys.stdout,
)
log = logging.getLogger(__name__)
log.info("Starting...")
### Read YAML config file
configs_list = dict()
config_path = str()
# Pick the config file to load. The combined-existence check must run FIRST:
# in the original ordering it sat after the plain `config.yaml` check and was
# therefore unreachable (the first branch already matched whenever both files
# existed).
if os.path.exists('/scripts/config.yml') and os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yml'
    log.warning("both config.yaml and config.yml exists, using config.yml")
    os.remove('/scripts/config.yaml')
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
elif os.path.exists('/scripts/config.yaml'):
    config_path = '/scripts/config.yaml'
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
elif os.path.exists('/scripts/config.yml'):
    config_path = '/scripts/config.yml'
    # Validate file
    schema = yamale.make_schema('/scripts/schema.yaml')
    data = yamale.make_data(config_path)
    valid_config = func.yaml_valid(schema, data, "INFO")
else:
    log.debug("No yaml config files available to load")
# Load config as yaml object (only when a file was found AND it validated)
if os.path.exists(config_path):
    if valid_config:
        log.info(f"Loading config file: '{config_path}'")
        with open(config_path, "r") as f:
            configs_list = yaml.load(f, Loader=yaml.FullLoader)
            log.debug(configs_list)
else:
    log.debug(f"Config does not exist: '{config_path}'")
### Read or set docker default envs
# Snapshot of every container-level setting with a sane default when the env
# var is unset. NOTE(review): several keys are renamed from their source env
# var (e.g. WORKSPACE_USER reads WORKSPACE_AUTH_USER, CADDY_* read the
# VIRTUAL_*/PROXY_*/LETSENCRYPT_* vars) — confirm callers expect the renamed
# keys.
docker_env = {
    'LOG_VERBOSITY': os.getenv("LOG_VERBOSITY", "INFO"),
    'CONFIG_BACKUP_ENABLED': os.getenv("CONFIG_BACKUP_ENABLED", "true"),
    'WORKSPACE_USER': os.getenv("WORKSPACE_AUTH_USER", "coder"),
    'WORKSPACE_GROUP': os.getenv("WORKSPACE_AUTH_GROUP", "users"),
    'WORKSPACE_USER_SHELL': os.getenv("WORKSPACE_USER_SHELL", "zsh"),
    'WORKSPACE_USER_PASSWORD': os.getenv("WORKSPACE_AUTH_PASSWORD", "password"),
    'RESOURCES_PATH': os.getenv("RESOURCES_PATH", "/resources"),
    'WORKSPACE_HOME': os.getenv("WORKSPACE_HOME", "/workspace"),
    'APPS_PATH': os.getenv("APPS_PATH", "/apps"),
    'DATA_PATH': os.getenv("DATA_PATH", "/data"),
    'PROXY_BASE_URL': os.getenv("PROXY_BASE_URL", "/"),
    'ZSH_PROMPT': os.getenv("ZSH_PROMPT", "none"),
    'ZSH_THEME': os.getenv("ZSH_THEME", "spaceship"),
    'ZSH_PLUGINS': os.getenv("ZSH_PLUGINS", "all"),
    'CONDA_ENV_PATH': os.getenv("CONDA_ENV_PATH", ""),
    # Caddy reverse-proxy settings
    'CADDY_VIRTUAL_PORT': os.getenv("VIRTUAL_PORT", "80"),
    'CADDY_VIRTUAL_HOST': os.getenv("VIRTUAL_HOST", ""),
    'CADDY_VIRTUAL_BIND_NET': os.getenv("VIRTUAL_BIND_NET", "proxy"),
    'CADDY_VIRTUAL_PROTO': os.getenv("VIRTUAL_PROTO", "http"),
    'CADDY_VIRTUAL_BASE_URL': os.getenv("VIRTUAL_BASE_URL", "/"),
    'CADDY_PROXY_ENCODINGS_GZIP': os.getenv("PROXY_ENCODINGS_GZIP", "true"),
    'CADDY_PROXY_ENCODINGS_ZSTD': os.getenv("PROXY_ENCODINGS_ZSTD", "true"),
    'CADDY_PROXY_TEMPLATES': os.getenv("PROXY_TEMPLATES", "true"),
    'CADDY_LETSENCRYPT_EMAIL': os.getenv("LETSENCRYPT_EMAIL", "admin@example.com"),
    'CADDY_LETSENCRYPT_ENDPOINT': os.getenv("LETSENCRYPT_ENDPOINT", "dev"),
    'CADDY_HTTP_PORT': os.getenv("HTTP_PORT", "80"),
    'CADDY_HTTPS_ENABLE': os.getenv("HTTPS_ENABLE", "true"),
    'CADDY_HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
    'CADDY_AUTO_HTTPS': os.getenv("AUTO_HTTPS", "true"),
    'CADDY_WORKSPACE_SSL_ENABLED': os.getenv("WORKSPACE_SSL_ENABLED", "false"),
    # Filebrowser settings
    'FB_PORT': os.getenv("FB_PORT", "8055"),
    'FB_BASE_URL': os.getenv("FB_BASE_URL", "/data"),
    'FB_ROOT_DIR': os.getenv("FB_ROOT_DIR", "/workspace"),
    # VS Code (code-server) settings
    'VSCODE_BIND_ADDR': os.getenv("VSCODE_BIND_ADDR", "0.0.0.0:8300"),
    'VSCODE_BASE_URL': os.getenv("VSCODE_BASE_URL", "/code"),
    # Generic app settings
    'APP_BIND_ADDR': os.getenv("APP_BIND_ADDR", "0.0.0.0:8080"),
    'APP_BASE_URL': os.getenv("APP_BASE_URL", "/app"),
    'APP_ROOT_DIR': os.getenv("APP_ROOT_DIR", "/apps/app"),
    'APP_USER': os.getenv("APP_USER", "admin"),
    'APP_PASSWORD': os.getenv("APP_PASSWORD", "password")
}
### Set verbosity level. log.info occasionally throws EOF errors with high verbosity
# Accept only standard logging level names; anything else falls back to INFO.
if docker_env.get("LOG_VERBOSITY") in [
    "DEBUG",
    "INFO",
    "WARNING",
    "ERROR",
    "CRITICAL"
]:
    verbosity = docker_env.get("LOG_VERBOSITY")
else:
    # Fixed: the original format string was missing its closing quote,
    # producing a malformed log message.
    log.info("invalid verbosity: '{}'".format(docker_env.get("LOG_VERBOSITY")))
    verbosity = "INFO"
### opts_json cli options
opts = {
    "verbosity": verbosity
}
log.setLevel(verbosity)
# Setup colored console logs
coloredlogs.install(fmt='%(asctime)s [%(levelname)s] %(message)s', level=verbosity, logger=log)
### Reconcile docker env var with corresponding config setting
system_configs = dict()
# copy and save user configs. Fixed: use .get() — the "users" key may be
# absent from a minimal yaml config (the rest of the script treats a missing
# users list as None); the original subscript raised KeyError in that case.
users_config_copy = copy(configs_list.get("users"))
# if system not configured in yaml, then set to docker envs
if configs_list.get("system") is None:
    log.info(f"System not defined in yaml config file. Importing settings from docker env.")
    for env, value in docker_env.items():
        log.debug(f"setting: '{env.lower()}' --> '{value}'")
        system_configs[env.lower()] = value
    # copy into system key
    configs_list["system"] = copy(system_configs)
    # copy users back
    configs_list["users"] = copy(users_config_copy)
# reconcile if env appears in both
else:
    for env, value in docker_env.items():
        for config, setting in configs_list.get("system").items():
            if config == env.lower():
                if setting == value:
                    log.debug(f"yaml config same as docker environment value: '{config}' --> '{setting}'")
                    system_configs[config] = value
                else:
                    # yaml config wins over the docker env value
                    log.warning(f"using config setting instead of docker environment value - {config}: '{value}'--> '{setting}'")
                    system_configs[config] = setting
        if not env.lower() in list(configs_list.get("system").keys()):
            log.debug(f"not set in yaml config, setting: '{env.lower()}' --> '{value}'")
            system_configs[env.lower()] = value
    # copy into system key
    configs_list["system"] = copy(system_configs)
    # copy users back
    configs_list["users"] = copy(users_config_copy)
### Reset verbosity level according to yaml file. log.info occasionally throws EOF errors with high verbosity
# Same validation as above, now against the reconciled yaml/system settings.
if configs_list.get("system").get("log_verbosity") in [
    "DEBUG",
    "INFO",
    "WARNING",
    "ERROR",
    "CRITICAL"
]:
    verbosity = configs_list.get("system").get("log_verbosity")
else:
    # Fixed: the original format string was missing its closing quote.
    log.info("invalid verbosity: '{}'".format(configs_list.get("system").get("log_verbosity")))
    verbosity = "INFO"
### opts_json cli options
opts = {
    "verbosity": verbosity
}
log.setLevel(verbosity)
# Template for a single workspace user. Every field falls back to the
# reconciled system settings above; it is both the default user injected when
# the yaml config defines no users, and the reference used to fill missing
# keys on a configured user.
default_user = [{
    'name': configs_list.get("system").get("workspace_user"),
    'group': configs_list.get("system").get("workspace_group"),
    'uid': "1000",
    'gid': "100",
    'shell': configs_list.get("system").get("workspace_user_shell"),
    'password': configs_list.get("system").get("workspace_user_password"),
    # Directories created/owned for the user (mode is an octal string)
    'directories': [
        {
            'name': 'home',
            'path': os.path.join("/home", configs_list.get("system").get("workspace_user")),
            'mode': '755'
        },
        {
            'name': 'resources',
            'path': configs_list.get("system").get("resources_path"),
            'mode': '755'
        },
        {
            'name': 'workspace',
            'path': configs_list.get("system").get("workspace_home"),
            'mode': '755'
        },
        {
            'name': 'data',
            'path': configs_list.get("system").get("data_path"),
            'mode': '755'
        },
        {
            'name': 'apps',
            'path': configs_list.get("system").get("apps_path"),
            'mode': '755'
        },
        {
            'name': 'app',
            'path': configs_list.get("system").get("app_root_dir"),
            'mode': '755'
        }],
    # Dotfiles/paths preserved by the config backup feature
    'backup_paths': [
        f'/home/{configs_list.get("system").get("workspace_user")}/.config',
        f'/home/{configs_list.get("system").get("workspace_user")}/.ssh',
        f'/home/{configs_list.get("system").get("workspace_user")}/.zshrc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.bashrc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.profile',
        f'/home/{configs_list.get("system").get("workspace_user")}/.condarc',
        f'/home/{configs_list.get("system").get("workspace_user")}/.oh-my-zsh',
        f'/home/{configs_list.get("system").get("workspace_user")}/.gitconfig',
        f'/home/{configs_list.get("system").get("workspace_user")}/filebrowser.db',
        f'/home/{configs_list.get("system").get("workspace_user")}/.local',
        f'/home/{configs_list.get("system").get("workspace_user")}/.conda',
        f'/home/{configs_list.get("system").get("workspace_user")}/.vscode',
        f'/home/{configs_list.get("system").get("workspace_user")}/.jupyter'
    ],
    'conda': {
        'env': ''
    },
    # zsh prompt/theme/plugin selection; entries may be oh-my-zsh plugin names
    # or git URLs to clone
    'zsh': {
        'set_prompt': configs_list.get("system").get("zsh_prompt"),
        'set_theme': configs_list.get("system").get("zsh_theme"),
        'set_plugins': configs_list.get("system").get("zsh_plugins"),
        'prompt': [
            'https://github.com/sindresorhus/pure'
        ],
        'theme': [
            'https://github.com/romkatv/powerlevel10k',
            'https://github.com/denysdovhan/spaceship-prompt',
            'https://github.com/sobolevn/sobole-zsh-theme'
        ],
        'plugins': [
            'git',
            'k',
            'extract',
            'cp',
            'yarn',
            'npm',
            'supervisor',
            'rsync',
            'command-not-found',
            'autojump',
            'colored-man-pages',
            'git-flow',
            'git-extras',
            'python',
            'zsh-autosuggestions',
            'history-substring-search',
            'zsh-completions',
            'ssh-agent',
            'https://github.com/zsh-users/zsh-autosuggestions',
            'https://github.com/zsh-users/zsh-completions',
            'https://github.com/zsh-users/zsh-syntax-highlighting',
            'https://github.com/zsh-users/zsh-history-substring-search',
            'https://github.com/supercrabtree/k'
        ]},
    # Empty-string placeholders to be filled from the user's yaml config
    'ssh': {
        'pub_keys': [''],
        'configs': [{
            'hostname': '',
            'port': '',
            'user': '',
            'pub_key_auth': '',
            'id_only': '',
            'id_file_path': ''
        }]
    },
    'filebrowser': {
        'port': configs_list.get("system").get("fb_port"),
        'base_url': configs_list.get("system").get("fb_base_url"),
        'root_dir': configs_list.get("system").get("fb_root_dir")
    },
    'vscode': {
        'bind_addr': configs_list.get("system").get("vscode_bind_addr"),
        'base_url': configs_list.get("system").get("vscode_base_url"),
        'extensions': [
            'ms-python.python',
            'almenon.arepl',
            'batisteo.vscode-django',
            'bierner.color-info',
            'bierner.markdown-footnotes',
            'bierner.markdown-mermaid',
            'bierner.markdown-preview-github-styles',
            'CoenraadS.bracket-pair-colorizer-2',
            'DavidAnson.vscode-markdownlint',
            'donjayamanne.githistory',
            'donjayamanne.python-extension-pack',
            'eamodio.gitlens',
            'hbenl.vscode-test-explorer',
            'henriiik.docker-linter',
            'kamikillerto.vscode-colorize',
            'kisstkondoros.vscode-gutter-preview',
            'littlefoxteam.vscode-python-test-adapter',
            'magicstack.MagicPython',
            'ms-azuretools.vscode-docker',
            'ms-toolsai.jupyter',
            'naumovs.color-highlight',
            'shd101wyy.markdown-preview-enhanced',
            'streetsidesoftware.code-spell-checker',
            'tht13.html-preview-vscode',
            'tht13.python',
            'tushortz.python-extended-snippets',
            'wholroyd.jinja',
            'yzhang.markdown-all-in-one'
        ]
    },
    'app': {
        'bind_addr': configs_list.get("system").get("app_bind_addr"),
        'base_url': configs_list.get("system").get("app_base_url"),
        'root_dir': configs_list.get("system").get("app_root_dir"),
        'user': configs_list.get("system").get("app_user"),
        'password': configs_list.get("system").get("app_password")
    }
}]
def set_user_config(user_config, default_user, level):
    """Reconcile one user-level setting between the yaml config and docker env.

    Parameters
    ----------
    user_config : dict
        Mapping with keys 'yaml_config_name', 'yaml_config_value',
        'docker_env_name', 'docker_env_value' and 'dict_path' (a
        [section, key] pair locating the setting inside ``configs_list``).
    default_user : list
        Unused here; kept for interface compatibility with callers.
    level : str
        Logging level name applied for the duration of the call.

    NOTE(review): ``dict_path[0]`` addresses ``configs_list['users']``, which
    is a list — indexing it with a string key would raise TypeError. The call
    sites are currently commented out; verify before re-enabling.
    """
    log.setLevel(level)
    log.info(user_config.get("yaml_config_value"))
    log.info(user_config.get("docker_env_value"))
    if user_config.get("yaml_config_value") is None:
        log.info("no setting found for '{}', setting: '{}'".format(user_config.get("yaml_config_name"), user_config.get("docker_env_value")))
        # Fixed: the original compared the dict_path list itself to 2; the
        # intent is clearly a length check.
        if len(user_config.get("dict_path")) == 2:
            configs_list.get(user_config.get("dict_path")[0])[user_config.get("dict_path")[1]] = user_config.get("docker_env_value")
    elif user_config.get("yaml_config_value") == user_config.get("docker_env_value"):
        log.debug("yaml config same as docker environment value: {} --> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value")))
    else:
        log.warning("using user config setting instead of docker environment value - {}: '{}'--> '{}'".format(user_config.get("docker_env_name"), user_config.get("docker_env_value"), user_config.get("yaml_config_value")))
# Mapping of user-level yaml settings to their docker-env counterparts, used
# by set_user_config() (call sites currently commented out below).
# Fixed: guard against a missing/None users list — the original
# `configs_list.get("users")[0]` raised TypeError when no users were defined.
_first_user = (configs_list.get("users") or [{}])[0]
user_configs = [
    {
        "yaml_config_name": "name",
        "docker_env_name": "workspace_user",
        "yaml_config_value": _first_user.get("name"),
        "docker_env_value": configs_list.get("system").get("workspace_user"),
        "dict_path": ["users", "name"]
    },
    {
        "yaml_config_name": "group",
        "docker_env_name": "workspace_group",
        "yaml_config_value": _first_user.get("group"),
        "docker_env_value": configs_list.get("system").get("workspace_group"),
        "dict_path": ["users", "group"]
    },
    {
        "yaml_config_name": "shell",
        "docker_env_name": "workspace_user_shell",
        "yaml_config_value": _first_user.get("shell"),
        "docker_env_value": configs_list.get("system").get("workspace_user_shell"),
        "dict_path": ["users", "shell"]
    },
    {
        "yaml_config_name": "password",
        "docker_env_name": "workspace_user_password",
        "yaml_config_value": _first_user.get("password"),
        "docker_env_value": configs_list.get("system").get("workspace_user_password"),
        # Fixed: copy-paste error — this entry pointed at ["users", "shell"]
        "dict_path": ["users", "password"]
    },
]
### Set user config
if configs_list.get("users") is None:
    log.info(f"Users not defined in yaml config file. Going with single user mode and importing settings from docker env or setting from default")
    configs_list["users"] = default_user
    # Show to console
    default_user_json = json.dumps(default_user, indent = 4)
# Fixed: the merge loop below previously lived inside the `len(...) == 1`
# elif that followed the None branch above, so when the user list was
# injected from `default_user` the loop never ran, leaving `user`, `home`
# and `password` unbound for the later workspace_env assignments.
if len(configs_list.get("users")) == 0:
    log.info("User's list empty, populate and restart container")
    sys.exit()
elif len(configs_list.get("users")) > 1:
    log.info("More than 2 users defined, haven't build this functionality yet. Remove extra users and restart container.")
    sys.exit()
else:
    log.info("Building a single user environment")
    # what's the point of this? overwrite workspace envs with corresponding user envs? Maybe not good to touch and better keep docker envs concistent with this dict. Don't overwrite with user settings. Also simpler
    #for uc in user_configs:
    #set_user_config(uc, default_user, verbosity)
    user_count = 0
    for u in configs_list.get("users"):
        log.debug(f"working on user count: '{user_count}'")
        # fill any key the user omitted from the default template, and pick
        # out the values needed later for the workspace environment
        for default_config, default_setting in default_user[0].items():
            for config, setting in u.items():
                if config == default_config:
                    if setting == default_setting:
                        log.debug(f"yaml config setting same as default: '{config}' --> '{setting}'")
                    else:
                        log.debug(f"yaml config setting differs from default - {config}: '{default_setting}'--> '{setting}'")
                    if config == "name":
                        user = setting
                        home = os.path.join("/home", user)
                    if config == "password":
                        password = setting
            if not default_config in list(u.keys()):
                log.info(f"not set in yaml config, setting from default settings: '{default_config}' --> '{default_setting}'")
                configs_list.get("users")[user_count][default_config] = default_setting
        user_count += 1
    log.info(f"setting workspace user to: '{user}'")
# Dump into JSON for passage into scripts (handed to subprocesses via CLI args)
configs_list_json = json.dumps(configs_list)
### Write docker envs to system environment
# (currently disabled — envs are instead merged into workspace_env below)
#for env, value in docker_env.items():
# func.set_env_variable(env, value)
### Clean up envs
# opts_json arguments to json
opts_json = json.dumps(opts)
### Dynamically set MAX_NUM_THREADS
ENV_MAX_NUM_THREADS = os.getenv("MAX_NUM_THREADS", None)
if ENV_MAX_NUM_THREADS:
    # Determine the number of available CPU resources, but limit to a max number
    if ENV_MAX_NUM_THREADS.lower() == "auto":
        ENV_MAX_NUM_THREADS = str(math.ceil(os.cpu_count()))
        try:
            # read out docker information - if docker limits cpu quota
            cpu_count = math.ceil(
                int(
                    os.popen("cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us")
                    .read()
                    .replace("\n", "")
                )
                / 100000
            )
            if cpu_count > 0 and cpu_count < os.cpu_count():
                ENV_MAX_NUM_THREADS = str(cpu_count)
        except Exception:
            # Narrowed from a bare `except:`; the quota file may be missing or
            # non-numeric, in which case the os.cpu_count() value stands.
            pass
    if (
        not ENV_MAX_NUM_THREADS
        or not ENV_MAX_NUM_THREADS.isnumeric()
        or ENV_MAX_NUM_THREADS == "0"
    ):
        ENV_MAX_NUM_THREADS = "4"
    if int(ENV_MAX_NUM_THREADS) > 8:
        # there should be at least one thread less compared to cores
        ENV_MAX_NUM_THREADS = str(int(ENV_MAX_NUM_THREADS) - 1)
    # set a maximum of 32, in most cases too many threads are adding too much overhead
    if int(ENV_MAX_NUM_THREADS) > 32:
        ENV_MAX_NUM_THREADS = "32"
    # only set if it is not None or empty
    # OMP_NUM_THREADS: Suggested value: vCPUs / 2 in which vCPUs is the number of virtual CPUs.
    # Fixed: these helpers live in scripts.functions (imported as `func`); the
    # bare `set_env_variable(...)` calls raised NameError at runtime.
    func.set_env_variable(
        "OMP_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenMP
    func.set_env_variable(
        "OPENBLAS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # OpenBLAS
    func.set_env_variable("MKL_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # MKL
    func.set_env_variable(
        "VECLIB_MAXIMUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Accelerate
    func.set_env_variable(
        "NUMEXPR_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr
    func.set_env_variable(
        "NUMEXPR_MAX_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numexpr - maximum
    func.set_env_variable(
        "NUMBA_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Numba
    func.set_env_variable(
        "SPARK_WORKER_CORES", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Spark Worker
    func.set_env_variable(
        "BLIS_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True
    )  # Blis
    func.set_env_variable("TBB_NUM_THREADS", ENV_MAX_NUM_THREADS, ignore_if_set=True)  # TBB
    # GOTO_NUM_THREADS
### Set container environment
# Get system env and display
system_env = os.environ.copy()
log.debug("System Environments:")
log.debug(func.capture_cmd_stdout('env', system_env))
# Display docker env
log.debug("Docker Environments:")
log.debug(func.capture_cmd_stdout('env', docker_env))
# Merge system, docker env as workspace env and display
workspace_env = func.merge_two_dicts(system_env, docker_env)
log.debug("Workspace Environment")
log.debug(func.capture_cmd_stdout('env', workspace_env))
# Format workspace env as json for passage into scripts
workspace_env_json = json.dumps(workspace_env)
### Configure user
log.info(f"configuring user")
run(
    ['python', f"/scripts/configure_user.py",
     '--opts', opts_json,
     '--env', workspace_env_json,
     '--configs', configs_list_json
     ],
    env=workspace_env
)
### Set workspace user and home
# NOTE(review): `user`, `home` and `password` are bound inside the user-merge
# loop above — confirm they are always set before this point.
workspace_env['USER'] = user
workspace_env['HOME'] = home
workspace_env['WORKSPACE_USER'] = user
workspace_env['WORKSPACE_USER_HOME'] = home
workspace_env['WORKSPACE_USER_PASSWORD'] = password
### Start workspace
# Fixed: sys.exit() was given the CompletedProcess object itself, which
# prints its repr and always exits with status 1; propagate the child's
# actual exit code instead.
sys.exit(
    run(
        ['python', '/scripts/run_workspace.py',
         '--opts', opts_json],
        env=workspace_env
    ).returncode
)
|
"""A function for extracting batch data into a tabular format."""
import csv
import html
import json
import logging
import os
from xml.dom import minidom
import click
from amti import settings
from amti import utils
logger = logging.getLogger(__name__)
# Output formats accepted by ``tabular``'s ``file_format`` argument.
TABULAR_SUPPORTED_FILE_FORMATS = [
    'csv',
    'json',
    'jsonl'
]
"""File formats supported by the ``tabular`` function."""
# Make sure to update the doc strings for
# ``amti.actions.extraction.tabular.tabular`` and
# ``amti.clis.extraction.tabular.tabular`` if you edit this constant.
def tabular(
        batch_dir,
        output_path,
        file_format):
    """Extract data in ``batch_dir`` to ``output_path`` as a table.

    Extract batch data into a tabular format; however, some metadata may
    not be copied over. Each assignment will become it's own row in the
    table with a separate column for each form field, as well as much of
    the assignment's metadata. The table will be written to
    ``output_path`` in the format specified by ``file_format``.

    Parameters
    ----------
    batch_dir : str
        the path to the batch's directory.
    output_path : str
        the path where the output file should be saved.
    file_format : str
        the file format to use when writing the data. Must be one of the
        supported file formats: csv (CSV), json (JSON), jsonl (JSON
        Lines).

    Returns
    -------
    None.

    Raises
    ------
    ValueError
        If ``file_format`` is not one of the supported formats.
    """
    if file_format not in TABULAR_SUPPORTED_FILE_FORMATS:
        raise ValueError(
            'file_format must be one of {formats}.'.format(
                formats=', '.join(TABULAR_SUPPORTED_FILE_FORMATS)))

    # construct important paths
    _, batch_dir_subpaths = settings.BATCH_DIR_STRUCTURE
    batchid_file_name, _ = batch_dir_subpaths['batchid']
    results_dir_name, results_dir_subpaths = batch_dir_subpaths['results']
    _, hit_dir_subpaths = results_dir_subpaths['hit_dir']
    hit_file_name, _ = hit_dir_subpaths['hit']
    assignments_file_name, _ = hit_dir_subpaths['assignments']

    batchid_file_path = os.path.join(
        batch_dir, batchid_file_name)
    results_dir = os.path.join(batch_dir, results_dir_name)

    with open(batchid_file_path) as batchid_file:
        batch_id = batchid_file.read().strip()

    logger.info(
        f'Beginning to extract batch {batch_id} to tabular format.')

    rows = []
    for dir_path, dir_names, file_names in os.walk(results_dir):
        hit = None
        assignments = None
        for file_name in file_names:
            if file_name == hit_file_name:
                hit_path = os.path.join(dir_path, file_name)
                with open(hit_path, 'r') as hit_file:
                    hit = json.load(hit_file)
            elif file_name == assignments_file_name:
                assignments_path = os.path.join(
                    dir_path, assignments_file_name)
                with open(assignments_path, 'r') as assignments_file:
                    assignments = [
                        json.loads(ln.strip())
                        for ln in assignments_file
                    ]
            else:
                # Fixed typo in log message ("Unexected")
                logger.warning(
                    f'Unexpected file ({file_name}) located in'
                    f' {dir_path}')

        if hit is None or assignments is None:
            # if both ``hit`` and ``assignments`` are ``None``, then
            # this directory is simply not one we're interested in;
            # however, if exactly one is ``None`` then there's likely
            # been an error.
            if hit is None and assignments is not None:
                logger.warning(
                    f'Found assignments but no HIT in {dir_path}.')
            elif hit is not None and assignments is None:
                logger.warning(
                    f'Found HIT but no assignments in {dir_path}.')
            continue

        for assignment in assignments:
            row = {}

            # add relevant metadata from the HIT
            row['HITId'] = hit['HIT']['HITId']
            row['AssignmentDurationInSeconds'] =\
                hit['HIT']['AssignmentDurationInSeconds']
            row['AutoApprovalDelayInSeconds'] =\
                hit['HIT']['AutoApprovalDelayInSeconds']
            row['Expiration'] = hit['HIT']['Expiration']
            row['CreationTime'] = hit['HIT']['CreationTime']

            # add relevant metadata from the assignment
            row['AssignmentId'] = assignment['AssignmentId']
            row['WorkerId'] = assignment['WorkerId']
            row['AssignmentStatus'] = assignment['AssignmentStatus']
            row['AutoApprovalTime'] = assignment['AutoApprovalTime']
            row['AcceptTime'] = assignment['AcceptTime']
            row['SubmitTime'] = assignment['SubmitTime']
            row['ApprovalTime'] = assignment['ApprovalTime']

            # parse the response and add it to the row
            xml = minidom.parseString(assignment['Answer'])
            for answer_tag in xml.getElementsByTagName('Answer'):
                [question_identifier_tag] =\
                    answer_tag.getElementsByTagName(
                        'QuestionIdentifier')
                question_identifier = utils.xml.get_node_text(
                    question_identifier_tag)

                if question_identifier == 'doNotRedirect':
                    # some workers on Mechanical Turk modify their
                    # browser requests to send a 'doNotRedirect'
                    # field when posting results.
                    logger.warning(
                        f'Found a "doNotRedirect" field in'
                        f' {dir_path}. Dropping the field.')
                    continue

                [free_text_tag] = answer_tag.getElementsByTagName(
                    'FreeText')
                free_text = html.unescape(
                    utils.xml.get_node_text(free_text_tag))

                row[question_identifier] = free_text

            rows.append(row)

    with click.open_file(output_path, 'w') as output_file:
        if file_format == 'csv':
            # Fixed: guard against an empty batch — ``rows[0]`` raised
            # IndexError when no assignments were found. An empty batch now
            # yields an empty file, consistent with the jsonl branch.
            # NOTE(review): DictWriter assumes every row shares the keys of
            # the first row; rows with extra form fields would raise —
            # confirm batches are homogeneous.
            if rows:
                csv_writer = csv.DictWriter(
                    output_file,
                    fieldnames=rows[0].keys())
                csv_writer.writeheader()
                csv_writer.writerows(rows)
        elif file_format == 'json':
            json.dump(rows, output_file)
        elif file_format == 'jsonl':
            output_file.write('\n'.join([
                json.dumps(row)
                for row in rows
            ]))
        else:
            raise NotImplementedError(
                f'Support for {file_format} has not been implemented.')

    logger.info(
        f'Finished extracting batch {batch_id} to tabular format.')
|
#!/usr/bin/env python3
"""
===========
dswx_pge.py
===========
Module defining the implementation for the Dynamic Surface Water Extent (DSWx) PGE.
"""
import glob
import os.path
from collections import OrderedDict
from os.path import abspath, basename, exists, isdir, join, splitext
from opera.util.error_codes import ErrorCode
from opera.util.img_utils import get_geotiff_hls_dataset
from opera.util.img_utils import get_geotiff_metadata
from opera.util.img_utils import get_geotiff_processing_datetime
from opera.util.img_utils import get_geotiff_product_version
from opera.util.img_utils import get_geotiff_spacecraft_name
from opera.util.img_utils import get_hls_filename_fields
from opera.util.metadata_utils import get_sensor_from_spacecraft_name
from opera.util.metadata_utils import get_geographic_boundaries_from_mgrs_tile
from opera.util.render_jinja2 import render_jinja2
from opera.util.time import get_time_for_filename
from .base_pge import PgeExecutor
from .base_pge import PostProcessorMixin
from .base_pge import PreProcessorMixin
class DSWxPreProcessorMixin(PreProcessorMixin):
    """
    Mixin class responsible for handling all pre-processing steps for the
    DSWx PGE. The pre-processing phase is defined as all steps necessary prior
    to SAS execution.

    On top of the base behavior inherited from PreProcessorMixin, this mixin
    validates that each input defined by the RunConfig exists and is usable.
    """

    _pre_mixin_name = "DSWxPreProcessorMixin"

    def _validate_inputs(self):
        """
        Check every RunConfig input for validity.

        A directory input must exist and contain at least one .tif file; a
        file input must exist and carry a .tif extension. Violations are
        reported through the logger's critical channel.
        """
        for input_file in self.runconfig.input_files:
            path = abspath(input_file)

            if not exists(path):
                self.logger.critical(
                    self.name, ErrorCode.INPUT_NOT_FOUND,
                    f"Could not locate specified input file/directory {path}"
                )
            elif isdir(path):
                # Directory input: require at least one GeoTIFF inside
                if not glob.glob(join(path, '*.tif')):
                    self.logger.critical(
                        self.name, ErrorCode.INPUT_NOT_FOUND,
                        f"Input directory {path} does not contain any tif files"
                    )
            elif not path.endswith(".tif"):
                self.logger.critical(
                    self.name, ErrorCode.INVALID_INPUT,
                    f"Input file {path} does not have .tif extension"
                )

    def run_preprocessor(self, **kwargs):
        """
        Executes the pre-processing steps for DSWx PGE initialization:
        everything the base PreProcessorMixin does, followed by the input
        file/directory validation check.

        Parameters
        ----------
        **kwargs : dict
            Any keyword arguments needed by the pre-processor
        """
        super().run_preprocessor(**kwargs)
        self._validate_inputs()
class DSWxPostProcessorMixin(PostProcessorMixin):
    """
    Mixin class responsible for handling all post-processing steps for the DSWx
    PGE. The post-processing phase is defined as all steps necessary after
    SAS execution has completed.
    In addition to the base functionality inherited from PostProcessorMixin, this
    mixin adds an output validation step to ensure that the output file defined by
    the RunConfig exists and is valid.
    """
    _post_mixin_name = "DSWxPostProcessorMixin"
    # Core filename component shared by all products of a PGE run; populated
    # lazily by _core_filename() on its first call.
    _cached_core_filename = None
def _validate_output(self):
"""
Evaluates the output file(s) generated from SAS execution to ensure
existence, and that the file(s) contains some content (size is greater than
0).
"""
# Get the product ID that the SAS should have used to tag all output images
product_id = self.runconfig.sas_config['runconfig']['groups']['product_path_group']['product_id']
output_products = list(
filter(
lambda filename: product_id in filename,
self.runconfig.get_output_product_filenames()
)
)
if not output_products:
error_msg = f"No SAS output file(s) containing product ID {product_id} " \
f"found within {self.runconfig.output_product_path}"
self.logger.critical(self.name, ErrorCode.OUTPUT_NOT_FOUND, error_msg)
for output_product in output_products:
if not os.path.getsize(output_product):
error_msg = f"SAS output file {output_product} was created, but is empty"
self.logger.critical(self.name, ErrorCode.INVALID_OUTPUT, error_msg)
def _core_filename(self, inter_filename=None):
"""
Returns the core file name component for products produced by the
DSWx PGE.
The core file name component of the DSWx PGE consists of:
<PROJECT>_<LEVEL>_<PGE NAME>_<SOURCE>_<SPACECRAFT_NAME>_<TILE ID>_<TIMETAG>_<PRODUCT VERSION>_<PRODUCT_COUNTER>
Callers of this function are responsible for assignment of any other
product-specific fields, such as the file extension.
Notes
-----
On first call to this function, the returned core filename is cached
for subsequent calls. This allows the core filename to be easily reused
across product types without needing to provide inter_filename for
each subsequent call.
Parameters
----------
inter_filename : str, optional
The intermediate filename of the output product to generate the
core filename for. This parameter may be used to inspect the file
in order to derive any necessary components of the returned filename.
Once the core filename is cached upon first call to this function,
this parameter may be omitted.
Returns
-------
core_filename : str
The core file name component to assign to products created by this PGE.
"""
# Check if the core filename has already been generated and cached,
# and return it if so
if self._cached_core_filename is not None:
return self._cached_core_filename
if not inter_filename:
msg = (f"No filename provided to {self.__class__.__name__}._core_filename(), "
f"First call must provide a filename before result is cached.")
self.logger.critical(self.name, ErrorCode.FILE_MOVE_FAILED, msg)
spacecraft_name = get_geotiff_spacecraft_name(inter_filename).upper()
sensor = get_sensor_from_spacecraft_name(spacecraft_name)
pixel_spacing = "30" # fixed for HLS-based products
dataset = get_geotiff_hls_dataset(inter_filename)
dataset_fields = get_hls_filename_fields(dataset)
source = dataset_fields['product']
tile_id = dataset_fields['tile_id']
acquisition_time = dataset_fields['acquisition_time']
if not acquisition_time.endswith('Z'):
acquisition_time = f'{acquisition_time}Z'
processing_datetime = get_geotiff_processing_datetime(inter_filename)
processing_time = get_time_for_filename(processing_datetime)
if not processing_time.endswith('Z'):
processing_time = f'{processing_time}Z'
product_version = get_geotiff_product_version(inter_filename)
if not product_version.startswith('v'):
product_version = f'v{product_version}'
# Assign the core file to the cached class attribute
self._cached_core_filename = (
f"{self.PROJECT}_{self.LEVEL}_{self.NAME}_{source}_{sensor}_{pixel_spacing}_"
f"{tile_id}_{acquisition_time}_{processing_time}_{product_version}_"
f"{str(self.runconfig.product_counter).zfill(3)}"
)
return self._cached_core_filename
def _geotiff_filename(self, inter_filename):
"""
Returns the file name to use for GeoTIFF's produced by the DSWx PGE.
The GeoTIFF filename for the DSWx PGE consists of:
<Core filename>_<Band Index>_<Band Name>.tif
Where <Core filename> is returned by DSWxPostProcessorMixin._core_filename()
and <Band Index> and <Band Name> are determined from the name of the
intermediate geotiff file to be renamed.
Parameters
----------
inter_filename : str
The intermediate filename of the output GeoTIFF to generate
a filename for. This parameter may be used to inspect the file
in order to derive any necessary components of the returned filename.
Returns
-------
geotiff_filename : str
The file name to assign to GeoTIFF product(s) created by this PGE.
"""
core_filename = self._core_filename(inter_filename)
# Specific output product band index and name should be the last parts
# of the filename before the extension, delimited by underscores
band_idx, band_name = splitext(inter_filename)[0].split("_")[-2:]
return f"{core_filename}_{band_idx}_{band_name}.tiff"
def _collect_dswx_product_metadata(self):
    """
    Gathers the available metadata from a sample output DSWx product for
    use in filling out the ISO metadata template for the DSWx-HLS PGE.

    Returns
    -------
    output_product_metadata : dict
        Dictionary containing DSWx-HLS output product metadata, formatted
        for use with the ISO metadata Jinja2 template.
    """
    # Find a single representative output DSWx-HLS product, they should all
    # have identical sets of metadata
    output_products = self.runconfig.get_output_product_filenames()
    representative_product = None

    for output_product in output_products:
        if basename(output_product) in self.renamed_files.values():
            # TODO: kludge for avoiding output products that are missing expected metadata
            if get_geotiff_hls_dataset(output_product) is not None:
                representative_product = output_product
                break
    # for/else: runs only when the loop completed without `break`,
    # i.e. no usable product was found.
    else:
        msg = (f"Could not find sample output product to derive metadata from "
               f"within {self.runconfig.output_product_path}")
        # NOTE(review): execution falls through to the call below with
        # representative_product still None unless logger.critical raises
        # or exits — TODO confirm its contract.
        self.logger.critical(self.name, ErrorCode.ISO_METADATA_RENDER_FAILED, msg)

    # Extract all metadata assigned by the SAS at product creation time
    output_product_metadata = get_geotiff_metadata(representative_product)

    # Get the Military Grid Reference System (MGRS) tile code and zone identifier
    # from the name of the input HLS dataset
    hls_fields = get_hls_filename_fields(
        get_geotiff_hls_dataset(representative_product)
    )
    mgrs_tile_id = hls_fields['tile_id']

    output_product_metadata['tileCode'] = mgrs_tile_id
    # First two characters of the MGRS tile ID are the UTM zone number.
    output_product_metadata['zoneIdentifier'] = mgrs_tile_id[:2]

    # Translate the MGRS tile ID to a lat/lon bounding box
    (lat_min,
     lat_max,
     lon_min,
     lon_max) = get_geographic_boundaries_from_mgrs_tile(mgrs_tile_id)

    output_product_metadata['geospatial_lon_min'] = lon_min
    output_product_metadata['geospatial_lon_max'] = lon_max
    output_product_metadata['geospatial_lat_min'] = lat_min
    output_product_metadata['geospatial_lat_max'] = lat_max

    # Split the sensing time into the beginning/end portions
    sensing_time = output_product_metadata.pop('SENSING_TIME')

    # Sensing time for L30 datasets contain both begin and end times delimited
    # by semi-colon
    if ';' in sensing_time:
        sensing_time_begin, sensing_time_end = sensing_time.split(';')
    # S30 datasets seem to only provide a single sensing time value
    else:
        sensing_time_begin = sensing_time_end = sensing_time

    output_product_metadata['sensingTimeBegin'] = sensing_time_begin.strip()
    output_product_metadata['sensingTimeEnd'] = sensing_time_end.strip()

    # Add some fields on the dimensions of the data. These values should
    # be the same for all DSWx-HLS products, and were derived from the
    # ADT product spec
    output_product_metadata['xCoordinates'] = {
        'size': 3660,  # pixels
        'spacing': 30  # meters/pixel
    }
    output_product_metadata['yCoordinates'] = {
        'size': 3660,  # pixels
        'spacing': 30  # meters/pixel
    }

    return output_product_metadata
def _create_custom_metadata(self):
    """
    Assemble the "custom data" dictionary used with the ISO metadata rendering.

    Custom data holds every metadata value the ISO template needs that is
    not available from the other sources (RunConfig, output product, or
    catalog metadata).

    Returns
    -------
    custom_data : dict
        Dictionary containing the custom metadata as expected by the ISO
        metadata Jinja2 template.
    """
    custom_metadata = {}
    custom_metadata['ISO_OPERA_FilePackageName'] = self._core_filename()
    custom_metadata['ISO_OPERA_ProducerGranuleId'] = self._core_filename()
    # A product counter above 1 marks a reprocessed (revised) granule.
    if int(self.runconfig.product_counter) > 1:
        custom_metadata['MetadataProviderAction'] = "revision"
    else:
        custom_metadata['MetadataProviderAction'] = "creation"
    custom_metadata['GranuleFilename'] = self._core_filename()
    custom_metadata['ISO_OPERA_ProjectKeywords'] = ['OPERA', 'JPL', 'DSWx', 'Dynamic', 'Surface', 'Water', 'Extent']
    custom_metadata['ISO_OPERA_PlatformKeywords'] = ['HLS']
    custom_metadata['ISO_OPERA_InstrumentKeywords'] = ['Landsat8', 'Sentinel 1 A/B']
    return custom_metadata
def _create_iso_metadata(self):
    """
    Render the ISO metadata template for DSWx-HLS output products.

    Metadata is sourced from:
      * RunConfig (in dictionary form)
      * Output products (extracted from a sample product)
      * Catalog metadata
      * "Custom" metadata (all metadata not found anywhere else)

    Returns
    -------
    rendered_template : str
        The ISO metadata template for DSWx-HLS filled in with values from
        the sourced metadata dictionaries.
    """
    # Keys here must match what the Jinja2 template dereferences.
    iso_metadata = {
        'run_config': self.runconfig.asdict(),
        'product_output': self._collect_dswx_product_metadata(),
        'catalog_metadata': self._create_catalog_metadata().asdict(),
        'custom_data': self._create_custom_metadata()
    }

    iso_template_path = os.path.abspath(self.runconfig.iso_template_path)

    # Fail loudly (via the PGE logger) when the template is missing.
    if not os.path.exists(iso_template_path):
        msg = f"Could not load ISO template {iso_template_path}, file does not exist"
        self.logger.critical(self.name, ErrorCode.ISO_METADATA_TEMPLATE_NOT_FOUND, msg)

    return render_jinja2(iso_template_path, iso_metadata, self.logger)
def run_postprocessor(self, **kwargs):
    """
    Execute the post-processing steps for DSWx PGE job completion.

    Performs the same steps as the base PostProcessorMixin, but runs the
    output-file validation check before the output files are staged.

    Parameters
    ----------
    **kwargs : dict
        Any keyword arguments needed by the post-processor
    """
    print(f'Running postprocessor for {self._post_mixin_name}')

    # Validation is deliberately sandwiched between the QA executable and
    # staging so bad products are caught before they are staged out.
    for step in (self._run_sas_qa_executable,
                 self._validate_output,
                 self._stage_output_files):
        step()
class DSWxExecutor(DSWxPreProcessorMixin, DSWxPostProcessorMixin, PgeExecutor):
    """
    Main class for execution of a DSWx PGE, including the SAS layer.

    Rolls up the DSWx-tailored pre- and post-processors while inheriting
    all other functionality from the base PgeExecutor class.
    """

    NAME = "DSWx"
    """Short name for the DSWx PGE"""

    LEVEL = "L3"
    """Processing Level for DSWx Products"""

    SAS_VERSION = "0.1"
    """Version of the SAS wrapped by this PGE, should be updated as needed with new SAS deliveries"""

    def __init__(self, pge_name, runconfig_path, **kwargs):
        super().__init__(pge_name, runconfig_path, **kwargs)

        # Intermediate SAS outputs whose names match this glob are renamed
        # through the DSWx GeoTIFF naming convention.
        self.rename_by_pattern_map = OrderedDict(
            [('dswx_hls_*.tif*', self._geotiff_filename)]
        )
|
import sys
import os
import datetime
import logging
from tqdm import tqdm
import copy
import numpy as np
import re
from concurrent import futures

# Child logger named after this script, nested under the shared
# "sacluster" root logger.
logger = logging.getLogger("sacluster").getChild(os.path.basename(__file__))

# Relative path from this script's directory up to the project root.
path = "../../.."
# Run with this script's directory as the working directory so the relative
# `path` above (and the sys.path entries below) resolve the same way no
# matter where the caller launched the script from.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path + "/lib/others")
from API_method import get, post, put, delete
from info_print import printout
sys.path.append(path + "/lib/def_conf")
from config_function import conf_pattern_2
class modify_sacluster:
def __init__(self, cluster_info, cluster_id, auth_res, ext_info, fp = "", info_list = None, api_index = True, max_workers = 1):
    """
    Capture cluster state and derive zone/URL bookkeeping for later operations.

    Parameters
    ----------
    cluster_info : dict
        Cluster description; must contain "clusterparams" with "server",
        "switch" and "nfs" entries (and "baseparams" for the node-count
        operations).
    cluster_id : str
        Identifier of the cluster being modified.
    auth_res : object
        Authentication result, stored for use by the API helpers.
    ext_info : dict
        External zone information; ext_info["Zone"][zone] carries "Type"
        and "maximum" (remaining node capacity) per zone.
    fp : file-like or str, optional
        Log destination forwarded to printout().
    info_list : list of int, optional
        Output-channel flags forwarded to printout(); defaults to
        [1, 0, 0, 0] when omitted.
    api_index : bool, optional
        Flag stored for API helpers.
    max_workers : int, optional
        Thread-pool size used when changing node plans in parallel.
    """
    self.cluster_info = cluster_info
    self.cluster_id = cluster_id
    self.auth_res = auth_res
    self.ext_info = ext_info
    self.fp = fp
    # BUGFIX: the default was a mutable list literal shared across every
    # instance; use a None sentinel and build a fresh list per instance.
    self.info_list = [1, 0, 0, 0] if info_list is None else info_list
    self.api_index = api_index
    self.max_workers = max_workers
    self.zone_list = list(self.cluster_info["clusterparams"]["server"].keys())
    # NOTE(review): nfs_zones is only defined when the cluster has NFS
    # appliances — code reading it must first check "nfs" is set.
    if self.cluster_info["clusterparams"]["nfs"] is not None:
        self.nfs_zones = list(self.cluster_info["clusterparams"]["nfs"].keys())
    # Total remaining capacity across all zones of type "practice", and the
    # list of those zones ("proactice" spelling kept — it is referenced by
    # other methods of this class).
    self.max_node_num = 0
    self.proactice_zones = []
    for zone in ext_info["Zone"]:
        if self.ext_info["Zone"][zone]["Type"] == "practice" and self.ext_info["Zone"][zone]["maximum"] > 0:
            self.max_node_num += self.ext_info["Zone"][zone]["maximum"]
            self.proactice_zones.append(zone)
    # Remaining capacity across the zones the cluster currently occupies,
    # plus which zone hosts the head node.
    self.current_zone_max_num = 0
    self.head_zone_num = 0
    for zone in self.zone_list:
        self.current_zone_max_num += self.ext_info["Zone"][zone]["maximum"]
        if "head" in self.cluster_info["clusterparams"]["server"][zone]:
            self.head_zone = zone
            self.head_zone_num = self.ext_info["Zone"][zone]["maximum"]
    # Per-zone Sakura Cloud API endpoints.
    # NOTE(review): head_zone is assumed to exist — a cluster whose server
    # map has no "head" entry would raise NameError below. TODO confirm.
    self.url_list = {}
    for zone in ext_info["Zone"]:
        self.url_list[zone] = "https://secure.sakura.ad.jp/cloud/zone/" + zone + "/api/cloud/1.1"
    self.head_url = "https://secure.sakura.ad.jp/cloud/zone/" + self.head_zone + "/api/cloud/1.1"
    self.sub_url = ["/server", "/disk", "/switch", "/interface", "/bridge", "/tag", "/appliance", "/power"]
    self.date_modified = "Date modified:" + str(datetime.datetime.now().strftime("%Y_%m_%d"))
def __call__(self):
    """
    Entry point: show the current cluster state, then run whichever
    modification the user selected (show_current_states() stores the menu
    choice on self.mod_type).
    """
    self.show_current_states()
    # Dispatch table keyed by the menu choice collected above.
    handlers = {
        "1": self.modify_compute_node_number,
        "2": self.modify_back_switch,
        "3": self.modify_core_and_memory,
    }
    handler = handlers.get(self.mod_type)
    if handler is None:
        _ = printout("Warning: the input must be a number from 1 to 3.", info_type = 0, info_list = self.info_list, fp = self.fp)
    else:
        handler()
    printout("Finished modifying the cluster", info_type = 0, info_list = self.info_list, fp = self.fp)
def show_current_states(self):
    """
    Print a summary of the cluster's current configuration and ask the
    user which modification to perform.

    Shows the compute-node count, whether back-area switches exist, and
    the core/memory plans of the head and compute nodes, then stores the
    user's menu choice ("1"–"3") on self.mod_type for __call__().
    """
    printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
    # Banner width, reused for the closing rule at the bottom.
    text_len = len('#' *25 + ' current state ' + '#' *25)
    printout('#' *25 + ' current state ' + '#' *25, info_type = 0, info_list = self.info_list, fp = self.fp)
    compute_number = sum([len(val["compute"]) for key, val in self.cluster_info["clusterparams"]["server"].items()])
    printout(' ' * 10 + 'The number of compute node:'.ljust(35, ' ') + str(compute_number), info_type = 0, info_list = self.info_list, fp = self.fp)
    # Zones that currently have a back-area switch configured.
    switch_back_zone = []
    for key, val in self.cluster_info["clusterparams"]["switch"].items():
        #if("back" in val):
        if(self.cluster_info["clusterparams"]["switch"][key]["back"] != None):
            switch_back_zone.append(key)
    printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
    if(len(switch_back_zone) == 0):
        printout(' ' * 10 +'Switch of back area:'.ljust(35, ' ') + 'False', info_type = 0, info_list = self.info_list, fp = self.fp)
    elif(len(self.cluster_info["clusterparams"]["switch"]) == len(switch_back_zone)):
        printout(' ' * 10 +'Switch of back area:'.ljust(35, ' ') + 'True', info_type = 0, info_list = self.info_list, fp = self.fp)
    elif(len(self.cluster_info["clusterparams"]["switch"]) > len(switch_back_zone)):
        printout(' ' * 10 +'Switch of back area:'.ljust(35, ' ') + 'Some zones are True', info_type = 0, info_list = self.info_list, fp = self.fp)
        printout(' ' * 10 +'(' + ",".join(switch_back_zone) + ')', info_type = 0, info_list = self.info_list, fp = self.fp)
    # Tally distinct "core-memory" plans across all compute nodes.
    # NOTE(review): a plan's first occurrence is recorded as 0 and only
    # repeats increment it, so the counts are off by one; harmless today
    # because only the keys are displayed below.
    node_plan = {}
    for zone, val in self.cluster_info["clusterparams"]["server"].items():
        if("head" in val):
            head_zone = zone
            head_core = val["head"]["node"]["core"]
            head_memory = val["head"]["node"]["memory"]
        for num, val_comp in val["compute"].items():
            if(str(val_comp["node"]["core"]) + "-" + str(val_comp["node"]["memory"]) not in node_plan):
                node_plan[str(val_comp["node"]["core"]) + "-" + str(val_comp["node"]["memory"])] = 0
            else:
                node_plan[str(val_comp["node"]["core"]) + "-" + str(val_comp["node"]["memory"])] += 1
    printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
    printout(' ' * 10 + 'Node information' , info_type = 0, info_list = self.info_list, fp = self.fp)
    printout(' ' * 10 + '((Head node))' , info_type = 0, info_list = self.info_list, fp = self.fp)
    # NOTE(review): head_core/head_memory are unbound if no zone carries a
    # "head" entry — TODO confirm a head node is always present here.
    printout(' ' * 10 + 'Core:'.ljust(35, ' ') + str(head_core), info_type = 0, info_list = self.info_list, fp = self.fp)
    printout(' ' * 10 +'Memory:'.ljust(35, ' ') + str(head_memory), info_type = 0, info_list = self.info_list, fp = self.fp)
    printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
    printout(' ' * 10 + '((Compute node))' , info_type = 0, info_list = self.info_list, fp = self.fp)
    if(len(node_plan) == 1):
        # Single plan: print it without a type index.
        for key, val in node_plan.items():
            text = ' ' * 10 +'Core:'.ljust(35, ' ') + str(key.split("-")[0])
            printout(text, info_type = 0, info_list = self.info_list, fp = self.fp)
            text = ' ' * 10 +'Memory:'.ljust(35, ' ') + str(key.split("-")[1])
            printout(text, info_type = 0, info_list = self.info_list, fp = self.fp)
    else:
        # Multiple plans: number each one.
        count = 0
        for key, val in node_plan.items():
            printout(' ' * 10 +'compute node type ' + str(count), info_type = 0, info_list = self.info_list, fp = self.fp)
            printout(' ' * 10 +'Core:'.ljust(35, ' ') + str(key.split("-")[0]) , info_type = 0, info_list = self.info_list, fp = self.fp)
            printout(' ' * 10 +'Memory:'.ljust(35, ' ') + str(key.split("-")[1]), info_type = 0, info_list = self.info_list, fp = self.fp)
            count += 1
    printout('#' * text_len, info_type = 0, info_list = self.info_list, fp = self.fp)
    self.mod_type = self.answer_response(' \n<<Contents to modify>>\n1. The number of compute node\n2. Switch of back area\n3. Core or memory of nodes', ["1", "2", "3"], "1 to 3", input_comment = "Please input a content number", opp = 1)
def modify_back_switch(self):
    """
    Add or remove the back-area (inter-node) switch network for the cluster.

    When no zone has a back switch: creates one switch per zone, adds a NIC
    to every compute node and connects it, then (for multi-zone clusters)
    bridges the per-zone switches together. When every zone already has
    one: tears all of that down in reverse. A mixed state is rejected.
    """
    # Zones that already have a back-area switch.
    switch_back_zone = []
    for key, val in self.cluster_info["clusterparams"]["switch"].items():
        if self.cluster_info["clusterparams"]["switch"][key]["back"] is not None:
            switch_back_zone.append(key)
    printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
    if len(switch_back_zone) == 0:
        printout('Switch of back area is False in the current state', info_type = 0, info_list = self.info_list, fp = self.fp)
        input_val = self.answer_response("Can a switch be installed in the back area?", ["yes", "y", "no", "n"], "yes/y or no/n")
        if input_val == "yes" or input_val == "y":
            self.bar = tqdm(total = 100)
            self.bar.set_description('Progress rate')
            self.progress_sum = 0
            # Progress budget: 30% for switch creation; the remainder for
            # NIC work (smaller share when bridging will also run).
            if len(self.zone_list) > 1:
                progress_val = 20
            else:
                progress_val = 35
            for zone in self.zone_list:
                self.cluster_info["clusterparams"]["switch"][zone]["back"] = {}
                self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"] = self.create_switch(zone)
                self.progress_bar(int(30 / len(self.zone_list)))
                node_num = len(self.cluster_info["clusterparams"]["server"][zone]["compute"])
                for key, val in self.cluster_info["clusterparams"]["server"][zone]["compute"].items():
                    nic_id = self.add_interface(zone, val["node"]["id"])
                    # BUGFIX: was len(zone) — the length of the zone *name*
                    # string — where the number of zones was intended.
                    self.progress_bar(int(progress_val / (len(self.zone_list) * node_num)))
                    self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"] = {}
                    self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"]["id"] = nic_id
                    self.connect_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"], self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"]["id"])
                    self.progress_bar(int(progress_val / (len(self.zone_list) * node_num)))
            if len(self.zone_list) > 1:
                # Multi-zone clusters need a bridge linking the per-zone switches.
                bridge_id = self.create_bridge()
                self.progress_bar(10)
                self.cluster_info["clusterparams"]["bridge"]["back"] = {}
                self.cluster_info["clusterparams"]["bridge"]["back"]["id"] = bridge_id
                for zone in self.zone_list:
                    self.progress_bar(int(20 / len(self.zone_list)))
                    _ = self.connect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"], self.cluster_info["clusterparams"]["bridge"]["back"]["id"])
            self.bar.update(100 - self.progress_sum)
            self.bar.close()
        else:
            printout('Please start the operation over from the beginning.', info_type = 0, info_list = self.info_list, fp = self.fp)
            sys.exit()
    elif len(self.cluster_info["clusterparams"]["switch"]) == len(switch_back_zone):
        printout('Switch of back area is True in the current state', info_type = 0, info_list = self.info_list, fp = self.fp)
        input_val = self.answer_response("Can a switch be deleted in the back area?", ["yes", "y", "no", "n"], "yes/y or no/n")
        if input_val == "yes" or input_val == "y":
            self.bar = tqdm(total = 100)
            self.bar.set_description('Progress rate')
            self.progress_sum = 0
            if len(self.zone_list) > 1:
                progress_val = 20
            else:
                progress_val = 50
            # Disconnect and delete each compute node's back-area NIC first.
            for zone in self.zone_list:
                node_num = len(self.cluster_info["clusterparams"]["server"][zone]["compute"])
                for key, val in self.cluster_info["clusterparams"]["server"][zone]["compute"].items():
                    self.dis_connect_server_switch(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"]["id"])
                    # BUGFIX: was len(zone) — see note above.
                    self.progress_bar(int(progress_val / (len(self.zone_list) * node_num)))
                    self.delete_interface(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"]["id"])
                    self.progress_bar(int(progress_val / (len(self.zone_list) * node_num)))
                    self.cluster_info["clusterparams"]["server"][zone]["compute"][key]["nic"]["back"] = None
            # Then remove the switches (detaching them from the bridge first
            # on multi-zone clusters).
            for zone in self.zone_list:
                if len(self.zone_list) > 1:
                    self.disconnect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"])
                    self.progress_bar(int(20 / len(self.zone_list)))
                self.delete_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"])
                self.cluster_info["clusterparams"]["switch"][zone]["back"] = None
                self.progress_bar(int(30 / len(self.zone_list)))
            if len(self.zone_list) > 1:
                self.delete_bridge(self.cluster_info["clusterparams"]["bridge"]["back"]["id"])
                self.cluster_info["clusterparams"]["bridge"]["back"] = None
                self.progress_bar(10)
            self.bar.update(100 - self.progress_sum)
            self.bar.close()
        else:
            printout('Please start the operation over from the beginning.', info_type = 0, info_list = self.info_list, fp = self.fp)
            sys.exit()
    elif len(self.cluster_info["clusterparams"]["switch"]) > len(switch_back_zone):
        # Mixed state (only some zones have a back switch) is unsupported.
        printout('This cluster is not a target of sacluster operation', info_type = 0, info_list = self.info_list, fp = self.fp)
# Change the number of CPU cores and the amount of memory per node.
def modify_core_and_memory(self):
    """
    Change the core/memory plan of the head node or of all compute nodes.

    The head node is changed synchronously; compute nodes are changed in
    parallel with a thread pool, retrying (on user confirmation) until all
    plan changes succeed or the user aborts.
    """
    node_type = ["head node", "compute nodes"][int(self.answer_response(' \nPlease select the node type to modify the setting\n1. Head node\n2. Compute nodes', ["1", "2"], "1 or 2", input_comment = "Please input a number", opp = 1)) - 1]
    node_plan, core_plan, memory_plan = self.core_memory_setting(node_type)
    if node_type == "head node":
        self.bar = tqdm(total = 100)
        self.bar.set_description('Progress rate')
        self.progress_sum = 0
        self.change_node_plan(self.head_zone, self.cluster_info["clusterparams"]["server"][self.head_zone]["head"]["node"]["id"], node_plan)
        self.bar.update(100)
        self.bar.close()
    else:
        while True:
            self.bar = tqdm(total = 100)
            self.bar.set_description('Progress rate')
            self.progress_sum = 0
            future = []
            # Fan the plan changes out over a thread pool; the `with` block
            # joins all workers before results are examined.
            with futures.ThreadPoolExecutor(max_workers = self.max_workers, thread_name_prefix = "thread") as executor:
                for zone in self.zone_list:
                    for i in self.cluster_info["clusterparams"]["server"][zone]["compute"].keys():
                        logger.debug(self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["node"]["id"])
                        logger.debug(node_plan)
                        future.append(executor.submit(self.change_node_plan, zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["node"]["id"], node_plan, ind = 1))
            future_result = []
            for f in futures.as_completed(future):
                if f.result()[0] == True:
                    # NOTE(review): `zone` here is whatever the loop above
                    # ended on, so this per-step increment is approximate.
                    self.progress_bar(int(100 / (len(self.zone_list) * len(self.cluster_info["clusterparams"]["server"][zone]["compute"].keys()))))
                future_result.append(f.result()[0])
            # Collect every non-empty failure message.
            future_msg = [f.result()[1] for f in future if(f.result()[0] == False and f.result()[1] != "")]
            if False in future_result:
                # BUGFIX: the message list was previously collapsed to its
                # first element (a string) before "\n".join(...), which
                # printed that string one character per line. Join the list
                # of messages instead.
                printout("\n".join(future_msg), info_type = 0, info_list = self.info_list, fp = self.fp)
                temp = conf_pattern_2("Try again??", ["y", "n", "yes", "no"], "no", info_list = self.info_list, fp = self.fp)
                printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
                logger.info(temp)
                if temp == "no" or temp == "n":
                    printout("Stop processing.", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
                    self.bar.close()
                    sys.exit()
                else:
                    self.bar.close()
            else:
                break
        self.bar.update(100 - self.progress_sum)
        self.bar.close()
def modify_compute_node_number(self):
    """
    Interactively change how many compute nodes the cluster has.

    Loops until the user confirms a per-zone node-count plan, asks for
    credentials when new nodes will be added, then applies the change
    while reporting progress.
    """
    while True:
        new_compute_setting, num_comp = self.setting_compute_node()
        printout('', info_type = 0, info_list = self.info_list, fp = self.fp)
        printout('#' * 10 + ' Setting the number of compute node ' + '#' * 10, info_type = 0, info_list = self.info_list, fp = self.fp)
        printout(f"{' ' * 5}The number of compute nodes: {num_comp}{' ' * 5}", info_type = 0, info_list = self.info_list, fp = self.fp)
        for zone_name, zone_count in new_compute_setting.items():
            printout(f"{' ' * 5}The number of compute nodes in {zone_name}: {zone_count}{' ' * 5}", info_type = 0, info_list = self.info_list, fp = self.fp)
        printout('#' * (10 + len(' Setting the number of compute nodes ') + 10), info_type = 0, info_list = self.info_list, fp = self.fp)
        res = self.answer_response("Are the above settings correct?", ["yes", "y", "no", "n"], "yes/y or no/n")
        if res in ("yes", "y"):
            # Credentials are only actually requested when a zone grows.
            username, password = self.setting_middle(new_compute_setting)
            break
    self.bar = tqdm(total = 100)
    self.bar.set_description('Progress rate')
    self.progress_sum = 0
    self.change_compute_number(new_compute_setting, username, password)
    self.bar.update(100 - self.progress_sum)
    self.bar.close()
def setting_compute_node(self):
    """
    Interactively work out the new per-zone compute-node counts.

    Asks for the desired total, then — depending on whether the total is a
    growth or a shrink relative to the current count — distributes the
    nodes over the existing zones, adds new zones when the current ones
    lack capacity, or selects zones to delete when shrinking.

    Returns
    -------
    tuple of (dict, int)
        (node_number, new_node_number): per-zone compute-node counts and
        the requested total. NOTE(review): when the total equals the
        current count, node_number is returned empty — TODO confirm that
        callers treat this as "no change".
    """
    node_number = {}
    printout('', info_type = 0, info_list = self.info_list, fp = self.fp)
    # The upper bound offered depends on whether the head zone is a
    # "practice" zone (all practice zones usable) or not (current zones only).
    if(self.ext_info["Zone"][self.head_zone]["Type"] == "practice"):
        new_node_number = self.answer_response_node_number("Please input the number of compute nodes", 1, self.max_node_num + self.cluster_info["baseparams"]["compute_number"], self.cluster_info["baseparams"]["compute_number"], sp_type = 1)
    else:
        new_node_number = self.answer_response_node_number("Please input the number of compute nodes", 1, self.current_zone_max_num + self.cluster_info["baseparams"]["compute_number"], self.cluster_info["baseparams"]["compute_number"], sp_type = 1)
    # ---- Growth path -----------------------------------------------------
    if(new_node_number > self.cluster_info["baseparams"]["compute_number"]):
        if(new_node_number > self.current_zone_max_num + self.cluster_info["baseparams"]["compute_number"]):
            select_zones = list(set(self.proactice_zones) - set(self.zone_list))
            if(self.max_node_num + self.cluster_info["baseparams"]["compute_number"] == new_node_number):
                # Exactly at total capacity: every practice zone is filled.
                for zone in self.proactice_zones:
                    node_number[zone] = self.ext_info["Zone"][zone]["maximum"]
            else:
                # Add new zones: current zones cannot hold the request.
                select_zones = list(set(self.proactice_zones) - set(self.zone_list))
                printout('', info_type = 0, info_list = self.info_list, fp = self.fp)
                printout('Info: the specified number of nodes cannot be installed in the current zone. Additional zones are needed', info_type = 0, info_list = self.info_list, fp = self.fp)
                current_zone_node_num = self.current_zone_max_num + self.cluster_info["baseparams"]["compute_number"]
                add_zone_list = []
                count = 1
                # If even dropping the smallest candidate zone cannot satisfy
                # the request, all candidate zones must be added automatically;
                # otherwise let the user pick zones one at a time.
                rem_min_node_num = min([self.ext_info["Zone"][zone]["maximum"] for zone in select_zones])
                if((self.max_node_num + self.cluster_info["baseparams"]["compute_number"] - rem_min_node_num) < new_node_number):
                    add_zone_list.extend(select_zones)
                    for add_zone in add_zone_list:
                        current_zone_node_num += self.ext_info["Zone"][add_zone]["maximum"]
                    _ = printout("Info: Additional zones is automatically set to " + ",".join(add_zone_list), info_type = 0, info_list = self.info_list, fp = self.fp)
                else:
                    printout('', info_type = 0, info_list = self.info_list, fp = self.fp)
                    while(True):
                        add_zone = select_zones[int(self.answer_response_memory("Please select an additional zone " + str(count), select_zones, select_zones[0]))]
                        current_zone_node_num += self.ext_info["Zone"][add_zone]["maximum"]
                        add_zone_list.append(add_zone)
                        if(current_zone_node_num >= new_node_number):
                            break
                        select_zones = list(set(select_zones) - set([add_zone]))
                        count += 1
                # NOTE(review): zone_compute_num below is populated but never
                # merged into the returned node_number — in the exact-capacity
                # case the caller receives an empty/partial plan. TODO confirm
                # whether these assignments were meant to target node_number.
                zone_compute_num = {}
                if(current_zone_node_num == new_node_number):
                    for zone in self.zone_list:
                        zone_compute_num[zone] = self.ext_info["Zone"][zone]["maximum"]
                        _ = printout("Info: The number of compute nodes in " + zone + " is automatically set to " + str(self.ext_info["Zone"][zone]["maximum"]), info_type = 0, info_list = self.info_list, fp = self.fp)
                    for zone in add_zone_list:
                        zone_compute_num[zone] = self.ext_info["Zone"][zone]["maximum"]
                        _ = printout("Info: The number of compute nodes in " + zone + " is automatically set to " + str(self.ext_info["Zone"][zone]["maximum"]), info_type = 0, info_list = self.info_list, fp = self.fp)
                    #for key, val in self.ext_info["Zone"].items():
                        #print(key + str(val["maximum"]))
                else:
                    # Ask per zone, keeping min_val high enough that the
                    # remaining zones can still absorb the leftover nodes.
                    remain_node_num = new_node_number
                    for i in range(len(self.zone_list)):
                        min_val = remain_node_num - (sum([self.ext_info["Zone"][self.zone_list[j]]["maximum"] + len(self.cluster_info["clusterparams"]["server"][self.zone_list[j]]["compute"]) for j in range(i+1, len(self.zone_list))]) + sum([self.ext_info["Zone"][zone]["maximum"] for zone in add_zone_list]))
                        if(min_val < 1):
                            min_val = 1
                        max_val = self.ext_info["Zone"][self.zone_list[i]]["maximum"] + len(self.cluster_info["clusterparams"]["server"][self.zone_list[i]]["compute"])
                        if(min_val != max_val):
                            node_number[self.zone_list[i]] = self.answer_response_node_number("Please input the number of compute nodes in " + str(self.zone_list[i]), min_val, max_val)
                        else:
                            node_number[self.zone_list[i]] = min_val
                            _ = printout("Info: The number of compute nodes in " + self.zone_list[i] + " is automatically set to " + str(node_number[self.zone_list[i]]), info_type = 0, info_list = self.info_list, fp = self.fp)
                        remain_node_num = remain_node_num - int(node_number[self.zone_list[i]])
                    for i in range(len(add_zone_list)):
                        min_val = remain_node_num - sum([self.ext_info["Zone"][add_zone_list[j]]["maximum"] for j in range(i+1, len(add_zone_list))])
                        if(min_val < 1):
                            min_val = 1
                        if(remain_node_num > self.ext_info["Zone"][add_zone_list[i]]["maximum"]):
                            max_val = self.ext_info["Zone"][add_zone_list[i]]["maximum"]
                        else:
                            max_val = remain_node_num
                        if(min_val != max_val):
                            node_number[add_zone_list[i]] = self.answer_response_node_number("Please input the number of compute nodes in " + str(add_zone_list[i]), min_val, max_val)
                        else:
                            node_number[add_zone_list[i]] = min_val
                            _ = printout("Info: The number of compute nodes in " + add_zone_list[i] + " is automatically set to " + str(node_number[add_zone_list[i]]), info_type = 0, info_list = self.info_list, fp = self.fp)
                        remain_node_num = remain_node_num - int(node_number[add_zone_list[i]])
        else:
            # Growth fits inside the zones the cluster already occupies.
            remain_node_num = new_node_number
            for i in range(len(self.zone_list)):
                min_val = remain_node_num - sum([self.ext_info["Zone"][self.zone_list[j]]["maximum"] + len(self.cluster_info["clusterparams"]["server"][self.zone_list[j]]["compute"]) for j in range(i+1, len(self.zone_list))])
                if(min_val < 1):
                    min_val = 1
                if(remain_node_num > self.ext_info["Zone"][self.zone_list[i]]["maximum"]):
                    max_val = self.ext_info["Zone"][self.zone_list[i]]["maximum"]
                else:
                    # Earlier zones keep at least one node back so later
                    # zones are not starved; the last zone takes the rest.
                    if(i +1 == len(self.zone_list)):
                        max_val = remain_node_num
                    else:
                        max_val = remain_node_num - 1
                if(min_val != max_val):
                    node_number[self.zone_list[i]] = self.answer_response_node_number("Please input the number of compute nodes in " + str(self.zone_list[i]), min_val, max_val)
                else:
                    node_number[self.zone_list[i]] = min_val
                    _ = printout("Info: The number of compute nodes in " + self.zone_list[i] + " is automatically set to " + str(node_number[self.zone_list[i]]), info_type = 0, info_list = self.info_list, fp = self.fp)
                remain_node_num = remain_node_num - int(node_number[self.zone_list[i]])
    # ---- Shrink path -----------------------------------------------------
    elif(new_node_number < self.cluster_info["baseparams"]["compute_number"]):
        if(len(self.zone_list) == 1):
            node_number[self.zone_list[0]] = new_node_number
        else:
            # Candidate zones for deletion: not the head zone and no NFS
            # appliance, ordered by capacity so the smallest go first.
            order_zone = []
            order_num = []
            for zone in self.zone_list:
                if(zone != self.head_zone and self.cluster_info["clusterparams"]["nfs"][zone] == None):
                    order_zone.append(zone)
                    order_num.append(self.ext_info["Zone"][zone]["maximum"] + len(self.cluster_info["clusterparams"]["server"][zone]["compute"]))
            order_zone = [order_zone[i] for i in np.argsort(order_num)]
            order_num = np.sort(order_num)
            # Capacity of the zones that must be kept (head zone + NFS zones).
            nfs_head_num = self.ext_info["Zone"][self.head_zone]["maximum"] + len(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"])
            nfs_zones = []
            for zone in list(self.cluster_info["clusterparams"]["nfs"].keys()):
                if(zone != self.head_zone and self.cluster_info["clusterparams"]["nfs"][zone] != None):
                    nfs_head_num += self.ext_info["Zone"][zone]["maximum"] + len(self.cluster_info["clusterparams"]["server"][zone]["compute"])
                    nfs_zones.append(zone + " (nfs exists)")
            _ = printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
            if(nfs_head_num >= new_node_number):
                # The must-keep zones alone can hold everything: delete all
                # deletable zones.
                #if(self.ext_info["Zone"][self.head_zone]["maximum"] + len(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"]) >= new_node_number):
                delete_zone = order_zone
                if(delete_zone != []):
                    _ = printout("Info: The following zones will be deleted (" + ",".join(delete_zone) + ")", info_type = 0, info_list = self.info_list, fp = self.fp)
                use_zone = list(set(self.zone_list) - set(delete_zone))
                if(len(nfs_zones) != 0):
                    _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)" + ", " + ", ".join(nfs_zones), info_type = 0, info_list = self.info_list, fp = self.fp)
                else:
                    _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)", info_type = 0, info_list = self.info_list, fp = self.fp)
            else:
                # Delete the smallest zones while the remainder still covers
                # the requested count.
                count = 1
                while(True):
                    if(sum(order_num[count:]) + nfs_head_num < new_node_number):
                        break
                    count += 1
                delete_zone = order_zone[:count - 1]
                use_zone = list(set(self.zone_list) - set(delete_zone))
                other_zone = list(set(use_zone) - set(nfs_zones) - set([self.head_zone]))
                if(delete_zone != []):
                    _ = printout("Info: The following zones will be deleted (" + ",".join(delete_zone) + ")", info_type = 0, info_list = self.info_list, fp = self.fp)
                if(len(nfs_zones) != 0):
                    if(len(other_zone) != 0):
                        _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)" + ", " + ", ".join(nfs_zones) + "," + ",".join(other_zone), info_type = 0, info_list = self.info_list, fp = self.fp)
                    else:
                        _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)" + ", " + ", ".join(nfs_zones), info_type = 0, info_list = self.info_list, fp = self.fp)
                else:
                    if(len(other_zone) != 0):
                        _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)" + "," + ",".join(other_zone), info_type = 0, info_list = self.info_list, fp = self.fp)
                    else:
                        _ = printout("Info: The zone is automatically set to the following zones: " + self.head_zone + " (head zone)", info_type = 0, info_list = self.info_list, fp = self.fp)
            # Distribute the remaining nodes over the zones that are kept.
            remain_node_num = new_node_number
            for zone in delete_zone:
                node_number[zone] = 0
            for i in range(len(use_zone)):
                if(remain_node_num == 0):
                    node_number[use_zone[i]] = 0
                    _ = printout("Info: The number of compute nodes in " + use_zone[i] + " is automatically set to " + str(node_number[use_zone[i]]), info_type = 0, info_list = self.info_list, fp = self.fp)
                else:
                    min_val = remain_node_num - sum([self.ext_info["Zone"][use_zone[j]]["maximum"] + len(self.cluster_info["clusterparams"]["server"][use_zone[j]]["compute"]) for j in range(i+1, len(use_zone))])
                    if(min_val < 1):
                        min_val = 1
                    if(remain_node_num > self.ext_info["Zone"][use_zone[i]]["maximum"]):
                        max_val = self.ext_info["Zone"][use_zone[i]]["maximum"]
                    else:
                        max_val = remain_node_num
                    if(min_val != max_val):
                        node_number[use_zone[i]] = self.answer_response_node_number("Please input the number of compute nodes in " + str(use_zone[i]), min_val, max_val)
                    else:
                        node_number[use_zone[i]] = min_val
                        _ = printout("Info: The number of compute nodes in " + use_zone[i] + " is automatically set to " + str(node_number[use_zone[i]]), info_type = 0, info_list = self.info_list, fp = self.fp)
                    remain_node_num = remain_node_num - int(node_number[use_zone[i]])
    #self.change_compute_number_zone(node_number)
    return node_number, new_node_number
def setting_middle(self, new_compute_setting):
    """
    Collect credentials when the new plan grows any zone's node count.

    Parameters
    ----------
    new_compute_setting : dict
        Mapping of zone name to target compute-node count.

    Returns
    -------
    tuple
        (username, password) from set_app_params() when at least one zone
        grows; (None, None) otherwise.
    """
    username = None
    password = None
    servers = self.cluster_info["clusterparams"]["server"]
    growing = []
    for zone, target in new_compute_setting.items():
        if zone not in servers and target > 0:
            # A brand-new zone with nodes counts as growth.
            growing.append(zone)
        else:
            if len(servers[zone]["compute"]) < target:
                growing.append(zone)
    if growing:
        _ = printout("In the following zones, the number of compute nodes will increase, so please specify the password and username for the new compute nodes. (" + ", ".join(growing) + ")", info_type = 0, info_list = self.info_list, fp = self.fp)
        username, password = self.set_app_params()
    return username, password
def input_username(self):
    """Prompt until a valid username is entered and return it.

    A valid username starts with a lowercase letter followed by up to 30
    more characters drawn from lowercase letters, digits and hyphens —
    31 characters total at most.
    """
    re_alnum = re.compile(r'^[a-z]{1}[-a-z0-9]{0,30}$')
    while(True):
        username = printout("[username] >> ", info_type = 1, info_list = self.info_list, fp = self.fp)
        if re_alnum.search(username) is not None:
            return username
        logger.debug('The username is wrong')
        # Message fixed: the pattern accepts at most 31 characters, not 32.
        printout('The username is wrong. Username must be at most 31 characters long and consist of numbers, lowercase letters, and hyphens (Initial is a lowercase letter).', info_type = 0, info_list = self.info_list, fp = self.fp)
def set_app_params(self):
    """Ask for the username and password used on newly created nodes.

    Returns (username, password).  Falls back to "sacloud"/"sacloud" when
    the username is empty (defensive only: input_username's pattern never
    matches an empty string).
    """
    logger.debug('setting username and password')
    username = self.input_username()
    password = printout("[password] >> ", info_type = 1, info_list = self.info_list, fp = self.fp).replace(" ","")
    # Blank lines for visual spacing only.  (FIX: the old code called
    # .replace(" ","") on these discarded results — useless, and it would
    # raise AttributeError if printout returns None for info_type=0.)
    printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
    printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
    if(username == ""):
        logger.info('username and password are automatically set to sacloud')
        username = "sacloud"
        password = "sacloud"
    return username, password
def change_compute_number(self, new_setting, username, password):
    """Apply a new per-zone compute-node layout to the running cluster.

    new_setting maps zone name -> desired compute-node count.  The method
    (1) creates bridges/switches when the cluster grows into new zones,
    (2) adds or removes compute servers (with their disks/NICs) in
    parallel until each zone matches its requested count, and (3) tears
    down switches/bridges for zones whose requested count is zero.
    username/password are the credentials written onto newly built node
    disks.  Progress is reported through self.progress_bar.
    """
    # --- 1. Network plumbing for newly added zones -----------------------
    if(len(new_setting) > len(self.zone_list)):
        # Growing from one zone to two: the inter-zone bridge does not
        # exist yet, so create it and attach the head zone's switch(es).
        if(len(self.zone_list) == 1 and len(new_setting) == 2 and self.cluster_info["clusterparams"]["bridge"]["front"] == None):
            self.cluster_info["clusterparams"]["bridge"]["front"] = {}
            self.cluster_info["clusterparams"]["bridge"]["front"]["id"] = self.create_bridge()
            self.progress_bar(3)
            self.connect_bridge_switch(self.head_zone, self.cluster_info["clusterparams"]["switch"][self.head_zone]["front"]["id"], self.cluster_info["clusterparams"]["bridge"]["front"]["id"])
            self.progress_bar(4)
            # Mirror the same wiring for the back-side network, if present.
            if(self.cluster_info["clusterparams"]["switch"][self.zone_list[0]]["back"] != None and self.cluster_info["clusterparams"]["bridge"]["back"] == None):
                self.cluster_info["clusterparams"]["bridge"]["back"] = {}
                self.cluster_info["clusterparams"]["bridge"]["back"]["id"] = self.create_bridge()
                self.progress_bar(3)
                self.connect_bridge_switch(self.head_zone, self.cluster_info["clusterparams"]["switch"][self.head_zone]["back"]["id"], self.cluster_info["clusterparams"]["bridge"]["back"]["id"])
                self.progress_bar(4)
        self.progress_bar(14 - self.progress_sum)
        # Create a front (and optionally back) switch in every brand-new
        # zone and attach it to the shared bridge.
        for zone in new_setting.keys():
            if(zone not in self.cluster_info["clusterparams"]["switch"]):
                self.cluster_info["clusterparams"]["server"][zone] = {}
                self.cluster_info["clusterparams"]["server"][zone]["compute"] = {}
                self.cluster_info["clusterparams"]["switch"][zone] = {}
                self.cluster_info["clusterparams"]["switch"][zone]["front"] = {}
                self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"] = self.create_switch(zone)
                self.progress_bar(int(4/len(new_setting)))
                self.connect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"], self.cluster_info["clusterparams"]["bridge"]["front"]["id"])
                self.progress_bar(int(4/len(new_setting)))
                if(self.cluster_info["clusterparams"]["bridge"]["back"] != None):
                    self.cluster_info["clusterparams"]["switch"][zone]["back"] = {}
                    self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"] = self.create_switch(zone)
                    self.progress_bar(int(4/len(new_setting)))
                    self.connect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"], self.cluster_info["clusterparams"]["bridge"]["back"]["id"])
                    self.progress_bar(int(4/len(new_setting)))
        self.progress_bar(30 - self.progress_sum)
    # --- 2. Grow/shrink each zone to its requested node count ------------
    count = 0
    for zone, comp_num in new_setting.items():
        current_comp_num = len(self.cluster_info["clusterparams"]["server"][zone]["compute"])
        if(current_comp_num > comp_num):
            # Too many nodes: delete the surplus (highest index first) in
            # parallel; on any failure ask the user whether to retry.
            # red_comp_server nulls ids as it goes, so retries skip the
            # resources that were already released.
            with futures.ThreadPoolExecutor(max_workers = self.max_workers, thread_name_prefix="thread") as executor:
                while(True):
                    future = []
                    for i in range(current_comp_num -1, comp_num - 1, -1):
                        future.append(executor.submit(self.red_comp_server, zone, i, comp_num, current_comp_num, new_setting))
                    future_result = [f.result()[0] for f in future]
                    future_msg = [f.result()[1] for f in future if(f.result()[0] == False and f.result()[1] != "")]
                    # Keep only the first failure's message list (res_check
                    # returns a list of message lines).
                    if(len(future_msg) > 0):
                        future_msg = future_msg[0]
                    if False in future_result:
                        temp = conf_pattern_2("\n".join(future_msg) + "\nTry again??", ["yes", "no"], "no", info_list = self.info_list, fp = self.fp)
                        if temp == "no":
                            self.bar.close()
                            printout("Stop processing." , info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
                            sys.exit()
                    else:
                        break
            #self.red_comp_server(zone, i, comp_num, current_comp_num, new_setting)
            """
            self.dis_connect_server_switch(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["nic"]["front"]["id"])
            if(self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["nic"]["back"] != None):
                self.dis_connect_server_switch(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["nic"]["back"]["id"])
            self.delete_server(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["node"]["id"], self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["node"]["name"])
            for j in range(len(self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["disk"])):
                self.delete_disk(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["disk"][j]["id"], self.cluster_info["clusterparams"]["server"][zone]["compute"][i]["node"]["name"])
            self.progress_bar(50/(len(new_setting) * (len(self.cluster_info["clusterparams"]["server"][zone]["compute"]) - comp_num)))
            """
        elif(len(self.cluster_info["clusterparams"]["server"][zone]["compute"]) < comp_num):
            # Too few nodes: clone the head zone's first compute node's
            # plan and disk configuration for every new server.
            server_disk_config = {}
            server_disk_config["node_planid"] = int(self.ext_info["Server"][str(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["node"]["core"])][str(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["node"]["memory"])])
            server_disk_config["disk_type_id"] = int(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["disk"][0]["type"])
            server_disk_config["disk_connection_type"] = self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["disk"][0]["connection"]
            server_disk_config["os_name"] = self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["disk"][0]["os"]
            server_disk_config["disk_size"] = int(self.cluster_info["clusterparams"]["server"][self.head_zone]["compute"][0]["disk"][0]["size"])
            server_disk_config["password"] = password
            server_disk_config["username"] = username
            # Build the missing nodes in parallel; same retry loop as above.
            with futures.ThreadPoolExecutor(max_workers = self.max_workers, thread_name_prefix="thread") as executor:
                while(True):
                    future = []
                    for i in range(current_comp_num, comp_num):
                        future.append(executor.submit(self.add_comp_server, i, zone, server_disk_config, comp_num, current_comp_num, new_setting))
                    future_result = [f.result()[0] for f in future]
                    future_msg = [f.result()[1] for f in future if(f.result()[0] == False and f.result()[1] != "")]
                    if(len(future_msg) > 0):
                        future_msg = future_msg[0]
                    if False in future_result:
                        temp = conf_pattern_2("\n".join(future_msg) + "\nTry again??", ["yes", "no"], "no", info_list = self.info_list, fp = self.fp)
                        if temp == "no":
                            self.bar.close()
                            printout("Stop processing." , info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
                            sys.exit()
                    else:
                        break
            #self.add_comp_server(i, zone, server_disk_config, comp_num, current_comp_num, new_setting)
            #if i < 9:
            #compute_node_name = "compute_node_00"+str(i + 1)
            #elif i >= 9:
            #compute_node_name = "compute_node_0"+str(i + 1)
            #server_response = self.build_server(zone, compute_node_name, node_planid, self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"])
            #disk_res = self.add_disk(zone, compute_node_name, disk_type_id, disk_connection_type, disk_size, os_name, password, username)
            #self.connect_server_disk(zone, disk_res["Disk"]["ID"], server_response["Server"]["ID"])
            #if(self.cluster_info["clusterparams"]["switch"][zone]["back"] != None):
            #nic_id = self.add_interface(zone, server_response["Server"]["ID"])
            #self.connect_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"], nic_id)
            #self.progress_bar(50/(len(new_setting) * (comp_num - len(self.cluster_info["clusterparams"]["server"][zone]["compute"]))))
        count += 1
    self.progress_bar(int(50 - int(50/len(new_setting))))
    # --- 3. Tear down networking for zones dropped from the layout -------
    new_zones = [k for k, v in new_setting.items() if v != 0]
    if(len(new_zones) != len(self.zone_list)):
        delete_zone = list(set(self.zone_list) - set(new_zones))
        for zone in delete_zone:
            self.disconnect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"])
            self.delete_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"])
            self.progress_bar(int(5/len(delete_zone)))
            if(self.cluster_info["clusterparams"]["switch"][zone]["back"] != None):
                self.disconnect_bridge_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"])
                self.delete_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"])
                self.progress_bar(int(4/len(delete_zone)))
        # Shrinking back to a single zone: the bridge is no longer needed.
        if(len(self.zone_list) == 2 and len(new_zones) == 1 and self.cluster_info["clusterparams"]["bridge"]["front"] != None):
            self.disconnect_bridge_switch(self.head_zone, self.cluster_info["clusterparams"]["switch"][self.head_zone]["front"]["id"])
            self.delete_bridge(self.cluster_info["clusterparams"]["bridge"]["front"]["id"])
            self.progress_bar(int(5/len(delete_zone)))
            if(self.cluster_info["clusterparams"]["bridge"]["back"] != None):
                self.disconnect_bridge_switch(self.head_zone, self.cluster_info["clusterparams"]["switch"][self.head_zone]["back"]["id"])
                self.delete_bridge(self.cluster_info["clusterparams"]["bridge"]["back"]["id"])
                self.progress_bar(int(4/len(delete_zone)))
def red_comp_server(self, zone, number, comp_num, current_comp_num, new_setting):
    """Remove compute node ``number`` in ``zone`` (NICs, server, disks).

    Each resource id is set to None as soon as it is released, so a retry
    after a partial failure skips the pieces already deleted.  Returns
    (True, "") on success, or (False, message_list) as soon as one API
    call fails (com_index=True makes the helper return instead of
    prompting the user).  comp_num / current_comp_num / new_setting are
    only used to scale the progress-bar increment.
    """
    if(number in self.cluster_info["clusterparams"]["server"][zone]["compute"]):
        # 1. Detach the front-side NIC from its switch.
        if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["front"]["id"] != None):
            res, ind = self.dis_connect_server_switch(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["front"]["id"], com_index = True)
            if(ind == False):
                return ind, res
            else:
                self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["front"]["id"] = None
        # 2. Detach the back-side NIC, when the node has one.
        if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["back"] != None):
            if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["back"]["id"] != None):
                res, ind = self.dis_connect_server_switch(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["back"]["id"], com_index = True)
                if(ind == False):
                    return ind, res
                else:
                    self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["nic"]["back"]["id"] = None
        # 3. Delete the server itself.
        if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["id"] != None):
            res, ind = self.delete_server(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["id"], self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["name"], com_index = True)
            if(ind == False):
                return ind, res
            else:
                self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["id"] = None
        # 4. Delete every disk that was attached to the server.
        if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"] != None):
            for j in range(len(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"])):
                if(self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"][j]["id"] != None):
                    res, ind = self.delete_disk(zone, self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"][j]["id"], self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["name"], com_index = True)
                    if(ind == False):
                        return ind, res
                    else:
                        self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"][j]["id"] = None
            self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["disk"] = None
        # Node fully released: drop its bookkeeping entry and advance the bar.
        self.cluster_info["clusterparams"]["server"][zone]["compute"].pop(number)
        self.progress_bar(int(50/(len(new_setting) * (current_comp_num - comp_num))))
    return True, ""
def add_comp_server(self, number, zone, server_disk_config, comp_num, current_comp_num, new_setting):
    """Build compute node ``number`` in ``zone``: server, disk, optional back NIC.

    server_disk_config carries the plan id, disk type/connection/size/OS
    and login credentials copied from the head zone's first compute node.
    Returns (res_index, server_response): (True, ...) on success or when
    the node already exists, (False, msg) when build_server fails.
    """
    # Node names are 1-based and zero-padded: compute_node_001 ... compute_node_010 ...
    # NOTE(review): for number >= 99 this yields e.g. "compute_node_0100" — confirm intended.
    if number < 9:
        compute_node_name = "compute_node_00"+str(number + 1)
    elif number >= 9:
        compute_node_name = "compute_node_0"+str(number + 1)
    if not number in self.cluster_info["clusterparams"]["server"][zone]["compute"].keys():
        server_response,res_index = self.build_server(zone, compute_node_name, server_disk_config["node_planid"], self.cluster_info["clusterparams"]["switch"][zone]["front"]["id"])
        # NOTE(review): the bookkeeping entry is created even when the build
        # failed, so a retry hits the else branch below and reports success —
        # confirm that is the intended retry semantics.
        self.cluster_info["clusterparams"]["server"][zone]["compute"][number] = {}
        self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"] = {}
        if res_index == True:
            if(self.api_index == True):
                server_id = server_response["Server"]["ID"]
                self.cluster_info["clusterparams"]["server"][zone]["compute"][number]["node"]["id"] = server_id
            else:
                # Dry-run placeholder id (000000000000 is the int literal 0).
                server_id = 000000000000
            disk_res = self.add_disk(zone, compute_node_name, server_disk_config["disk_type_id"], server_disk_config["disk_connection_type"], server_disk_config["disk_size"], server_disk_config["os_name"], server_disk_config["password"], server_disk_config["username"])
            if(self.api_index == True):
                disk_id = disk_res["Disk"]["ID"]
            else:
                disk_id = 000000000000
            self.connect_server_disk(zone, disk_id, server_id)
            # Wire a second NIC to the back-side switch when the zone has one.
            if(self.cluster_info["clusterparams"]["switch"][zone]["back"] != None):
                nic_id = self.add_interface(zone, server_id)
                self.connect_switch(zone, self.cluster_info["clusterparams"]["switch"][zone]["back"]["id"], nic_id)
            self.progress_bar(int(50/(len(new_setting) * (comp_num - current_comp_num))))
    else:
        # Entry already exists (e.g. created by a previous attempt): success.
        res_index = True
        server_response = ""
    return res_index, server_response
#Specify the new core count and memory size
def core_memory_setting(self, server_type):
    """Interactively choose a core count and memory size for ``server_type``.

    Candidates come from self.ext_info["Server"], a mapping of
    core-count -> {memory-size -> plan id}.  Loops until the user confirms
    the summary, then returns (node_plan, core_plan, memory_plan).
    """
    while(True):
        printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
        # Number of cores for the node.
        logger.debug('setting core number')
        candidate = list(self.ext_info["Server"].keys())
        core_plan = int(self.answer_response_core("The number of core for " + str(server_type), candidate, candidate[0]))
        # Memory size for the node; labels also show the size in GB.
        logger.debug('setting memory size')
        candidate = [str(i) + " (" + str(int(round(int(i), -3)/1000)) + "GB)" for i in list(self.ext_info["Server"][str(core_plan)].keys())]
        memory_plan = list(self.ext_info["Server"][str(core_plan)].keys())[self.answer_response_memory("Size of memory for " + str(server_type), candidate, candidate[0])]
        node_plan = self.ext_info["Server"][str(core_plan)][memory_plan]
        # Print a banner summarising the chosen plan and ask for confirmation.
        printout('#' *5 + ' New ' + server_type + ' setting ' + '#' *5, info_type = 0, info_list = self.info_list, fp = self.fp)
        core_comment = 'Core:' + str(core_plan)
        printout(core_comment.center(len('#' *5 + ' New ' + server_type + ' setting ' + '#' *5)) , info_type = 0, info_list = self.info_list, fp = self.fp)
        memory_comment = 'Memory:' + str(memory_plan)
        printout(memory_comment.center(len('#' *5 + ' New ' + server_type + ' setting ' + '#' *5)) , info_type = 0, info_list = self.info_list, fp = self.fp)
        printout('#' * len('#' *5 + ' New ' + server_type + ' setting ' + '#' *5), info_type = 0, info_list = self.info_list, fp = self.fp)
        printout('', info_type = 0, info_list = self.info_list, fp = self.fp)
        res = self.answer_response("Are the above settings correct?", ["yes", "y", "no", "n"], "yes/y or no/n")
        if(res == "yes" or res == "y"):
            break
    return node_plan, core_plan, memory_plan
#Add a server
def build_server(self, zone, node_name, node_planid, head_switch_id):
    """Create server ``node_name`` in ``zone`` via the cloud API.

    The server is created on plan ``node_planid``, tagged with the cluster
    ID, and wired to switch ``head_switch_id``.  Returns
    (server_response, True) on success, or (msg, False) with res_check's
    message list when the POST fails.  With self.api_index False no
    request is made and a stub response is returned.
    """
    printout("constructing " + node_name + " ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("constructing " + node_name + " ……")
    param = {"Server":{"Name":node_name,"ServerPlan":{"ID":node_planid},"Tags":["cluster ID: " +self.cluster_id, self.date_modified],"ConnectedSwitches":[{"ID": head_switch_id}]},"Count":0}
    if (self.api_index == True):
        while(True):
            logger.debug("build server")
            server_response = post(self.url_list[zone] + self.sub_url[0], self.auth_res, param)
            check, msg = self.res_check(server_response, "post", com_index = True)
            if check == True:
                # NOTE(review): assumes the API returns the ID as a string —
                # it is concatenated below without str(); confirm.
                node_id = server_response["Server"]["ID"]
                logger.info(node_name + " ID: " + node_id + "-Success")
                res_index = True
                break
            else:
                logger.debug("Error:cannot build server")
                res_index = False
                return msg,res_index
    else:
        server_response = "API is not used."
        node_id = "000"
        logger.debug("constructed " + node_name)
        res_index = True
    return server_response, res_index
#Add a disk
def add_disk(self, zone, disk_name, disk_type_id, disk_connection_type, disk_size, os_name, password, username):
    """Create a disk named ``disk_name`` in ``zone`` from an OS source archive.

    The disk is sized in MB, tagged with the cluster ID, and pre-configured
    with the given login ``password`` and hostname (``username``).  Retries
    via build_error until the POST succeeds; returns the raw API response
    (or a stub string when self.api_index is False).
    """
    printout("creating disk ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("creating disk for " + disk_name)
    param = {"Disk":{"Name":disk_name,"Plan":{"ID": disk_type_id}, "Connection": disk_connection_type ,"SizeMB":disk_size,"SourceArchive":{"Availability": "available","ID": int(self.ext_info["OS"][os_name][zone])},"Tags":["cluster ID: " + str(self.cluster_id)]},"Config":{"Password": str(password), "HostName": str(username)}}
    if (self.api_index == True):
        while(True):
            disk_res = post(self.url_list[zone] + self.sub_url[1], self.auth_res, param)
            check, msg = self.res_check(disk_res, "post")
            if check == True:
                # NOTE(review): assumes the ID is a string (concatenated without str()).
                disk_id = disk_res["Disk"]["ID"]
                logger.info("disk ID: " + disk_id + "-Success")
                break
            else:
                self.build_error()
    else:
        disk_res = "API is not used."
        disk_id = "0000"
    return disk_res
#Connect a server and a disk
def connect_server_disk(self, zone, disk_id, server_id):
    """Attach disk ``disk_id`` to server ``server_id`` in ``zone``.

    Retries through build_error until the PUT succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("connecting disk to server ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("connecting disk to server")
    url_disk = "/disk/" + str(disk_id) + "/to/server/" + str(server_id)
    if self.api_index != True:
        return "API is not used."
    while True:
        server_disk_res = put(self.url_list[zone] + url_disk, self.auth_res)
        ok, _msg = self.res_check(server_disk_res, "put")
        if ok:
            logger.debug("connected disk to server: " + server_id + "-" + disk_id)
            return server_disk_res
        # Failure: let the user decide whether to retry (build_error may exit).
        self.build_error()
def delete_server(self,zone,node_id, node_name, com_index = False):
    """Delete server ``node_id`` (display name ``node_name``) in ``zone``.

    Returns (delete_res, res_index).  On API failure: with com_index False
    the user is asked to retry via build_error (which may sys.exit); with
    com_index True res_check's message list is returned as (msg, False) so
    the caller can report it.  With self.api_index False no request is
    made and a stub response is returned.
    """
    printout("deleting " + node_name + " ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    if(self.api_index == True):
        while(True):
            delete_res = delete(self.url_list[zone] + self.sub_url[0] + "/" + str(node_id), self.auth_res)
            check, msg = self.res_check(delete_res,"delete", com_index = True)
            if (check == True):
                logger.debug("Delete a server:" + str(node_id))
                res_index = True
                break
            else:
                # Hoisted out of the duplicated com_index branches.
                logger.debug("Error:cannot delete a server")
                res_index = False
                if com_index == False:
                    self.build_error()
                else:
                    return msg,res_index
    else:
        # FIX: removed dead `node_id = "000"` that shadowed the parameter.
        delete_res = "API is not used."
        logger.debug("Deleted a server")
        res_index = True
    return delete_res, res_index
def delete_disk(self,zone,disk_id, node_name, com_index = False):
    """Delete disk ``disk_id`` belonging to node ``node_name`` in ``zone``.

    Returns (delete_res, res_index).  On API failure: with com_index False
    the user is asked to retry via build_error; with com_index True
    res_check's message list is returned as (msg, False).  With
    self.api_index False no request is made and a stub response is
    returned.
    """
    printout("deleting disk of " + node_name + " ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    if(self.api_index == True):
        while(True):
            delete_res = delete(self.url_list[zone] + self.sub_url[1] + "/" + str(disk_id), self.auth_res)
            check, msg = self.res_check(delete_res,"delete", com_index = True)
            if (check == True):
                logger.debug("Delete this disk:" + str(disk_id))
                res_index = True
                break
            else:
                # Hoisted out of the duplicated com_index branches.
                logger.debug("Error:cannot delete a disk")
                res_index = False
                if com_index == False:
                    self.build_error()
                else:
                    return msg,res_index
    else:
        # FIX: removed dead `node_id = "000"` copy-paste leftover.
        delete_res = "API is not used."
        logger.debug("Deleted a disk")
        res_index = True
    return delete_res, res_index
#Change the node plan
def change_node_plan(self, zone, server_id, plan_id, ind = 0, node_num = "*"):
    """Move server ``server_id`` in ``zone`` to plan ``plan_id``.

    ind selects the failure policy: 0 prompts the user to retry through
    build_error; 1 returns (False, msg, "") to the caller instead.
    Returns (True, msg, new_server_id) on success, where new_server_id is
    the ID the API assigns after the plan change ("API is not used." when
    self.api_index is False).  node_num is currently unused.
    """
    ind_dic = {0: False, 1:True}
    printout("changing node plan ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("changing node plan")
    url = self.url_list[zone] + self.sub_url[0] + "/" + str(server_id) + "/to/plan/" + str(plan_id)
    if (self.api_index == True):
        while(True):
            response = put(url, self.auth_res)
            check, msg = self.res_check(response, "put", com_index = ind_dic[ind])
            if check == True:
                # A plan change re-creates the server, so pick up the new ID.
                res = response["Server"]["ID"]
                break
            else:
                if(ind == 0):
                    self.build_error()
                else:
                    return False, msg, ""
    else:
        res = "API is not used."
        msg = ""
    return True, msg, res
#Add a switch
def create_switch(self, zone):
    """Create a back-area switch in ``zone`` and return its integer ID.

    Retries through build_error until the POST succeeds; returns 0 when
    self.api_index is False (no request is made).
    """
    printout("creating a switch in back area ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("creating a switch in back area")
    param = {
        "Switch": {"Name": "Switch in back area", "Tags": ["cluster ID: " + self.cluster_id]},
        "Count": 0,
    }
    if self.api_index != True:
        return 0
    while True:
        switch_response = post(self.url_list[zone] + self.sub_url[2], self.auth_res, param)
        ok, _msg = self.res_check(switch_response, "post")
        if ok:
            switch_id = int(switch_response["Switch"]["ID"])
            logger.info("switch ID: " + str(switch_id) + "-Success")
            return switch_id
        self.build_error()
#Delete a switch
def delete_switch(self, zone, switch_id):
    """Delete back-area switch ``switch_id`` in ``zone``.

    Retries through build_error until the DELETE succeeds; returns the
    raw API response, or a stub string when self.api_index is False.
    """
    printout("deleting a switch in back area ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("deleting a switch in back area")
    if self.api_index != True:
        return "API is not used."
    target = self.url_list[zone] + self.sub_url[2] + "/" + str(switch_id)
    while True:
        switch_response = delete(target, self.auth_res)
        ok, _msg = self.res_check(switch_response, "delete")
        if ok:
            return switch_response
        self.build_error()
#Connect a NIC to a switch
def connect_switch(self, zone, switch_id, nic_id):
    """Plug NIC ``nic_id`` into switch ``switch_id`` in ``zone``.

    Retries through build_error until the PUT succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("connecting switch to nic ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("connecting switch to nic")
    endpoint = self.sub_url[3] + "/" + str(nic_id) + "/to/switch/" + str(switch_id)
    if self.api_index != True:
        return "API is not used."
    while True:
        connect_switch_response = put(self.url_list[zone] + endpoint, self.auth_res)
        ok, _msg = self.res_check(connect_switch_response, "put")
        if ok:
            logger.debug("connected switch to nic: " + str(switch_id) + "-" + str(nic_id))
            return connect_switch_response
        self.build_error()
"""
#NICとスイッチの接続を解除
def dis_connect_switch(self, zone, nic_id):
printout("disconnecting switch from nic ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
logger.debug("disconnecting switch to nic")
sub_url_con = self.sub_url[3] + "/" + str(nic_id) + "/to/switch"
if (self.api_index == True):
while(True):
disconnect_switch_response = delete(self.url_list[zone] + sub_url_con, self.auth_res)
check, msg = self.res_check(disconnect_switch_response, "delete")
if check == True:
logger.debug("disconnected switch to nic: " + str(nic_id))
break
else:
self.build_error()
else:
disconnect_switch_response = "API is not used."
return disconnect_switch_response
"""
def dis_connect_server_switch(self,zone, nic_id, com_index = False):
    """Disconnect NIC ``nic_id`` from its switch in ``zone``.

    Returns (delete_res, res_index).  On API failure: with com_index False
    the user is asked to retry via build_error; with com_index True
    res_check's message list is returned as (msg, False).  With
    self.api_index False no request is made and a stub response is
    returned.
    """
    printout("disconnecting server from switch ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    if(self.api_index == True):
        while(True):
            delete_res = delete(self.url_list[zone] + self.sub_url[3] + "/" + str(nic_id) + "/to" + self.sub_url[2], self.auth_res)
            # FIX: forward com_index directly instead of duplicating the
            # res_check call in two branches.
            check, msg = self.res_check(delete_res, "delete", com_index = com_index)
            if (check == True):
                logger.debug("Disconnect server and switch(NIC ID): " + str(nic_id))
                res_index = True
                break
            else:
                logger.debug("Error:cannot disconnect switch from server")
                res_index = False
                if com_index == False:
                    self.build_error()
                else:
                    return msg,res_index
    else:
        # FIX: removed dead `node_id = "000"` local.
        delete_res = "API is not used."
        logger.debug("Disconnect switch from server")
        res_index = True
    return delete_res, res_index
#Add a NIC
def add_interface(self, zone, node_id):
    """Add a NIC to server ``node_id`` in ``zone`` and return its integer ID.

    Retries through build_error until the POST succeeds; returns 0 when
    self.api_index is False (no request is made).
    """
    printout("adding nic ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("adding nic")
    add_nic_param = {"Interface": {"Server": {"ID": str(node_id)}}, "Count": 0}
    if self.api_index != True:
        return 0
    while True:
        add_nic_response = post(self.url_list[zone] + self.sub_url[3], self.auth_res, add_nic_param)
        ok, _msg = self.res_check(add_nic_response, "post")
        if ok:
            nic_id = int(add_nic_response["Interface"]["ID"])
            logger.info("nic ID: " + str(nic_id) + "-Success")
            return nic_id
        self.build_error()
#Delete a NIC
def delete_interface(self, zone, nic_id):
    """Delete NIC ``nic_id`` in ``zone``.

    Retries through build_error until the DELETE succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("deleting nic ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("deleting nic")
    sub_url_con = self.sub_url[3] + "/" + str(nic_id)
    if (self.api_index == True):
        while(True):
            del_nic_response = delete(self.url_list[zone] + sub_url_con, self.auth_res)
            check, msg = self.res_check(del_nic_response, "delete")
            if check == True:
                # FIX: corrected "delelte" typo in the log message.
                logger.info("delete nic ID: " + str(nic_id) + "-Success")
                break
            else:
                self.build_error()
    else:
        del_nic_response = "API is not used."
    return del_nic_response
#Create a bridge
def create_bridge(self):
    """Create the inter-zone bridge and return its integer ID.

    The bridge is named after the cluster's config_name.  Retries through
    build_error until the POST succeeds; returns 0 when self.api_index is
    False (no request is made).
    """
    printout("creating bridge ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("creating bridge")
    param = {"Bridge": {"Name": "Bridge for " + self.cluster_info["baseparams"]["config_name"]}}
    if self.api_index != True:
        return 0
    while True:
        bridge_res = post(self.head_url + self.sub_url[4], self.auth_res, param)
        ok, _msg = self.res_check(bridge_res, "post")
        if ok:
            return int(bridge_res["Bridge"]["ID"])
        self.build_error()
#Delete a bridge
def delete_bridge(self, bridge_id):
    """Delete bridge ``bridge_id``.

    Retries through build_error until the DELETE succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("deleting bridge ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    logger.debug("deleting bridge")
    if self.api_index != True:
        return "API is not used."
    target = self.head_url + self.sub_url[4] + "/" + str(bridge_id)
    while True:
        bridge_res = delete(target, self.auth_res)
        ok, _msg = self.res_check(bridge_res, "delete")
        if ok:
            return bridge_res
        self.build_error()
#Connect a switch to a bridge
def connect_bridge_switch(self, zone, switch_id, bridge_id):
    """Attach switch ``switch_id`` in ``zone`` to bridge ``bridge_id``.

    Retries through build_error until the PUT succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("connecting switch to bridge ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    endpoint = self.sub_url[2] + "/" + str(switch_id) + "/to/bridge/" + str(bridge_id)
    if self.api_index != True:
        return "API is not used."
    while True:
        bridge_switch_res = put(self.url_list[zone] + endpoint, self.auth_res)
        ok, _msg = self.res_check(bridge_switch_res, "put")
        if ok:
            logger.debug("connected switch to bridge: " + str(switch_id) + "-" + str(bridge_id))
            return bridge_switch_res
        self.build_error()
#Disconnect a switch from a bridge
def disconnect_bridge_switch(self, zone, switch_id):
    """Detach switch ``switch_id`` in ``zone`` from its bridge.

    Retries through build_error until the DELETE succeeds; returns the raw
    API response, or a stub string when self.api_index is False.
    """
    printout("disconnecting switch from bridge ……", info_type = 0, info_list = self.info_list, fp = self.fp, overwrite = True)
    endpoint = self.sub_url[2] + "/" + str(switch_id) + "/to/bridge"
    if self.api_index != True:
        return "API is not used."
    while True:
        bridge_switch_res = delete(self.url_list[zone] + endpoint, self.auth_res)
        ok, _msg = self.res_check(bridge_switch_res, "delete")
        if ok:
            logger.debug("disconnected switch to bridge: " + str(switch_id))
            return bridge_switch_res
        self.build_error()
#API error handling
def build_error(self):
    """Ask the user whether to retry a failed API request.

    Returns normally when the user answers "yes" (callers then loop and
    retry); terminates the program via sys.exit() on "no"; re-prompts on
    any other input.
    """
    logger.debug("decision of repeating to request")
    while(True):
        conf = printout("Try again??(yes/no):", info_type = 2,info_list = self.info_list, fp = self.fp)
        if conf == "yes":
            break
        elif conf == "no":
            printout("Stop processing.", info_type = 0, info_list = self.info_list, fp = self.fp)
            sys.exit()
        else:
            # NOTE(review): unlike every other call, info_type is omitted here —
            # confirm printout's default makes this print rather than prompt.
            _ = printout("Please answer yes or no.",info_list = self.info_list,fp = self.fp)
#Check and process an API response
def res_check(self, res, met, com_index = False):
    """Classify an API response dict as success or failure.

    res is the decoded JSON from get/post/put/delete; met is that HTTP
    method name, used to pick the success flag the API returns ("Success"
    for put, "is_ok" otherwise).  Returns (check, msg):
      * (True, "") on success;
      * (False, "") on failure when com_index is False (the caller handles
        user interaction itself, e.g. via build_error);
      * (False, [line, ...]) on failure when com_index is True, so the
        caller can "\\n".join() and display the lines.
    Responses flagged "is_fatal" carry "status"/"error_msg" details, which
    are printed immediately (com_index False) or returned (com_index True).
    NOTE(review): a response with neither key falls through and returns
    None implicitly — confirm upstream always sets one of them.
    """
    met_dict = {"get": "is_ok", "post": "is_ok", "put": "Success", "delete": "is_ok"}
    index = met_dict[met]
    msg = ""
    logger.debug("confirm API request(" + str(met) + ")")
    if (index in res.keys()):
        if res[index] == True:
            logger.debug("API processing succeeded")
            check = True
            return check, msg
        else:
            logger.warning("API processing failed")
            if com_index == False:
                check = False
                return check, msg
            else:
                # BUG FIX: was list("Error:"), which split the string into
                # single characters; callers join this value with newlines,
                # so it must be a list of message strings.
                msg = ["Error:"]
                check = False
                return check, msg
    elif ("is_fatal" in res.keys()):
        logger.warning("API processing failed")
        if com_index == False:
            printout("Status:" + res["status"], info_type = 0, info_list = self.info_list, fp = self.fp)
            printout("Error:" + res["error_msg"], info_type = 0, info_list = self.info_list, fp = self.fp)
            check = False
            return check, msg
        else:
            msg = ["Status:" + res["status"], "Error:" + res["error_msg"]]
            check = False
            return check, msg
def progress_bar(self, up):
    """Advance the progress-bar widget and the running total by int(up)."""
    step = int(up)
    self.bar.update(step)
    self.progress_sum += step
def answer_response(self, comment, candidate, candidate_comment, input_comment = "", opp = 0):
if(opp == 0):
while(True):
input_val = printout(comment + "(" + candidate_comment + ") >>", info_type = 1, info_list = self.info_list, fp = self.fp)
if(input_val in candidate):
return input_val
else:
printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
_ = printout("Warning: the input must be selected from " + candidate_comment, info_type = 0, info_list = self.info_list, fp = self.fp)
elif(opp == 1):
while(True):
printout(comment, info_type = 0, info_list = self.info_list, fp = self.fp)
input_val = printout("[[" + input_comment + "]]>>" , info_type = 1, info_list = self.info_list, fp = self.fp)
if(input_val in candidate):
return input_val
else:
printout('' , info_type = 0, info_list = self.info_list, fp = self.fp)
_ = printout("Warning: the input must be selected from " + candidate_comment, info_type = 0, info_list = self.info_list, fp = self.fp)
def answer_response_memory(self, comment, candidate, default):
    """Show an indexed menu of memory-size labels and return the chosen index.

    candidate is a list of display strings; index 0 is the default and is
    returned for empty input.  With more than ``pos_index`` entries only
    the first five are listed plus an "others" item that expands the menu
    to the full list when selected.  ``default`` matches the signature of
    the other answer_response_* helpers but is not used here.
    """
    # Number of entries shown before collapsing the rest behind "others".
    pos_index = 5
    if(len(candidate) <= pos_index):
        # Short list: print every candidate and loop until a valid index.
        while(True):
            printout("[[" + str(comment) + "]]", info_type = 0, info_list = self.info_list, fp = self.fp)
            printout(str(0) + ": " + str(candidate[0]) + " (default)", info_type = 0, info_list = self.info_list, fp = self.fp)
            for i in range(1,len(candidate)):
                printout(str(i) + ": " + str(candidate[i]), info_type = 0, info_list = self.info_list, fp = self.fp)
            val = printout(">>>", info_type = 1, info_list = self.info_list, fp = self.fp)
            printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
            if(val == ""):
                return 0
            elif(val.isdigit() != True):
                printout("Warning: Please specify in the index", info_type = 0, info_list = self.info_list, fp = self.fp)
            elif((int(val) < 0) or (int(val) >= len(candidate))):
                printout("Warning: An unexpected value", info_type = 0, info_list = self.info_list, fp = self.fp)
            else:
                return int(val)
    else:
        # Long list: show the first pos_index entries plus "others";
        # choosing "others" widens pos_index to the full list and re-prompts.
        while(True):
            printout("[[" + str(comment) + "]]", info_type = 0, info_list = self.info_list, fp = self.fp)
            printout(str(0) + ": " + str(candidate[0]) + " (default)", info_type = 0, info_list = self.info_list, fp = self.fp)
            for i in range(1, pos_index):
                printout(str(i) + ": " + str(candidate[i]), info_type = 0, info_list = self.info_list, fp = self.fp)
            if(pos_index < len(candidate)):
                printout(str(pos_index) + ": others", info_type = 0, info_list = self.info_list, fp = self.fp)
            val = printout(">>>", info_type = 1, info_list = self.info_list, fp = self.fp)
            printout("", info_type = 0, info_list = self.info_list, fp = self.fp)
            if(val == ""):
                return 0
            elif(val.isdigit() != True):
                printout("Warning: Please specify in the index", info_type = 0, info_list = self.info_list, fp = self.fp)
            elif(int(val) == pos_index):
                # "others" selected: expose the remaining candidates.
                pos_index = len(candidate)
            elif((pos_index < len(candidate)) and ((int(val) < 0) or (int(val) > pos_index))):
                printout("Warning: An unexpected value", info_type = 0, info_list = self.info_list, fp = self.fp)
            elif((pos_index == len(candidate)) and ((int(val) < 0) or (int(val) >= pos_index))):
                printout("Warning: An unexpected value", info_type = 0, info_list = self.info_list, fp = self.fp)
            else:
                return int(val)
def answer_response_core(self, comment, candidates, default):
while(True):
val = printout("[" + str(comment) + " {" + ",".join(candidates) + "}, (default: " + str(default) + ")] >> ", info_type = 1, info_list = self.info_list, fp = self.fp).replace(" ","")
if(val == ""):
return default
else:
if(val in candidates):
return val
else:
_ = printout("Warning: " + comment + " must be selected one from " + ",".join(candidates), info_type = 0, info_list = self.info_list, fp = self.fp)
def answer_response_node_number(self, comment, min_val, max_val, current_val = 0, sp_type = 0):
while(True):
val = printout("[[" + str(comment) + " {" + str(min_val) + "~" + str(max_val) + "}]]>> ", info_type = 1, info_list = self.info_list, fp = self.fp).replace(" ","")
if(val.isdecimal() == False):
_ = printout("Warning: " + comment + " must be number from " + str(min_val) + " to " + str(max_val), info_type = 0, info_list = self.info_list, fp = self.fp)
elif(current_val == int(val) and sp_type == 1):
_ = printout("Warning: the specified number is the same as the current compute number", info_type = 0, info_list = self.info_list, fp = self.fp)
elif(min_val <= int(val) <= max_val):
return int(val)
else:
_ = printout("Warning: " + comment + " must be number from " + str(min_val) + " to "+ str(max_val), info_type = 0, info_list = self.info_list, fp = self.fp)
|
from domains.interval import Interval, IntervalAbstractState
import random
def test_single_interval_comparisons():
    """Order random intervals against Top and Bottom of the interval lattice."""
    random_intervals = []
    for _ in range(100):
        # Bounds are arbitrary; they only need lo <= hi.
        lo = random.randint(-100000, +100000)
        hi = random.randint(lo, +100000)
        random_intervals.extend([
            Interval(lo, hi),
            Interval(lo, float("inf")),
            Interval(float("-inf"), hi),
        ])
    top = Interval(float("-inf"), float("inf"))
    bottom = Interval(float("inf"), float("-inf"))
    # Top dominates everything; Bottom is below everything.
    assert bottom <= top
    for iv in random_intervals:
        assert bottom <= iv <= top
    # Conversely, nothing else is above Top or below Bottom.
    for iv in random_intervals:
        assert not iv >= top
        assert not iv <= bottom
    # Overlapping but non-nested intervals are incomparable.
    assert not (Interval(5, 100) <= Interval(6, 101))
    assert not (Interval(5, 100) >= Interval(6, 101))
def test_interval_state_creation_query():
    """A freshly built state reports exactly the intervals it was given."""
    expected = {
        "a": Interval(-100, 50),
        "b": Interval(float("-inf"), 5),
        "c": Interval(100, 200),
        "d": Interval(6, float("inf")),
    }
    state = IntervalAbstractState(dict(expected))
    for name, interval in expected.items():
        assert state.interval_of(name) == interval
def test_interval_state_creation_change_query():
    """set_interval replaces one variable's interval and leaves the rest alone."""
    intervals = {
        "a": Interval(-100, 50),
        "b": Interval(float("-inf"), 5),
        "c": Interval(100, 200),
        "d": Interval(6, float("inf")),
    }
    state = IntervalAbstractState(dict(intervals))
    state.set_interval("a", Interval(-99, 50))
    intervals["a"] = Interval(-99, 50)
    for name, interval in intervals.items():
        assert state.interval_of(name) == interval
def test_interval_state_equality():
    """States built from identical maps compare equal; diverge after mutation."""
    def fresh_state():
        # Helper so both states are constructed from the same literal.
        return IntervalAbstractState({
            "a": Interval(-100, 50),
            "b": Interval(float("-inf"), 5),
            "c": Interval(100, 200),
            "d": Interval(6, float("inf")),
        })
    left = fresh_state()
    right = fresh_state()
    assert left == right
    right.set_interval("a", Interval(-99, 50))
    assert left != right
def test_interval_state_ineq():
    """Pointwise ordering on states: only state1 <= state2 and state3 <= state2 hold."""
    state1 = IntervalAbstractState({
        "a": Interval(-100, 50),
        "b": Interval(float("-inf"), 5),
        "c": Interval(100, 200),
        "d": Interval(float("inf"), float("-inf")),
    })
    state2 = IntervalAbstractState({
        "a": Interval(-100, 50),
        "b": Interval(float("-inf"), float("inf")),
        "c": Interval(100, 201),
        "d": Interval(6, float("inf")),
    })
    state3 = IntervalAbstractState({
        "a": Interval(-100, 50),
        "b": Interval(float("-inf"), 4),
        "c": Interval(100, 201),
        "d": Interval(7, float("inf")),
    })
    # The orderings that should hold, in both spellings.
    assert state1 <= state2
    assert state3 <= state2
    assert state2 >= state1
    assert state2 >= state3
    # Every other pair/direction must fail.
    for small, big in ((state2, state1), (state1, state3), (state3, state1), (state2, state3)):
        assert not (small <= big)
    for big, small in ((state1, state2), (state3, state1), (state1, state3), (state3, state2)):
        assert not (big >= small)
|
#!/usr/bin/env python3
# Copyright (c) 2010 Anil Kumar
# All rights reserved.
#
# License: BSD
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from . import filecontext
class FileContextView(QTabWidget):
    """Tab widget hosting per-file context pages produced by filecontext plugins.

    Pages are created by ``filecontext.run_plugins``; a right-click offers
    plugin commands that are not yet opened as tabs.
    """

    sig_goto_line = pyqtSignal(int)
    sig_ed_cursor_changed = pyqtSignal(int, int)

    def __init__(self, parent=None):
        # BUG FIX: `parent` was previously accepted but never forwarded, so Qt
        # parent/child ownership (and automatic cleanup) was silently lost.
        QTabWidget.__init__(self, parent)
        self.setTabPosition(QTabWidget.South)
        # Set by run(); initialized here so the context-menu callback cannot
        # hit an AttributeError if triggered before run() is called.
        self.filename = None

    def add_page(self, page, title):
        """Append a plugin page as a tab and make it current."""
        page.sig_goto_line.connect(self.sig_goto_line)
        self.addTab(page, title)
        self.setCurrentWidget(page)

    def run(self, filename):
        """Populate tabs by running all file-context plugins on *filename*."""
        self.filename = filename
        filecontext.run_plugins(filename, self)

    def rerun(self, filename):
        """Re-run the plugins for *filename*, preserving the selected tab index."""
        inx = self.currentIndex()
        self.clear()
        self.run(filename)
        self.setCurrentIndex(inx)

    def focus_search_ctags(self):
        """Give keyboard focus to the first page exposing a line-edit (`.le`)."""
        for inx in range(self.count()):
            page = self.widget(inx)
            if hasattr(page, 'le') and hasattr(page.le, 'setFocus'):
                self.setCurrentWidget(page)
                page.le.setFocus()
                break

    def get_already_opened_cmd_list(self):
        """Return the plugin commands already shown as tabs."""
        a_cmd_list = []
        for inx in range(self.count()):
            page = self.widget(inx)
            if hasattr(page, 'cmd'):
                a_cmd_list.append(page.cmd)
        return a_cmd_list

    def get_plugin_cmd_list(self):
        """Return (cmd, plugin) pairs for plugin commands not already opened."""
        a_cmd_list = self.get_already_opened_cmd_list()
        cmd_list = []
        for p in filecontext.fc_plugins:
            if not hasattr(p, 'cmd_name'):
                continue
            cmd_name = p.cmd_name()
            # Covers None, '' and an empty list (the old extra '' check after
            # this guard was unreachable and has been removed).
            if not cmd_name:
                continue
            if not isinstance(cmd_name, list):
                cmd_name = [ cmd_name ]
            for cmd in cmd_name:
                if cmd in a_cmd_list:
                    continue
                cmd_list.append((cmd, p))
        return cmd_list

    def menu_act_triggered_cb(self, act):
        # The action carries its plugin and command (attached in mousePressEvent).
        act.plugin.run_plugin(self.filename, self, cmd=act.cmd_name)

    def mousePressEvent(self, m_ev):
        """On right-click, show a menu of not-yet-opened plugin commands."""
        cmd_list = self.get_plugin_cmd_list()
        if len(cmd_list) == 0:
            return
        if (m_ev.button() == Qt.RightButton):
            # setup popup menu
            pmenu = QMenu()
            pmenu.triggered.connect(self.menu_act_triggered_cb)
            for (cmd_name, p) in cmd_list:
                act = pmenu.addAction(cmd_name)
                act.plugin = p
                act.cmd_name = cmd_name
            pmenu.exec_(QCursor.pos())
            pmenu = None
|
# Python3 Program to
# Write a program to accept minutes and convert it into hours and minute
# read the total number of minutes from the user
minutes = int(input("Enter the total number of minutes: "))
# divmod yields the quotient (hours) and remainder (leftover minutes) in one step
hours, minute_left = divmod(minutes, 60)
# report the converted time
print(f"time converted is {hours} hours and {minute_left} minutes")
|
# Notebook-style analysis script (cells delimited by "#%%"): load precomputed
# PSD DataFrames, derive block differences between consecutive phases and the
# alpha/delta ratio, parse THI clinical scores, and plot them together.
import pandas as pd
from neurotin.evamed.parsers import parse_thi
from neurotin.io import read_csv_evamed
from neurotin.psd import (
    blocks_count_success,
    blocks_difference_between_consecutive_phases,
    ratio,
)
from neurotin.psd.viz import plot_joint_clinical_nfb_performance
#%% CLI commands
# Reference commands that produce the pickled inputs loaded below.
"""
# Compute PSDs
# -a accepts 'mean' or 'integrate'
# only difference is the scale, variations are identical -> integrate used.
neurotin_psd_avg_band preprocessed/ica/ psds/alpha.pcl -p 57 60 61 63 65 66 68 72 73 -d 4 -o 2 --reject --fmin 8 --fmax 13 -a mean --n_jobs 35
neurotin_psd_avg_band preprocessed/ica/ psds/delta.pcl -p 57 60 61 63 65 66 68 72 73 -d 4 -o 2 --reject --fmin 1 --fmax 4 -a mean --n_jobs 35
# Apply weights and remove outliers (python or IPython console)
import pandas as pd
from neurotin.psd import weights_apply_session_mask, add_average_column, remove_outliers
df = pd.read_pickle('psds/alpha.pcl')
df = weights_apply_session_mask(df, 'data/Participants')
df = add_average_column(df)
df = remove_outliers(df, score=2.)
df.to_pickle('psds/alpha_.pcl', compression=None)
"""
#%% Participants
# NOTE(review): participant IDs must be filled in before running the THI and
# plotting cells -- presumably an empty list selects no participants; verify
# against parse_thi.
participants = []
#%% PSDs - Alpha
# Path to the post-processed alpha-band pickle (intentionally left blank).
fname = r""
df_alpha = pd.read_pickle(fname)
# Per-block change of the 'avg' column between consecutive phases.
diff_alpha = blocks_difference_between_consecutive_phases(
    df_alpha, column="avg"
)
#%% PSDs - Delta
# Path to the post-processed delta-band pickle (intentionally left blank).
fname = r""
df_delta = pd.read_pickle(fname)
diff_delta = blocks_difference_between_consecutive_phases(
    df_delta, column="avg"
)
#%% Ratio
# Alpha/delta ratio per block, then its change between consecutive phases.
df_ratio = ratio(df_alpha, df_delta)
diff_ratio = blocks_difference_between_consecutive_phases(
    df_ratio, column="ratio"
)
#%% THI
# Tinnitus Handicap Inventory export from evamed (path intentionally blank).
fname = r""
df = read_csv_evamed(fname)
thi = parse_thi(df, participants)
#%% Plot
# Count positive ratio changes per session and plot against the THI scores.
df_positives, _ = blocks_count_success(diff_ratio, group_session=True)
plot_joint_clinical_nfb_performance(df_positives, thi, "THI", participants)
|
"""
Author : Sitanshu15
Bot : Nora
"""
import asyncio
import datetime
import difflib
import functools
import io
import math
import os
import json
import random
import re
import textwrap
import time
import typing as t
import unicodedata
from difflib import get_close_matches
from io import BytesIO
import aiohttp
import asyncpg
import discord
import dotenv
import humanize
import paginator
import requests
import wavelink
import wikipedia
import xkcd_wrapper
from asyncdagpi import Client, ImageFeatures
from discord.ext import commands
#from discord.ext.buttons import Paginator
from discord.ext.commands import BucketType, Cog, Greedy
from dotenv import load_dotenv
from jishaku.codeblocks import codeblock_converter
from jishaku.functools import executor_function
from mystbin import Client
from PIL import Image, ImageFilter
from requests.utils import quote
from random_words import RandomWords
from core.utils import Quiz as quiz
class CantBuyMoreThanOne(commands.CommandError):
    """Command error raised when a purchase of more than one of an item is refused."""
class CantSellMoreThanOne(commands.CommandError):
    """Command error raised when a sale of more than one of an item is refused."""
class Economy(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.SHARE_THRESHOLD = 175
@commands.command()
async def open(self, ctx):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if accounts:
await ctx.reply(f"You alredy opened an account")
return False
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command()
@commands.cooldown(1, 30, commands.BucketType.user)
async def beg(self, ctx):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
val = random.randint(0, 170)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + val, ctx.author.id)
no_msg = ["No lol", "Imagine begging lol", "I need to buy my air pods", "Be gone thot"]
ppl = ["Zezus", "Niki minaj", "Ed sheran", "The guy you hate", "Taylor swift", "Vyy", "", "A stranger", "The guy you hate", "Mr.Beast", "Your mom", "Kevin Jones", "Pauly D", "Tom hanks", "Tom criuse", "Leanardo DiCaprio", "Pia Mia", "Morgan feeman", "Zach king", "Charlie Damilio", "Addison rae", "Sommer ray", "Faze rug", "Morgz", "Brent", "Jimmin"]
if val == 0:
msg = f"{random.choice(ppl)}: {random.choice(no_msg)}"
if val > 1:
msg = f"{random.choice(ppl)} gave you, **{val}** "
e = discord.Embed(description=f"{msg}", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
    @commands.command()
    @commands.cooldown(1, 70, commands.BucketType.user)
    async def mine(self, ctx):
        """Mine with a pickaxe: wears 3-15% durability and credits the wallet
        with 1-10 ores at 125-150 coins each.
        """
        # Blacklisted users are refused outright.
        bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
        if bl_users:
            e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
            await ctx.send(embed=e)
            return
        accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        # pi_data: pickaxe row (durability); pickaxe: mere existence check.
        pi_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "pickaxe")
        # NOTE(review): ec_data is fetched before the account is auto-created
        # below, so it is None for a brand-new user and the payout UPDATE
        # would raise -- confirm whether a re-fetch after the INSERT is needed.
        ec_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        pickaxe = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "pickaxe")
        if not accounts:
            # Auto-open an account on first use (same defaults as `open`).
            await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
            e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
        if not pickaxe:
            e = discord.Embed(title=f"Item missing", description=f"You need a pickaxe to mine!! ⛏", color=discord.Color.dark_blue())
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
            return
        # Durability already below zero: the pickaxe breaks and is removed.
        if 0 > pi_data["dur"]:
            await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", ctx.author.id, "pickaxe")
            await ctx.reply("You broke your pickaxe! ⛏")
            return
        else:
            ore_amt = random.randint(1, 10)
            ore_val = random.randint(125, 150)
            dur_minus = random.randint(3, 15)
            amt = ore_val * ore_amt
            # Credit the wallet and wear down the pickaxe.
            await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ec_data["wallet"] + amt, ctx.author.id)
            await self.bot.db.execute("UPDATE inve SET dur = $1 WHERE user_id = $2 AND item = $3", pi_data["dur"] - dur_minus, ctx.author.id, "pickaxe")
            e = discord.Embed(description=f"⛏ You used **{dur_minus}%** of your pickaxe and found {ore_amt} noranics(Nora ore).\nnoranics was going up for {ore_val} you sold your {ore_amt} and profited {amt} !!")
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
        # if await self.bot.db.fetchrow("SELECT econ_index FROM econ WHERE user_id = $1", ctx.author.id) <= self.SHARE_THRESHOLD:
        # mm = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        # await self.bot.db.execute("UPDATE econ SET econ_index = $1 WHERE user_id = $2", mm["econ_index"] + 1, ctx.author.id)
@commands.command()
@commands.cooldown(1, 70, commands.BucketType.user)
async def fish(self, ctx):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
fr_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "fishing_rod")
ec_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
fishing_rod = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "fishing_rod")
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
if not fishing_rod:
e = discord.Embed(title=f"Item missing", description=f"You need a fishing rod to fish!! <:fishing_pole:835185055433097329> ", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if random.randint(1, 100) < 100:
f_img = "🐡"
f_amt = random.randint(3, 15)
f_pri = 100
f_nme = "Blow-fish"
if random.randint(1, 100) < 30:
f_img = "🐟"
f_amt = random.randint(3, 8)
f_pri = 500
f_nme = "Blue-fish"
if random.randint(1, 100) < 10:
f_img = "🐠"
f_amt = random.randint(1, 4)
f_pri = 1000
f_nme = "Tropical-fish"
if random.randint(1, 100) == 1:
f_img = "🦈"
f_amt = random.randint(1, 2)
f_pri = 5000
f_nme = "Shark"
else:
grnd_ttl = f_amt * f_pri
if pet:
pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if pet_data["name"] == "dog":
grnd_ttl = int(f_amt*f_pri+f_amt*f_pri*0.1)
if pet_data["name"] == "cat":
grnd_ttl = int(f_amt*f_pri+f_amt*f_pri*0.2)
if pet_data["name"] == "parrot":
grnd_ttl = int(f_amt*f_pri+f_amt*f_pri*0.3)
if pet_data["name"] == "dragon":
grnd_ttl = int(f_amt*f_pri+f_amt*f_pri*0.4)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ec_data["wallet"] + grnd_ttl, ctx.author.id)
e = discord.Embed(description=f"You casted you line in to the water and found `{f_amt}` of the {f_nme}(s).\nThe {f_nme} was going up in the market for {f_pri} you sold your {f_amt} and profited {grnd_ttl} !! `{f_img}`")
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
# if await self.bot.db.fetchrow("SELECT econ_index FROM econ WHERE user_id = $1", ctx.author.id) <= self.SHARE_THRESHOLD:
# mm = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
# await self.bot.db.execute("UPDATE econ SET econ_index = $1 WHERE user_id = $2", mm["econ_index"] + 1, ctx.author.id)
    @commands.command(aliases=["post meme", "pm"])
    @commands.cooldown(1, 60, commands.BucketType.user)
    async def post_meme(self, ctx):
        """Post a meme for ad revenue.

        Requires a laptop item; 25% of the time the laptop breaks (and is
        deleted), otherwise the user picks a genre by reaction and 100-955
        coins are credited to the wallet.
        """
        # NOTE(review): the "post meme" alias contains a space and cannot be
        # invoked by discord.py's default parser -- confirm it is intentional.
        # Blacklisted users are refused outright.
        bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
        if bl_users:
            e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
            await ctx.send(embed=e)
            return
        accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        # NOTE(review): lp_data and ctx_data are fetched but never used below.
        lp_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "laptop")
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        laptop = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "laptop")
        if not accounts:
            # Auto-open an account on first use (same defaults as `open`).
            await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
            e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
        if not laptop:
            e = discord.Embed(title=f"Item missing", description=f"You need a laptop to post memes!! <:laptop:835185820230615091> ", color=discord.Color.dark_blue())
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
            return
        # 25% chance the laptop breaks before any meme is posted.
        if random.randint(1, 100) < 25:
            await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", ctx.author.id, "laptop")
            e = discord.Embed(title="You broke your laptop!", description="Imagine breaking your laptop, lol.." , color=discord.Color.dark_blue())
            await ctx.send(embed=e)
            return
        else:
            data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
            amt = random.randint(100, 955)
            e = discord.Embed(title=f"Choose a genre of meme", description=f"React for each genre!\n\n`1️⃣`● Dank Meme\n`2️⃣`● Dark Meme\n`3️⃣`● Comic Meme\n`4️⃣`● Intellectual Meme\n`5️⃣`● Reposted Meme\n`6️⃣`● Fresh Meme")
            msg = await ctx.send(embed=e)
            # Only accept one of the six genre reactions, from the invoker only.
            def check(reaction, user):
                return (user == ctx.author) and (str(reaction.emoji) in ["1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣"])
            await msg.add_reaction('1️⃣')
            await msg.add_reaction('2️⃣')
            await msg.add_reaction('3️⃣')
            await msg.add_reaction('4️⃣')
            await msg.add_reaction('5️⃣')
            await msg.add_reaction('6️⃣')
            try:
                reaction, _user = await self.bot.wait_for('reaction_add', timeout=60, check=check)
            except asyncio.TimeoutError:
                # Timed out: replace the prompt with an error and clear reactions.
                error = discord.Embed(description="Reaction timed out.", color=discord.Color.dark_blue())
                await msg.edit(content=None, embed=error)
                try:
                    await msg.clear_reactions()
                except discord.errors.HTTPException:
                    pass
            else:
                # Map the chosen reaction to its genre label (check() guarantees
                # exactly one of these branches fires).
                if str(reaction.emoji) == "1️⃣":
                    meme_type = "Dank Meme"
                if str(reaction.emoji) == "2️⃣":
                    meme_type = "Dark Meme"
                if str(reaction.emoji) == "3️⃣":
                    meme_type = "Comic Meme"
                if str(reaction.emoji) == "4️⃣":
                    meme_type = "Intellectual Meme"
                if str(reaction.emoji) == "5️⃣":
                    meme_type = "Reposted Meme"
                if str(reaction.emoji) == "6️⃣":
                    meme_type = "Fresh Meme"
                # Credit the payout and replace the prompt with the result.
                await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + amt, ctx.author.id)
                e = discord.Embed(title=f"{meme_type}", description=f"You earned {amt} from ad cents", color=discord.Color.dark_blue())
                await msg.delete()
                await ctx.send(embed=e)
        # if await self.bot.db.fetchrow("SELECT econ_index FROM econ WHERE user_id = $1", ctx.author.id) <= self.SHARE_THRESHOLD:
        # mm = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
        # await self.bot.db.execute("UPDATE econ SET econ_index = $1 WHERE user_id = $2", mm["econ_index"] + 1, ctx.author.id)
@commands.command()
@commands.cooldown(1, 100, commands.BucketType.user)
async def hunt(self, ctx):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
ht_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "h_rifle")
ec_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
ht_rf = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", ctx.author.id, "h_rifle")
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
if not ht_rf:
e = discord.Embed(title=f"Item missing", description=f"You need a hunting rifle to hunt!! <:hunting_rifle:835185487542747225> ", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if random.randint(1, 100) < 100:
f_img = "🦨"
f_amt = random.randint(1, 8)
f_pri = 50
f_nme = "Skunk"
if random.randint(1, 100) < 60:
f_img = "🐇"
f_amt = random.randint(1, 8)
f_pri = 100
f_nme = "Rabbit"
if random.randint(1, 100) < 50:
f_img = "🦆"
f_amt = random.randint(1, 8)
f_pri = 300
f_nme = "Duck"
if random.randint(1, 100) < 40:
f_img = "🦌"
f_amt = random.randint(1, 8)
f_pri = 500
f_nme = "Deer"
if random.randint(1, 100) < 30:
f_img = "🐗"
f_amt = random.randint(1, 4)
f_pri = 1000
f_nme = "Boar"
if random.randint(1, 100) < 5:
f_img = "🐲"
f_amt = random.randint(1, 2)
f_pri = 5000
f_nme = "Dragon"
else:
amt = f_amt*f_pri
if pet:
pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if pet_data["name"] == "parrot":
amt = int(f_amt*f_pri+f_amt*f_pri*0.1)
if pet_data["name"] == "dragon":
amt = int(f_amt*f_pri+f_amt*f_pri*0.2)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ec_data["wallet"] + amt, ctx.author.id)
e = discord.Embed(description=f"You went hunting and found {f_amt} {f_img}{f_nme}(s)\nYou sold each one for {f_pri} and earned a total of {amt} .", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
# if await self.bot.db.fetchrow("SELECT econ_index FROM econ WHERE user_id = $1", ctx.author.id) <= self.SHARE_THRESHOLD:
# mm = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
# await self.bot.db.execute("UPDATE econ SET econ_index = $1 WHERE user_id = $2", mm["econ_index"] + 1, ctx.author.id)
@commands.command(aliases=["bet"])
@commands.cooldown(1, 50, commands.BucketType.user)
async def gamble(self, ctx, amount=None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
bot_choice = random.randint(1, 10)
u_choice = random.randint(1, 10)
b_amt = int(amount) + random.randint(70, 300)
amt = int(amount) + b_amt
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if amount == "all":
amount = data["wallet"]
if amount == "wallet":
amount = data["wallet"]
if amount == "wallet":
amount_up = data["wallet"]
amount = 0.5*amount_up
if amount == None:
e = discord.Embed(title="Please enter amount", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if int(amount) > data["wallet"]:
e = discord.Embed(description=f"Please enter a valid amount you are trying to gamble more than you have", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if u_choice == int(bot_choice):
e = discord.Embed(title=f"{ctx.author.name}'s gamble", description=f"It was a draw!!", color=discord.Color.dark_blue())
e.add_field(name=f"{ctx.author.name}", value=f"> `{u_choice}`")
e.add_field(name=f"{self.bot.name}", value=f"> `{bot_choice}`")
await ctx.send(embed=e)
return
if bot_choice < int(u_choice):
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + amt, ctx.author.id)
e = discord.Embed(title=f"{ctx.author.name}'s gamble", description=f"You won {amt} !", color=discord.Color.dark_blue())
e.add_field(name=f"{ctx.author.name}", value=f"> `{u_choice}`")
e.add_field(name=f"Nora", value=f"> `{bot_choice}`")
await ctx.send(embed=e)
return
if u_choice < int(bot_choice):
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] - amt, ctx.author.id)
e = discord.Embed(title=f"{ctx.author.name}'s gamble", description=f"You lost {amt} !", color=discord.Color.dark_blue())
e.add_field(name=f"{ctx.author.name}", value=f"> `{u_choice}`")
e.add_field(name=f"Nora", value=f"> `{bot_choice}`")
await ctx.send(embed=e)
return
# if await self.bot.db.fetchrow("SELECT econ_index FROM econ WHERE user_id = $1", ctx.author.id) <= self.SHARE_THRESHOLD:
# mm = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
# await self.bot.db.execute("UPDATE econ SET econ_index = $1 WHERE user_id = $2", mm["econ_index"] + 1, ctx.author.id)
@commands.command(aliases=["bal"])
async def balance(self, ctx, *, member: discord.Member = None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
member = member or ctx.author
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
if not accounts:
await ctx.send(f"{member} has no balance")
return False
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
wal_amt = data["wallet"]
bank_amt = data["bank"]
e = discord.Embed(title=f"{member}'s balance", description=f"Wallet: {wal_amt} \nBank: {bank_amt} ", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
e.set_footer(text=f"What a scrub")
await ctx.send(embed=e)
@commands.command(aliases=["dep"])
async def deposit(self, ctx, amount=None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if amount == "all":
amount = data["wallet"]
if amount == "max":
amount = data["wallet"]
if amount == "half":
amount_up = data["wallet"]
amount = 0.5*amount_up
if amount == None:
e = discord.Embed(title="Please enter amount", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if int(amount) > data["wallet"]:
e = discord.Embed(description=f"Please enter a valid amount you are trying to deposit more than you have", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
else:
walt = data["wallet"] - int(amount)
bnk = data["bank"] + int(amount)
await self.bot.db.execute("UPDATE econ SET wallet = $1, bank = $2 WHERE user_id = $3",walt , bnk, ctx.author.id)
e = discord.Embed(title=f"Deposit success", description=f"I have deposited **{amount}** to your bank" , color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command(aliases=["with", "wd"])
async def withdraw(self, ctx, amount=None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if amount == "all":
amount = data["bank"]
if amount == "max":
amount = data["bank"]
if amount == "half":
amount_up = data["bank"]
amount = 0.5*amount_up
if amount == None:
e = discord.Embed(title="Please enter amount", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if int(amount) > data["bank"]:
e = discord.Embed(description=f"Please enter a valid amount you are trying to withdraw more than you have", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
else:
walt = data["wallet"] + int(amount)
bnk = data["bank"] - int(amount)
await self.bot.db.execute("UPDATE econ SET wallet = $1, bank = $2 WHERE user_id = $3",walt , bnk, ctx.author.id)
e = discord.Embed(title=f"Withdrawal success", description=f"I have withdrew **{amount}** from your bank to your pocket" , color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command(aliases=["rob"])
@commands.cooldown(1, 35, commands.BucketType.user)
async def steal(self, ctx, member: discord.Member):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
m_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
if not m_data:
await ctx.send(f"{member.name} has no balance!")
return
if 100 > ctx_data["wallet"]:
l = ctx_data["wallet"]
e = discord.Embed(title=f"You need at least 100", description=f"You will need 100 to pay the fine if you get caught of course" ,color=discord.Color.dark_blue())
e.set_footer(text=f"Your wallet's balance is {l} ")
await ctx.send(embed=e)
return
if 100 > m_data["wallet"]:
l = m_data["wallet"]
e = discord.Embed(title=f"{member} is broke", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
e.set_footer(text=f"Their wallet's balance is {l} ")
await ctx.send(embed=e)
return
if random.randint(1, 100) < 20:
fine_nmb = random.randint(60, 100)
e = discord.Embed(description=f"You try to rob {member}, but the police see you and let you go with a {fine_nmb} fine and a warning", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
else:
member_m = m_data["wallet"]*0.3
amount = random.randint(100, member_m)
m_walt = member_m - amount
ctx_walt = amount
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2",m_walt , member.id)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2",ctx_walt , ctx.author.id)
e = discord.Embed(title=f"You robbed {member}", description=f"You robbed them for {amount} now they have got {m_walt} ", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command(aliases=["give", "send"])
async def share(self, ctx, member: discord.Member, perms=None, amount=None):
if perms == "--sudo":
if ctx.author.id == 814030950597132321 or 728260210464129075:
ff = int(amount)
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
if not accounts:
await ctx.send("They have no account")
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + ff, member.id)
await ctx.reply(f"Added {amount} to {member}'s balance")
else:
await ctx.send("You cant run this command with sudo permissions. Only my developers are eligible to run this command wuth sudo permissions")
if not perms:
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if not accounts:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", ctx.author.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
if not await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id):
e = discord.Embed(description=f"{member} does not have an account", color=discord.color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
if data["econ_index"] >= self.SHARE_THRESHOLD:
if amount == "all":
amount = data["wallet"]
if amount == "max":
amount = data["wallet"]
if amount == "half":
amount_up = data["wallet"]
amount = 0.5*amount_up
if amount == None:
e = discord.Embed(title="Please enter amount", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if int(amount) > data["wallet"]:
e = discord.Embed(description=f"Please enter a valid amount you are trying to give {member} more than you have", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
member_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
if member_data:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] - amount, ctx.author.id)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + amount, member.id)
e = discord.Embed(description=f"I have sent {amount} to {member}!", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
else:
e = discord.Embed(title="You are a normie!", description="You cant share money with other people yet, you are still a noob!(In all seriouness, this is a method of stopping alt accounts from sending money to a main account.)", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
if perms != "--sudo":
await ctx.reply(f"{perms} is not a valid permission flag.")
#BUY AND SELL
@commands.command()
async def buy(self, ctx, item, amount=1):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
amt = int(amount)
if item.lower() == "pickaxe":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_pick(ctx, ctx.author)
return
if item.lower() == "pick":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_pick(ctx, ctx.author)
return
if item.lower() == "fishing_rod":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_fr(ctx, ctx.author)
return
if item.lower() == "fr":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_fr(ctx, ctx.author)
return
if item.lower() == "lp":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_lp(ctx, ctx.author)
return
if item.lower() == "hr":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_h_r(ctx, ctx.author)
return
if item.lower() == "guitar":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_guitar(ctx, ctx.author)
return
if item.lower() == "drum":
if amt > 1:
raise CantBuyMoreThanOne
else:
await self.buy_drums(ctx, ctx.author)
return
if item.lower() == "alcohol":
await self.buy_alc(ctx, ctx.author, amount)
return
if item.lower() == "life_saver":
await self.buy_ls(ctx, ctx.author, amount)
return
if item.lower() == "ls":
await self.buy_ls(ctx, ctx.author, amount)
return
if item.lower() == "noramedal":
await self.buy_nrmdl(ctx, ctx.author, amount)
return
if item.lower() == "noratrophy":
await self.buy_nr_trophy(ctx, ctx.author, amount)
return
if item.lower() == "lotterytk":
await self.buy_lotterytk(ctx, ctx.author)
return
@buy.error
async def buy_error(self, ctx, exc):
if isinstance(exc, CantBuyMoreThanOne):
await ctx.send("`CantBuyMoreThanOne` **:** You cant buy more than one of that item!")
@commands.command()
async def sell(self, ctx, item, amount=1):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
amt = int(amount)
if item.lower() == "pickaxe":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "pickaxe", 7500)
if item.lower() == "fishing_rod":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "fishing_rod", 8500)
if item.lower() == "laptop":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "laptop", 10000)
if item.lower() == "hr":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "h_rifle", 50000)
if item.lower() == "guitar":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "guitar", 75000)
if item.lower() == "drum":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "drum", 100000)
if item.lower() == "lotterytk":
if amt > 1:
raise CantSellMoreThanOne
else:
await self.sell_1(ctx, ctx.author, "ltk", 2500)
if item.lower() == "alcohol":
await self.sell_inr(ctx, ctx.author, "alcohol", 8500, amt)
return
if item.lower() == "life_saver":
await self.sell_inr(ctx, ctx.author, "life_saver", 25000, amt)
return
if item.lower() == "Noramedal":
await self.sell_inr(ctx, ctx.author, "nr_medal", 10000000, amt)
return
if item.lower() == "Noratrophy":
await self.sell_inr(ctx, ctx.author, "nr_trophy", 50000000, amt)
return
if item.lower() == "lotterytk":
await self.sell_inr(ctx, ctx.author, "ltk", 2500, amt)
return
    @commands.command(aliases=["store"])
    async def shop(self, ctx, page=None):
        """Show one page of the item shop (pages "1"-"3", default "1").

        Each field lists an item's buy key, description, usage command and
        per-user purchase limit. Unknown page values send nothing.
        """
        # Blacklisted users are locked out of the economy entirely.
        bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
        if bl_users:
            e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
            await ctx.send(embed=e)
            return
        # Pages are compared as strings because the argument arrives as text.
        if page == None:
            page = "1"
        if page == "1":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:lottery_ticket:847447419498790952> **Lottery Ticket** — 〄 2,500", value="Key: `lotterytk`\nDescription: Buy this and get a higher chance of winning the lottery, nr.lottery for inro on te next, comming or present lottery. Join the support server for 15% more!(Note)\nUsage: `None - Read description`\nLimit: `1`", inline=False)
            e.add_field(name="<a:pickaxe:836163392796229642> **Pickaxe** — 〄 7,500", value="Key: `pick`\nDescription: Go mining for epic noranics ore!\nUsage: `nr.mine`\nLimit: `1`", inline=False)
            e.add_field(name="<:fishing_pole:835185055433097329> **Fishing Rod** — 〄 8,500", value="Key: `fishing_rod`\nDescription: Go out to your nearest lake chill, fish and sell them for sweet money!\nUsage: `nr.fish`\nLimit: `1`", inline=False)
            e.add_field(name="<:alcohol:836203535728771092> **Alcohol** — 〄 8,500", value="Key: `alcohol`\nDescription: Drink it and you might get lucky, just maybe\nUsage: `nr.use alcohol`\nLimit: `None`", inline=False)
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            e.set_footer(text="Page Index 1/3")
            await ctx.send(embed=e)
            return
        if page == "2":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:laptop:835185820230615091> **Laptop** — 〄 10,000", value="Key: `lp`\nDescription: Post meme's for the ad cents.\nUsage: `nr.pm`\nLimit: `1`", inline=False)
            e.add_field(name="<:lifesaver:836562944950796309> **Life Saver** — 〄 25,000", value="Key: `life_saver`\nDescription: If a life saver is in your inventory at the time of death, this item will be consumed and prevent you from dying! You will keep your coins and items.\nUsage: `None - Read description`\nLimit: `None`", inline=False)
            e.add_field(name="<:hunting_rifle:835185487542747225> **Hunting Rifle** — 〄 50,000", value="Key: `hr`\nDescription: Go and hunt for animals!\nUsage: `nr.hunt`\nLimit: `1`", inline=False)
            e.add_field(name="<:classical_guitar:836802689635450880> **Guitar** — 〄 75,000", value="Key: `guitar`\nDescription: Go busking(term for: Street performance)\nUsage: `nr.use guitar`\nLimit: `1`", inline=False)
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            e.set_footer(text="Page Index 2/3")
            await ctx.send(embed=e)
            return
        if page == "3":
            e = discord.Embed(title="Welcome to the Nora store", description="Use `nr.buy <itemname>` to buy something. The `<itemname>` must match the given `key` or some other secret keys!. Your `[itemamount]` should also follow the given `limit`", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
            e.add_field(name="<:drums:836815664189669427> **Drums** — 〄 100,000", value="Key: `drum`\nDescription: Perform in the public!\nUsage: `nr.use drum`\nLimit: `1`", inline=False)
            e.add_field(name="<:noramedal:836832817307844618> **Nora Medal** — 〄 10,000,000", value="Key: `noramedal`\nDescription: A medal only the top 1% of players have!\nUsage: `Show-off`\nLimit: `None`", inline=False)
            e.add_field(name="<:noratrophy:836834560556662784> **Nora Trophy** — 〄 50,000,000", value="Key: `noratrophy`\nDescription: Literally only the richest of the richest of the richest of the richest of the richest of the rich will hold these beloved trophies.\nUsage: `None - Read description`\nLimit: `None`", inline=False)
            e.set_footer(text="Page Index 3/3")
            e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
            await ctx.send(embed=e)
            return
@commands.command(aliases=["inv"])
async def inventory(self, ctx, *, member: discord.Member = None):
bl_users = await self.bot.db.fetch("SELECT * FROM bl WHERE user_id = $1", ctx.author.id)
if bl_users:
e = discord.Embed(title="You cant use that command", description="You are blacklisted please join the support sever for further assistance", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
member = member or ctx.author
accounts = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
inve = await self.bot.db.fetch("SELECT * FROM econ WHERE user_id = $1", member.id)
if not accounts:
await ctx.send(f"{member} has no balance")
return
if not inve:
e = discord.Embed(title=f"{member} has no inventory", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
return
ltk = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "ltk")
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "fishing_rod")
hr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "h_rifle")
lp = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "laptop")
pick = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "pickaxe")
drums = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "drum")
guitar = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "guitar")
alcohol = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
alcohol_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
life_saver = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
lifesaver_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
Nora_medal = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
Nora_medal_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
Nora_trophy = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
Nora_trophy_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
e = discord.Embed(title=f"{member}'s inventory", timestamp=datetime.datetime.utcnow(), color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
if ltk:
ltk_amt = Nora_trophy_data["amt"]
e.add_field(name="<:lottery_ticket:847447419498790952> Lottery Ticket", value=f"Amount: 1")
if fr:
e.add_field(name="<:fishing_pole:835185055433097329> Fishing Rod", value="Amount: 1")
if hr:
e.add_field(name="<:hunting_rifle:835185487542747225> Hunting Rifle", value="Amount: 1")
if lp:
e.add_field(name="<:laptop:835185820230615091> Laptop", value="Amount: 1")
if pick:
e.add_field(name="<a:pickaxe:836163392796229642> Pickaxe", value="Amount: 1")
if drums:
e.add_field(name="<:drums:836815664189669427> Drums", value="Amount: 1")
if guitar:
e.add_field(name="<:classical_guitar:836802689635450880> Guitar", value="Amount: 1")
if alcohol and alcohol_data["amt"] > 0:
alcohol_amt = alcohol_data["amt"]
e.add_field(name="<:alcohol:836203535728771092> Alcohol", value=f"Amount: {alcohol_amt}")
if life_saver and lifesaver_data["amt"] > 0:
life_saver_amt = lifesaver_data["amt"]
e.add_field(name="<:lifesaver:836562944950796309> Life Saver", value=f"Amount: {life_saver_amt}")
if Nora_medal and Nora_medal_data["amt"] > 0:
Nora_medal_amt = Nora_medal_data["amt"]
e.add_field(name="<:noramedal:836832817307844618> Nora Medal", value=f"Amount: {Nora_medal_amt}")
if Nora_trophy and Nora_trophy_data["amt"] > 0:
Nora_trophy_amt = Nora_trophy_data["amt"]
e.add_field(name="<:noratrophy:836834560556662784> Nora Trophy", value=f"Amount: {Nora_trophy_amt}")
await ctx.send(embed=e)
@commands.command()
async def petshop(self, ctx):
e = discord.Embed(title="PET SHOP", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
e.add_field(name="<:spotted_dog:839739554886058004> Dog — 50,000", value='Key: `dog`\nAdvantages: + 10% while fishing', inline=False)
e.add_field(name="<:brown_cat:839739869692428288> Cat — 75,000", value='Key: `cat`\nAdvantages: + 20% while fishing', inline=False)
e.add_field(name="<:blue_yellow_orange_parrot:839740445574692884> Parrot — 100,000", value='Key: `parrot`\nAdvantages: + 30% while fishing and +10% while hunting', inline=False)
e.add_field(name="<:red_yellow_dragon:839742030930509827> Dragon — 5,000,000", value='Key: `dragon`\nAdvantages: + 40% while fishing and +20% while hunting', inline=False)
await ctx.send(embed=e)
@commands.command()
@commands.cooldown(1, 60, commands.BucketType.user)
async def busk(self, ctx, item_to_busk):
if item_to_busk.lower() == "drum":
await self.busk_drum(ctx, ctx.author)
elif item_to_busk.lower() == "guitar":
await self.busk_guitar(ctx, ctx.author)
@command.command()
@commands.cooldown(1, 40, commands.BucketType)
async def drink(self, ctx, item_to_drink):
if item_to_drink.lower() == "alcohol":
await self.drink_alcohol(ctx, ctx.author)
else:
await ctx.send("That item does not exist")
@commands.command()
async def pet(self, ctx):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not pet:
await ctx.send("You dont have a pet to check the shop type `nr.petshop`")
return
if pet:
pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
pet_name = pet_data["name"]
pet_nick = pet_data["nick"]
e = discord.Embed(title=f"{ctx.author.name}'s Pet", description=f"Your pet is a {pet_name}\nNickname: {pet_nick}", color=discord.Color.dark_blue())
e.set_author(name=f"{ctx.author}", icon_url=ctx.author.avatar.url)
await ctx.send(embed=e)
@commands.command()
async def petnick(self, ctx, *, nick):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not pet:
await ctx.send("You dont have a pet to check the shop type `nr.petshop`")
return
if pet:
await self.bot.db.execute("UPDATE pets SET nick = $1 WHERE user_id = $2", nick, ctx.author.id)
await ctx.reply(f"Your pet's nickname is now {nick}")
return
else:
await ctx.send("There was an error in command petnick: Something went wrong.")
@commands.command()
async def petdisown(self, ctx):
pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
if not pet:
await ctx.send("You dont have a pet to check the shop type `nr.petshop`")
return
if pet:
pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", ctx.author.id)
pt_name = pet_data["name"]
pt_nick = pet_data["nick"]
await ctx.reply(f"You disowned {pt_name}({pt_nick})")
await self.bot.db.execute("DELETE FROM pets WHERE user_id = $1", ctx.author.id)
return
else:
await ctx.send("There was an error in command petdisown: Something went wrong.")
@commands.command()
async def buypet(self, ctx, *, pet_name):
if pet_name.lower() == "dog":
await self.buy_pet(ctx, ctx.author, "dog", 50000)
return
if pet_name.lower() == "cat":
await self.buy_pet(ctx, ctx.author, "cat", 75000)
return
if pet_name.lower() == "parrot":
await self.buy_pet(ctx, ctx.author, "parrot", 100000)
return
if pet_name.lower() == "dragon":
await self.buy_pet(ctx, ctx.author, "dragon", 5000000)
return
else:
await ctx.send("That is not a valid pet please check the pet shop again!")
@commands.command()
@commands.is_owner()
async def picklotterywinner(self, ctx):
members = await self.bot.db.fetch("SELECT user_id FROM inve WHERE item = $1", "ltk")
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", ctx.author.id)
ltwinner_up = str(random.choice(members))
ltwinner_up1 = ltwinner_up.replace('>','')
ltwinner = ltwinner_up1.replace('<Record user_id=','')
await ctx.send(ltwinner)
# <-FUNCTIONS->
async def sell_1(self, ctx, member, item, item_price):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
it = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, item)
value = item_price*0.5
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if not it:
e = discord.Embed(title="You cant sell that", description=f"You cant sell the {item} you dont even have it!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
ctx.reply(embed=e)
return
else:
await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", member.id, item)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] + value, member.id)
e = discord.Embed(title=f"Item sold", description=f"You sold your {item} for 50% of its value at {value} ", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def sell_inr(self, ctx, member, item, item_price, item_amount):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
it = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, item)
it_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, item)
value1 = item_price*item_amount
value = value1*0.5
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if not it:
e = discord.Embed(title="You cant sell that", description=f"You cant sell the {item} you dont even have it!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
ctx.reply(embed=e)
return
if it_data["amt"] == 0:
await ctx.reply("You dont have any of that item")
return
if item_amount > it_data["amt"]:
await ctx.reply(f"You dont have {item_amount} of {item}.")
return
if item_amount == 1:
await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", it_data["amt"] - 1, member.id, item)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] + value, member.id)
e = discord.Embed(title=f"Item sold", description=f"You sold {item_amount} {item}(s) for 50% of its value at {value} ", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if item_amount > 1:
await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", it_data["amt"] - item_amount, member.id, item)
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] + value, member.id)
e = discord.Embed(title=f"Item sold", description=f"You sold {item_amount} {item}(s) for 50% of its value at {value} ", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_lotterytk(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
lottery_ticket = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "ltk")
value = 7500
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url= member.avatar.url)
await ctx.send(embed=e)
if lottery_ticket:
e = discord.Embed(title=f"You alredy have a lottery ticket, Leave some fun", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n2500 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, dur, amt) VALUES($1, $2, $3, $4)", member.id, "ltk", 500, 1)
e = discord.Embed(title=f"Bought Item", description=f'Good luck on the lottery check inro on the lottery by running `nr.lottery`', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_pick(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
pickaxe = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "pickaxe")
value = 7500
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url= member.avatar.url)
await ctx.send(embed=e)
if pickaxe:
e = discord.Embed(title=f"You alredy have a pickaxe", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n7500 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, dur, amt) VALUES($1, $2, $3, $4)", member.id, "pickaxe", 500, 1)
e = discord.Embed(title=f"Bought Item", description=f'You bought a pick axe run the `nr.mine` command to make use of it', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_fr(self, ctx, member):
ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "fishing_rod")
value = 8500
if not ctx_data:
await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
if fr:
e = discord.Embed(title=f"You alredy have a fishing rod!", color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
return
if value > ctx_data["wallet"]:
e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n8500 is the price!", color=discord.Color.dark_blue())
await ctx.send(embed=e)
return
else:
await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "fishing_rod", 1)
e = discord.Embed(title=f"Bought Item", description=f'You bought a fishing rod run the `nr.fish` command to make use of it', color=discord.Color.dark_blue())
e.set_author(name=f"{member}", icon_url=member.avatar.url)
await ctx.send(embed=e)
async def buy_lp(self, ctx, member):
    """Sell *member* a laptop (10,000 coins), one per user.

    Same flow as the other single-ownership shop items: ensure an econ
    account exists, reject duplicates and underfunded wallets, then debit
    and insert into `inve`.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "laptop")
    value = 10000
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        # Fix: re-read the freshly inserted row; ctx_data was None here and
        # the wallet check below would have raised TypeError.
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if fr:
        # typo fix: "alredy" -> "already"
        e = discord.Embed(title="You already have a laptop!", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    if value > ctx_data["wallet"]:
        e = discord.Embed(description="You are trying to buy an item costing more money than you have\n10000 is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "laptop", 1)
    e = discord.Embed(title="Bought Item", description='You bought a laptop run the `nr.pm` command to make use of it', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_h_r(self, ctx, member):
    """Sell *member* a hunting rifle (50,000 coins), one per user.

    Ensures an econ account exists, rejects duplicates and underfunded
    wallets, then debits the wallet and inserts the item into `inve`.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "h_rifle")
    value = 50000
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        # Fix: re-read the freshly inserted row; ctx_data was None here and
        # the wallet check below would have raised TypeError.
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if fr:
        # typo fix: "alredy" -> "already"
        e = discord.Embed(title="You already have a hunting rifle!", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    if value > ctx_data["wallet"]:
        e = discord.Embed(description="You are trying to buy an item costing more money than you have\n50000 is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "h_rifle", 1)
    e = discord.Embed(title="Bought Item", description='You bought a hunting rifle run the `nr.hunt` command to make use of it', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_alc(self, ctx, member, amount):
    """Sell *member* `amount` units of alcohol at 8,500 coins each (stackable).

    Fixes: re-fetches the econ row after creating a new account (was None →
    TypeError); drops an unused duplicate inventory query; the stack UPDATE
    now keys on member.id like every other query in this method (was
    ctx.author.id).
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    fr_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
    value = 8500*amount
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if value > ctx_data["wallet"]:
        e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    if not fr_data:
        # First purchase: create the inventory row.
        amt = int(amount)
        await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "alcohol", amt)
        e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` alcohol run the `nr.use alcohol` command to make use of it!', color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    # Already owned: stack onto the existing amount.
    amt = int(amount) + fr_data['amt']
    await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, member.id, "alcohol")
    e = discord.Embed(title="Bought Item(s)", description='You bought some alcohol run the `nr.use alcohol` command to make use of it!', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_ls(self, ctx, member, amount):
    """Sell *member* `amount` life savers at 25,000 coins each (stackable).

    Fixes: re-fetches the econ row after creating a new account (was None →
    TypeError); drops an unused duplicate inventory query; the stack UPDATE
    now keys on member.id for consistency with the rest of the method.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
    value = 25000*amount
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if value > ctx_data["wallet"]:
        e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    if not ls_data:
        # First purchase: create the inventory row.
        amt = int(amount)
        await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "life_saver", amt)
        e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` life saver(s) the next time you die your life will be saved', color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    # Already owned: stack onto the existing amount.
    amt = int(amount) + ls_data['amt']
    await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, member.id, "life_saver")
    e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` life saver(s) the next time you die your life will be saved', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_drums(self, ctx, member):
    """Sell *member* a drum (100,000 coins), one per user.

    Ensures an econ account exists, rejects duplicates and underfunded
    wallets, then debits the wallet and inserts the item into `inve`.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "drum")
    value = 100000
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        # Fix: re-read the freshly inserted row; ctx_data was None here and
        # the wallet check below would have raised TypeError.
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if fr:
        # typo fix: "alredy" -> "already"
        e = discord.Embed(title="You already have a drum!", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    if value > ctx_data["wallet"]:
        e = discord.Embed(description="You are trying to buy an item costing more money than you have\n100000 is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "drum", 1)
    e = discord.Embed(title="Bought Item", description='You bought a drum run the `nr.use drum` command to make use of it', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_guitar(self, ctx, member):
    """Sell *member* a guitar (75,000 coins), one per user.

    Fixes: the duplicate-ownership message wrongly said "drum" (copy-paste
    from buy_drums); re-fetches the econ row after creating a new account
    (was None → TypeError on the wallet check); "alredy" typo.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    fr = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "guitar")
    value = 75000
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if fr:
        e = discord.Embed(title="You already have a guitar!", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    if value > ctx_data["wallet"]:
        e = discord.Embed(description="You are trying to buy an item costing more money than you have\n75000 is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "guitar", 1)
    e = discord.Embed(title="Bought Item", description='You bought a guitar run the `nr.use guitar` command to make use of it', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_nrmdl(self, ctx, member, amount):
    """Sell *member* `amount` Nora medals at 10,000,000 coins each (stackable).

    Fixes: re-fetches the econ row after creating a new account (was None →
    TypeError); drops an unused duplicate inventory query; the stack UPDATE
    now keys on member.id for consistency with the rest of the method.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_medal")
    value = 10000000*amount
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if value > ctx_data["wallet"]:
        e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    if not ls_data:
        # First purchase: create the inventory row.
        amt = int(amount)
        await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "nr_medal", amt)
        e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` Nora medal(s) welcome to the money gang', color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    # Already owned: stack onto the existing amount.
    amt = int(amount) + ls_data['amt']
    await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, member.id, "nr_medal")
    e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` Nora medal(s) welcome to the money gang', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def buy_nr_trophy(self, ctx, member, amount):
    """Sell *member* `amount` Nora trophies at 50,000,000 coins each (stackable).

    Fixes: the stacking branch had a corrupted/truncated Embed line (syntax
    error) — reconstructed from the identical buy_nrmdl pattern; re-fetches
    the econ row after creating a new account (was None → TypeError); drops
    an unused duplicate inventory query; stack UPDATE keys on member.id.
    """
    ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    ls_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "nr_trophy")
    value = int(50000000*amount)
    if not ctx_data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        ctx_data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if value > ctx_data["wallet"]:
        e = discord.Embed(description=f"You are trying to buy an item costing more money than you have\n{value} is the price!", color=discord.Color.dark_blue())
        await ctx.send(embed=e)
        return
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", ctx_data["wallet"] - value, member.id)
    if not ls_data:
        # First purchase: create the inventory row.
        amt = int(amount)
        await self.bot.db.execute("INSERT INTO inve(user_id, item, amt) VALUES($1, $2, $3)", member.id, "nr_trophy", amt)
        e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` Nora trophy/trophies welcome to the elite gang', color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        return
    # Already owned: stack onto the existing amount.
    amt = int(amount) + ls_data['amt']
    await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", amt, member.id, "nr_trophy")
    e = discord.Embed(title="Bought Item(s)", description=f'You bought `{amount}` Nora trophy/trophies welcome to the elite gang', color=discord.Color.dark_blue())
    e.set_author(name=f"{member}", icon_url=member.avatar.url)
    await ctx.send(embed=e)
async def player_kill(self, ctx, member, item, msg):
    """Resolve a lethal event for *member*.

    Without a life saver the member's whole inventory is deleted and the
    wallet zeroed, and *msg* is shown. With one or more life savers the
    death is averted and a single saver is consumed (*item* names the
    activity that nearly killed them).
    """
    saver_rows = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
    saver = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "life_saver")
    if not saver_rows:
        # No protection: wipe inventory and wallet.
        await self.bot.db.execute("DELETE FROM inve WHERE user_id = $1", member.id)
        await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", 0, member.id)
        await ctx.send(f"You died, {msg}")
        return
    remaining = saver["amt"]
    if remaining == 1:
        # Last saver: remove the inventory row entirely.
        await ctx.send(f"You almost died {item} but you were saved by your life saver!")
        await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", member.id, "life_saver")
        return
    if remaining > 1:
        # Decrement the stack by one.
        await ctx.send(f"You almost died {item} but you were saved by your life saver!")
        await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", remaining - 1, member.id, "life_saver")
        return
async def drink_alcohol(self, ctx, member):
    """Consume one unit of alcohol and roll a random outcome.

    One unit is always consumed first; then, first matching roll wins:
      * ~19%: gain 250-300 coins
      * else ~29%: death via player_kill (wallet wiped unless a life saver is held)
      * else, if wallet > 100: robbed of 50-100% of the wallet (this roll passes 99% of the time)
      * otherwise: nothing happens
    """
    data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    alcohol = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
    a_data = await self.bot.db.fetchrow("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "alcohol")
    if not alcohol:
        await ctx.send("You have no alcohol to drink..")
        return
    # Consume one unit up front, before the outcome is rolled.
    # NOTE(review): the writes below key on ctx.author.id while the reads above
    # use member.id — callers appear to always pass member=ctx.author; confirm.
    if a_data["amt"] == 1:
        await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", ctx.author.id, "alcohol")
    if 1 < a_data["amt"]:
        await self.bot.db.execute("UPDATE inve SET amt = $1 WHERE user_id = $2 AND item = $3", a_data["amt"] - 1, ctx.author.id, "alcohol")
    if random.randint(1, 100) < 20:
        amt = random.randint(250, 300)
        await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + amt, ctx.author.id)
        await ctx.send(f"You got some new friends and they gave you {amt} !")
        return
    if random.randint(1, 100) < 30:
        await self.player_kill(ctx ,ctx.author, 'drinking alcohol','You fell into the river on alcohol and drowned, All your walllet money was lost!')
        return
    if random.randint(1, 100) < 100 and data["wallet"] > 100:
        walt_1 = data["wallet"]
        walt_2 = 0.5*walt_1
        # Rob between 50% and 100% of the current wallet.
        amt = random.randint(int(walt_2), walt_1)
        await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] - amt, ctx.author.id)
        await ctx.reply(f"You got way to drunk and a random guy robbed you of {amt}<a:coinor:880004542161354782>", )
        return
    else:
        # Reached only when the wallet is <= 100 (or on the 1% roll failure):
        # nothing happens.
        await ctx.send("You drank alcohol had a good time and went home, nothing new.")
async def busk_guitar(self, ctx, member):
    """Busk with a guitar: ~19% chance the guitar is destroyed, otherwise earn 100-1000 coins.

    Fixes: drops an unused duplicate `fetchrow` round-trip and only rolls the
    earnings once the member is known to own a guitar.
    """
    data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    gui = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "guitar")
    if not gui:
        await ctx.send("You dont have a guitar to go busking with")
        return
    # NOTE(review): writes key on ctx.author.id while reads use member.id —
    # callers appear to always pass member=ctx.author; confirm.
    if random.randint(1, 100) < 20:
        # Bad luck: the guitar is lost.
        await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", ctx.author.id, "guitar")
        await ctx.send("You dropped your guitar and it got ran over by a car , poor you.")
        return
    val = random.randint(100, 1000)
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + val, ctx.author.id)
    await ctx.send(f"You got {val}<a:coinor:880004542161354782> by strumming your guitar. You will do anything for money , people these days..")
    return
async def busk_drum(self, ctx, member):
    """Busk with a drum: ~19% chance the drum is destroyed, otherwise earn 100-1000 coins.

    Fixes: drops an unused duplicate `fetchrow` round-trip and only rolls the
    earnings once the member is known to own a drum.
    """
    data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    gui = await self.bot.db.fetch("SELECT * FROM inve WHERE user_id = $1 AND item = $2", member.id, "drum")
    if not gui:
        await ctx.send("You dont have a drum to go busking with")
        return
    # NOTE(review): writes key on ctx.author.id while reads use member.id —
    # callers appear to always pass member=ctx.author; confirm.
    if random.randint(1, 100) < 20:
        # Bad luck: the drum is lost.
        await self.bot.db.execute("DELETE FROM inve WHERE user_id = ($1) AND item = ($2)", ctx.author.id, "drum")
        await ctx.send("Your drum got ran over by a car , poor you.")
        return
    val = random.randint(100, 1000)
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] + val, ctx.author.id)
    await ctx.send(f"You got {val}<a:coinor:880004542161354782> by playing your drum. You will do anything for money , people these days..")
    return
async def buy_pet(self, ctx, member, pet_name, pet_price):
    """Adopt a pet named *pet_name* for *pet_price* coins (one pet per user).

    Fixes: re-fetches the econ row after creating a new account (data was
    None → TypeError on the wallet check); drops the redundant `accounts`
    query (the fetchrow answers the same question); "alredy" and "is is"
    typos in user-facing messages.
    """
    data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    pet = await self.bot.db.fetch("SELECT * FROM pets WHERE user_id = $1", member.id)
    if not data:
        await self.bot.db.execute("INSERT INTO econ(user_id, bank, wallet) VALUES($1, 250, 125)", member.id)
        e = discord.Embed(title="Ayo, i've opened a account for you!", description="Go ahead and run the beg command to get started", color=discord.Color.dark_blue())
        e.set_author(name=f"{member}", icon_url=member.avatar.url)
        await ctx.send(embed=e)
        data = await self.bot.db.fetchrow("SELECT * FROM econ WHERE user_id = $1", member.id)
    if pet:
        pet_data = await self.bot.db.fetchrow("SELECT * FROM pets WHERE user_id = $1", member.id)
        pt_name = pet_data["name"]
        pt_nick = pet_data["nick"]
        await ctx.send(f"You already have a pet don't you remember {pt_name}({pt_nick})?, dont tell it this but you can disown it using `nr.disownpet`")
        return
    if pet_price > data["wallet"]:
        walt = data["wallet"]
        await ctx.send(f"The {pet_name} you are trying to buy is {pet_price} but you only have {walt} ")
        return
    await self.bot.db.execute("INSERT INTO pets(user_id, name, nick) VALUES($1, $2, $3)", member.id, pet_name, "No nick")
    await self.bot.db.execute("UPDATE econ SET wallet = $1 WHERE user_id = $2", data["wallet"] - pet_price, member.id)
    await ctx.send(f"Congrats!! You adopted a {pet_name} for {pet_price}<a:coinor:880004542161354782>. If you want to change your pet's name type `nr.petnick <NickName>`")
def setup(bot):
    # discord.py extension hook: called by bot.load_extension() to register the cog.
    bot.add_cog(Economy(bot))
from flask_wtf.form import FlaskForm
from flask_wtf.file import FileAllowed, FileRequired, FileField
from wtforms import PasswordField, SubmitField, TextAreaField, StringField, BooleanField
from wtforms.validators import DataRequired, Email, EqualTo, Length, Regexp, ValidationError
from models import Article, User
import datetime
from captcha import getCaptcha
class LoginForm(FlaskForm):
    """Sign-in form: nickname + password with an optional "remember me" flag."""
    # openid=StringField('openid',validators=[DataRequired()])  # DataRequired() ensures the field is non-empty
    remember_me = BooleanField('Remember me')
    nickname = StringField('Nickname', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Sign in')
class RegistrationForm(FlaskForm):
    """Account sign-up form with uniqueness checks on nickname and email."""
    nickname = StringField('Nickname', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password1', validators=[DataRequired()])
    # EqualTo('password') makes the confirmation match the first password field.
    password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Submit')

    def validate_nickname(self, nickname):
        """WTForms inline validator: reject nicknames already in the User table."""
        user = User.query.filter_by(nickname=nickname.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')

    def validate_email(self, email):
        """WTForms inline validator: reject emails already registered."""
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
class UploadForm(FlaskForm):
    """Article upload form: metadata fields plus one attached file."""
    title = StringField("Title", validators=[DataRequired(), Length(min=1, max=50)])
    author = StringField("Author", validators=[DataRequired()])
    subject = StringField("Subjects", validators=[DataRequired()], render_kw={'placeholder': 'Split subjects by space'})
    highlight = TextAreaField("Highlight")
    email = StringField("Email", validators=[DataRequired(), Email(message="email error")],
    render_kw={'placeholder': 'Email'})
    # NOTE(review): label says "img" but 'pdf' is also accepted — confirm intent.
    file = FileField("File(img)", validators=[FileRequired(), FileAllowed(['pdf','png','jpg'])])
    submit = SubmitField("Submit", render_kw={'class': 'btn btn-primary'})

    def to_Article(self):
        """Build an Article model from the form data (date = now; the file itself is not included)."""
        return Article(title=self.title.data, author=self.author.data, highlight=self.highlight.data,
        subject=self.subject.data, date=datetime.datetime.now(), email=self.email.data)
class CommentForm(FlaskForm):
    """Comment submission form; the email must already be activated."""
    # Fix: "beform" -> "before" in the user-facing label.
    email = StringField("Email(You should activate your email before you comment)",
                        validators=[DataRequired(), Email(message="email error")],
                        render_kw={'placeholder': 'Email'})
    comment = TextAreaField("Comment",
                            validators=[DataRequired(), Length(min=5, message='At least 5 letters!')])
    submit = SubmitField("Submit", render_kw={'class': 'btn btn-primary'})
class SearchArticleForm(FlaskForm):
    """Single-field article search box."""
    content = StringField("Search:", render_kw={"class": "form-control"}, validators=[DataRequired()])
    submit = SubmitField("Submit", render_kw={'class': 'btn btn-primary'})
class EmailValidateForm(FlaskForm):
    """Form used to request email (re-)activation."""
    email = StringField("Email", validators=[DataRequired(), Email(message="email error")],
    render_kw={'placeholder': 'Email'})
    submit = SubmitField("Submit", render_kw={'class': 'btn btn-primary'})
|
# contains implementations for DFU methods. This allows for unit testing.
# hmm, class or functions, class or functions... let's go with functions for now.
# If we wind up having to pass in a ton of crap every time try an OO approach.
# TODO move more method implementations here - or in other files - as needed.
# TODO mypy this sucker up
import copy
import time
from installed_clients.baseclient import ServerError as WorkspaceError
def log(message, prefix_newline=False):
    """Print *message* prefixed with the current Unix timestamp.

    Set prefix_newline=True to emit a blank line before the entry.
    """
    leader = "\n" if prefix_newline else ""
    print(f"{leader}{time.time()}: {message}")
# Copies of this in AssemblyUtil and GenomeFileUtil may be redundant since we're sorting here
# Note that insertion order is only maintained since python 3.6 and is only guaranteed since 3.7
# In 3.6 it was an implementation detail.
# As of 2020/12/12 Python 3.6 is provided in the kbase/sdkbase2 image
def _sort_dict(in_struct):
"""
Recursively sort a dictionary by dictionary keys.
"""
if isinstance(in_struct, dict):
return {k: _sort_dict(in_struct[k]) for k in sorted(in_struct)}
elif isinstance(in_struct, list):
return [_sort_dict(k) for k in in_struct]
else:
return in_struct
def save_objects(ws, params, prov):
    """Save objects to a workspace via client *ws*.

    params: dict with 'id' (workspace id) and 'objects' (list of object
    specs; recognized keys: type/name/objid/meta/hidden/data plus optional
    'extra_provenance_input_refs').
    prov: provenance list; element 0 may be extended with each object's
    extra input refs.
    Returns whatever ws.save_objects returns; raises ValueError for missing
    params and re-raises WorkspaceError after logging it.
    """
    # TODO unit tests
    objs = params.get('objects')
    if not objs:
        raise ValueError('Required parameter objects missing')
    wsid = params.get('id')
    if not wsid:
        raise ValueError('Required parameter id missing')
    objs_to_save = []
    for o in objs:
        obj_to_save = {}
        prov_to_save = prov
        if 'extra_provenance_input_refs' in o:
            # need to make a copy so we don't clobber other objects
            prov_to_save = copy.deepcopy(prov)
            extra_input_refs = o['extra_provenance_input_refs']
            if extra_input_refs:
                if len(prov) > 0:
                    # Extend (or create) the input refs on the first provenance action.
                    if 'input_ws_objects' in prov[0]:
                        prov_to_save[0]['input_ws_objects'].extend(extra_input_refs)
                    else:
                        prov_to_save[0]['input_ws_objects'] = extra_input_refs
                else:
                    # No existing provenance: make a minimal action holding the refs.
                    prov_to_save = [{'input_ws_objects': extra_input_refs}]
        # Copy only the recognized spec keys; anything else is dropped.
        keys = ['type', 'name', 'objid', 'meta', 'hidden']
        for k in keys:
            if k in o:
                obj_to_save[k] = o[k]
        """
        Sorting the data is important for 2 reasons:
        1) It prevents the workspace from rejecting the save because the sort takes too much memory.
        The workspace puts limits on memory use because it has to service many apps / UIs / etc.
        at once.
        2) It distributes the sort across the app worker nodes rather than concentrating them on
        the workspace node.
        """
        # TODO sort in WSLargeDataIO as well
        if 'data' in o:
            obj_to_save['data'] = _sort_dict(o['data'])
        obj_to_save['provenance'] = prov_to_save
        objs_to_save.append(obj_to_save)
    try:
        return ws.save_objects({'id': wsid, 'objects': objs_to_save})
    except WorkspaceError as e:
        # Surface the server-side details before propagating.
        log('Logging workspace error on save_objects: {}\n{}'.format(e.message, e.data))
        raise
|
#! /usr/bin/env python
# coding: utf-8
import os
import uuid
import hashlib
from werkzeug.utils import secure_filename
from flask import Flask, Blueprint, send_from_directory, request, jsonify, url_for, make_response
from flask_helper.utils.folder import create_folder2
__author__ = '鹛桑够'
def support_upload(app_or_blue, upload_route="upload", get_route="file", static_folder=None):
    """Attach a generic POST upload route and a matching GET download route.

    app_or_blue: a Flask app or Blueprint.
    static_folder: where uploads are stored; defaults to the object's static folder.
    Uploaded files are renamed to a random UUID hex (original extension kept)
    and the handler returns {"status": True, "data": {field: download_url}}.

    Fix: the original guard was `isinstance(...) is Flask`, which compares a
    bool with a class and is always False, so invalid objects were never
    rejected.
    """
    if not isinstance(app_or_blue, (Flask, Blueprint)):
        raise RuntimeError("only support Flask or Blueprint object")
    if static_folder is None:
        if not app_or_blue.has_static_folder:
            raise RuntimeError('No static folder for this object')
        static_folder = app_or_blue.static_folder

    def get_upload(filename):
        # Serve a previously uploaded file from the storage folder.
        cache_timeout = app_or_blue.get_send_file_max_age(filename)
        return send_from_directory(static_folder, filename, cache_timeout=cache_timeout)

    get_endpoint = "%s_get_upload" % get_route.replace("/", "_")
    app_or_blue.add_url_rule("/" + get_route + '/<path:filename>', endpoint=get_endpoint, view_func=get_upload)
    # Fully qualified endpoint name for url_for (blueprint-prefixed).
    get_endpoint = "%s.%s" % (app_or_blue.name, get_endpoint)

    @app_or_blue.route("/" + upload_route + "/", methods=["POST"])
    def handle_upload():
        # Save every file in the multipart request under a random name and
        # map each form field to the resulting download URL.
        r = dict()
        for key in request.files:
            file_item = request.files[key]
            filename = secure_filename(file_item.filename)
            extension = filename.rsplit(".", 1)[-1].lower()
            save_name = uuid.uuid4().hex + ".%s" % extension
            file_item.save(os.path.join(static_folder, save_name))
            r[key] = url_for(get_endpoint, filename=save_name)
        return jsonify({"status": True, "data": r})
def support_upload2(app_or_blue, folder_root, file_url_prefix, sub_folders, upload_route, **kwargs):
    """Attach a POST upload route that stores files under folder_root/sub_folders.

    file_url_prefix + sub_folders form the public URL prefix returned for each
    saved file (no GET route is registered here — the files are assumed to be
    served elsewhere under that prefix).
    kwargs:
      allow_extensions: optional whitelist of lowercase extensions; others get HTTP 400.
      rename_mode: "uuid" (default, random name) or "sha1" (content hash, dedupes identical files).
    """
    allow_extensions = kwargs.pop("allow_extensions", None)
    rename_mode = kwargs.pop("rename_mode", "uuid")
    if isinstance(app_or_blue, (Flask, Blueprint)) is False:
        raise RuntimeError("only support Flask or Blueprint object")
    if file_url_prefix.endswith("/") is False:
        file_url_prefix += "/"
    # sub_folders may be a single folder name or a sequence of path parts.
    if isinstance(sub_folders, str):
        static_folder = create_folder2(folder_root, sub_folders)
        url = file_url_prefix + sub_folders
    else:
        static_folder = create_folder2(folder_root, *sub_folders)
        url = file_url_prefix + "/".join(sub_folders)
    # Normalize the route to "<path>/" with no leading slash.
    upload_route = upload_route.lstrip("/")
    if upload_route.endswith("/") is False:
        upload_route += "/"
    endpoint = upload_route.replace("/", "_")

    def handle_upload():
        # Save every file in the multipart request; map field name -> public URL.
        r = dict()
        for key in request.files:
            file_item = request.files[key]
            filename = secure_filename(file_item.filename)
            extension = filename.rsplit(".", 1)[-1].lower()
            if allow_extensions is not None and extension not in allow_extensions:
                return make_response("Not Allow File Extension", 400)
            if rename_mode == "sha1":
                # Content-addressed name: identical uploads share one file.
                save_name = hashlib.sha1(file_item.read()).hexdigest() + ".%s" % extension
                save_path = os.path.join(static_folder, save_name)
                if os.path.exists(save_path) is False:
                    # read() above consumed the stream; rewind before saving.
                    file_item.seek(0)
                    file_item.save(save_path)
            else:
                save_name = uuid.uuid4().hex + ".%s" % extension
                file_item.save(os.path.join(static_folder, save_name))
            r[key] = url + "/" + save_name
        return jsonify({"status": True, "data": r})

    app_or_blue.add_url_rule("/" + upload_route, endpoint=endpoint, view_func=handle_upload, methods=["POST"])
|
import os
import argparse
import pandas as pd
import numpy as np
import re
from os import walk
from collections import Counter
import random
import shutil
def sort_moas(cpds_moa):
    """
    Sort MOAs based on the number of compounds that are attributed to them in ASCENDING order.
    This is HIGHLY Required before performing the compounds split into train & test.
    """
    # Flatten every compound's '|'-separated MOA string into one list.
    all_moas = []
    for moa_string in cpds_moa.values():
        all_moas.extend(moa_string.split('|'))
    # Stable sort by occurrence count (ascending); ties keep first-seen order.
    counts = Counter(all_moas)
    return [moa for moa, _ in sorted(counts.items(), key=lambda pair: pair[1])]
def create_cpd_moa_df(cpds_moa):
    """
    Create a dataframe that comprises of compounds with their corresponding MOAs, including three additional
    columns: "test", "train" & "marked" which are needed for the compounds split.
    """
    # One row per (compound, single MOA) pair; MOAs come '|'-separated.
    rows = []
    for cpd, moa_string in cpds_moa.items():
        for moa in moa_string.split('|'):
            rows.append((cpd, moa))
    df_pert_cpds_moas = pd.DataFrame(rows, columns=['pert_iname', 'moa'])
    # Bookkeeping flags for the later train/test assignment; nothing is marked yet.
    df_pert_cpds_moas['train'] = False
    df_pert_cpds_moas['test'] = False
    df_pert_cpds_moas['marked'] = df_pert_cpds_moas['train'] | df_pert_cpds_moas['test']
    return df_pert_cpds_moas
def split_cpds_moas(cpd_moas_dict, train_ratio=0.8, test_ratio=0.2):
    """
    This function splits compounds into test & train data based on the number of MOAs that are attributed to them,
    i.e. if the MOAs are present in just one compound, the compounds for those specific MOAs are given to only the
    train data, but if present in more than one compound, the compounds for that MOA are divided into Train/Test
    split based on the test/train ratio.

    - This function was extracted from https://rpubs.com/shantanu/lincs_split_moa
    and then refactored to Python

    Args:
    cpd_moas_dict: Dictionary comprises of compounds as the keys and their respective MOAs (Mechanism of actions)
    as the values
    train_ratio: A decimal value that represent what percent of the data should be given to the train set
    test_ratio: A decimal value that represent what percent of the data should be given to the test set

    Returns:
    df: pandas dataframe containing compounds, MOAs and three new boolean columns (Train, Test, Marked)
    indicating whether a compound is in Train or Test dataset.
    """
    ##preliminary funcs
    # Process MOAs rarest-first so constrained MOAs are assigned before common ones.
    moa_list = sort_moas(cpd_moas_dict)
    df = create_cpd_moa_df(cpd_moas_dict)
    # Fixed seed keeps the split reproducible across runs.
    random.seed(333)
    for moa in moa_list:
        df_moa = df[df['moa'] == moa].reset_index(drop=True)
        no_cpd = df_moa.shape[0]
        if no_cpd == 1:
            # Singleton MOA: its only compound always goes to train.
            n_trn, n_tst = 1, 0
        else:
            n_trn, n_tst = np.floor(no_cpd*train_ratio), np.ceil(no_cpd*test_ratio),
        # Compounds already assigned ("marked") while processing earlier MOAs.
        n_tst_mk = sum(df_moa.test)
        n_trn_mk = sum(df_moa.train)
        moa_mk = df_moa[df_moa['marked']].copy()
        moa_not_mk = df_moa[~df_moa['marked']].copy()
        trn_needed = int(n_trn - n_trn_mk)
        tst_needed = int(n_tst - n_tst_mk)
        # NOTE(review): n_cpds_needed is computed BEFORE the max() clamps below;
        # if either count goes negative the sample size no longer matches the
        # flag list length — confirm this cannot occur for the intended inputs.
        n_cpds_needed = trn_needed + tst_needed
        ##print(moa, df_moa.shape[0], moa_not_mk.shape[0], n_cpds_needed, trn_needed, tst_needed)
        trn_needed = max(trn_needed, 0)
        tst_needed = max(tst_needed, 0)
        # Randomly shuffle which still-unmarked compounds become train vs test.
        trn_flg = list(np.concatenate((np.tile(True, trn_needed), np.tile(False, tst_needed))))
        trn_flg = random.sample(trn_flg, n_cpds_needed)
        tst_flg = [not boolean for boolean in trn_flg]
        moa_not_mk.train = trn_flg
        moa_not_mk.test = tst_flg
        if moa_not_mk.shape[0] > 0:
            moa_not_mk.marked = True
        df_moa = pd.concat([moa_not_mk, moa_mk], axis=0, ignore_index=True)
        # Propagate the new assignments to the same compounds under other MOAs,
        # so each compound carries one consistent train/test label everywhere.
        df_other_moa = df[df['moa'] != moa].reset_index(drop=True)
        df_otrs_mk = df_other_moa[df_other_moa['marked']].reset_index(drop=True)
        df_otrs_not_mk= df_other_moa[~df_other_moa['marked']].reset_index(drop=True)
        df_otrs_not_mk = df_otrs_not_mk[['pert_iname', 'moa']].merge(moa_not_mk.drop(['moa'], axis=1),
                                                                     on=['pert_iname'], how='left').fillna(False)
        df = pd.concat([df_moa, df_otrs_mk, df_otrs_not_mk], axis=0, ignore_index=True)
    # Merge/fillna can leave object dtype; force the flags back to bool.
    df[['train', 'test']] = df[['train', 'test']].apply(lambda x: x.astype(bool))
    return df
# Emit each sample number on its own line.
random_numbers = [1, 22, 333, 4444, 55555]
print(*random_numbers, sep="\n")
|
import os
from sklearn.ensemble import RandomForestClassifier
# Hide all CUDA devices so TensorFlow (imported further below) runs CPU-only.
# This must be set before `import tensorflow`.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import multiprocessing
if __name__ == '__main__':
    # The start method can be set only once per process; it is configured here,
    # before the heavyweight imports below, so workers are spawned cleanly.
    multiprocessing.set_start_method('spawn')
from params import global_param
from TsmlExtractorWrapper import get_dataset_fold
import tensorflow as tf
from logic_components import get_covariance_functions_multi_ll, get_likelihood_features, z_score_normalize
if __name__ == '__main__':
    ## Definition of parameters for LOGIC training
    random_restarts: int = 5
    dataset = "Beef"
    n = 470 # length of a single time series in dataset 'Beef'
    # w/r are derived from the series length — presumably window length and
    # stride for the GP models; TODO confirm against logic_components.
    w = n // 20
    r = w // 10
    fold = 0
    global_param.p_batch_metric_aggregator = tf.math.reduce_max
    ## Loading dataset
    X_train_raw, X_test_raw, y_train_raw, y_test_raw = get_dataset_fold(dataset, fold)
    ## Determine Gaussian Process Models
    covariance_functions = get_covariance_functions_multi_ll(X_train_raw, w, r, random_restarts)
    ## Likelihood Feature Space Embedding
    X_train_feat = get_likelihood_features(X_train_raw, covariance_functions, w, r)
    X_test_feat = get_likelihood_features(X_test_raw, covariance_functions, w, r)
    ## Normalize Feature Embeddings for training and test input data
    X_train_feat, X_test_feat = z_score_normalize(X_train_feat, X_test_feat)
    ## Train an exemplary general-purpose classifier given the feature vectors and corresponding target values (y)
    algo = RandomForestClassifier()
    # The squeezes drop trailing singleton dims so sklearn receives 2-D X and
    # 1-D y — assumes the feature/label tensors carry extra unit axes; confirm.
    algo.fit(tf.squeeze(X_train_feat, axis=-1), tf.squeeze(tf.squeeze(y_train_raw, axis=-1), axis=-1))
    accuracy = algo.score(tf.squeeze(X_test_feat, axis=-1), tf.squeeze(tf.squeeze(y_test_raw, axis=-1), axis=-1))
    print(accuracy)
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
# other imports
from KratosMultiphysics.time_based_ascii_file_writer_utility import TimeBasedAsciiFileWriterUtility
def Factory(settings, Model):
    """Kratos entry point: build the PointOutputProcess from json settings.

    Raises if *settings* is not a Kratos Parameters object.
    """
    # isinstance instead of an exact type comparison: subclasses of
    # Parameters are equally valid here, and isinstance is the Python idiom.
    if not isinstance(settings, KratosMultiphysics.Parameters):
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    return PointOutputProcess(Model, settings["Parameters"])
class PointOutputProcess(KratosMultiphysics.Process):
    """This process writes results from a geometrical position (point) in the model to a file
    It first searches the entity containing the requested output location and then interpolates
    the requested variable(s)
    The output can be requested for elements, conditions and nodes. For nodes no geometrical
    interpolation is performed, the exact coordinates have to be specified.
    This process works in MPI as well as with restarts
    It can serve as a basis for other processes (e.g. MultiplePointsOutputProcess)
    Furthermore it can be used for testing in MPI where the node numbers can change
    """
    def __init__(self, model, params):
        """Validate the user settings against the defaults and store them.

        The search for the entity containing the point is deferred to
        ExecuteInitialize, when the ModelPart is fully constructed.
        """
        KratosMultiphysics.Process.__init__(self)
        default_settings = KratosMultiphysics.Parameters('''{
            "help"              : "This process writes results from a geometrical position (point) in the model to a file. It first searches the entity containing the requested output location and then interpolates the requested variable(s). The output can be requested for elements, conditions and nodes. For nodes no geometrical interpolation is performed, the exact coordinates have to be specified. This process works in MPI as well as with restarts. It can serve as a basis for other processes (e.g. MultiplePointsOutputProcess). Furthermore it can be used for testing in MPI where the node numbers can change",
            "model_part_name"   : "",
            "entity_type"       : "element",
            "position"          : [],
            "output_variables"  : [],
            "historical_value"  : true,
            "search_tolerance"  : 1e-6,
            "print_format"      : "",
            "output_file_settings": {}
        }''')
        self.model = model
        self.params = params
        self.params.ValidateAndAssignDefaults(default_settings)
        # These quantities are lists such that they can be looped
        # => needed for mpi in case the point is in a different partition
        self.output_file = []
        self.entity = []
        self.area_coordinates = []
        self.output_variables = []
        self.format = self.params["print_format"].GetString()
        self.historical_value = self.params["historical_value"].GetBool()
        self.search_tolerance = self.params["search_tolerance"].GetDouble()
    def ExecuteInitialize(self):
        """Locate the entity containing the point in the (possibly distributed)
        ModelPart, validate the requested variables and open the output file
        on exactly one rank.
        """
        # getting the ModelPart from the Model
        model_part_name = self.params["model_part_name"].GetString()
        if model_part_name == "":
            raise Exception('No "model_part_name" was specified!')
        self.model_part = self.model[model_part_name]
        # retrieving the position of the entity
        point_position = self.params["position"].GetVector()
        if point_position.Size() != 3:
            raise Exception('The position has to be provided with 3 coordinates!')
        point = KratosMultiphysics.Point(point_position[0],
                                         point_position[1],
                                         point_position[2])
        # retrieving the output variables
        output_var_names = self.params["output_variables"]
        variable_names = [ output_var_names[i].GetString() for i in range( output_var_names.size() ) ]
        output_vars = [ KratosMultiphysics.KratosGlobals.GetVariable( var ) for var in variable_names ]
        if len(output_vars) == 0:
            raise Exception('No variables specified for output!')
        self.output_variables.append(output_vars)
        # validate types of variables
        for var in self.output_variables[0]:
            if self.historical_value:
                self.__CheckVariableIsSolutionStepVariable(var)
            if type(var) == KratosMultiphysics.DoubleVariable:
                continue
            elif type(var) == KratosMultiphysics.Array1DVariable3:
                continue
            else:
                err_msg  = 'Type of variable "' + var.Name() + '" is not valid\n'
                err_msg += 'It can only be double, component or array3d!'
                raise Exception(err_msg)
        # retrieving the entity type
        entity_type = self.params["entity_type"].GetString()
        if entity_type == "node":
            found_id = KratosMultiphysics.BruteForcePointLocator(self.model_part).FindNode(point, self.search_tolerance)
            if found_id > -1:
                self.entity.append(self.model_part.Nodes[found_id]) # note that this is a find!
                self.area_coordinates.append("dummy") # needed for looping later
        elif entity_type == "element":
            self.sf_values = KratosMultiphysics.Vector()
            found_id = KratosMultiphysics.BruteForcePointLocator(self.model_part).FindElement(point, self.sf_values, self.search_tolerance)
            if found_id > -1:
                self.entity.append(self.model_part.Elements[found_id]) # note that this is a find!
                self.area_coordinates.append(self.sf_values)
        elif entity_type == "condition":
            self.sf_values = KratosMultiphysics.Vector()
            found_id = KratosMultiphysics.BruteForcePointLocator(self.model_part).FindCondition(point, self.sf_values, self.search_tolerance)
            if found_id > -1:
                self.entity.append(self.model_part.Conditions[found_id]) # note that this is a find!
                self.area_coordinates.append(self.sf_values)
        else:
            err_msg  = 'Invalid "entity_type" specified, it can only be:\n'
            err_msg += '"node", "element", "condition"'
            raise Exception(err_msg)
        # Check if a point was found, and initialize output
        # NOTE: If the search was not successful (i.e. found_id = -1), we fail silently and
        # do nothing. This is BY DESIGN, as we are supposed to work on MPI too, and the point
        # in question might lie on a different partition.
        # Here we also check if the point has been found in more than one partition
        # In such a case only one rank (the one with the larger PID) writes the output!
        my_rank = -1 # dummy to indicate that the point is not in my partition
        comm = self.model_part.GetCommunicator().GetDataCommunicator()
        if found_id > -1: # the point lies in my partition
            my_rank = comm.Rank()
        writing_rank = comm.MaxAll(my_rank) # The partition with the larger rank writes
        if my_rank == writing_rank:
            file_handler_params = KratosMultiphysics.Parameters(self.params["output_file_settings"])
            file_header = GetFileHeader(entity_type, found_id, point, self.output_variables[0])
            self.output_file.append(TimeBasedAsciiFileWriterUtility(
                self.model_part, file_handler_params, file_header).file)
    def ExecuteBeforeSolutionLoop(self):
        pass
    def ExecuteInitializeSolutionStep(self):
        pass
    def ExecuteFinalizeSolutionStep(self):
        """Interpolate the requested variables at the point and append one
        time-stamped line to the file (runs only on the rank that owns it).
        """
        time = self.model_part.ProcessInfo[KratosMultiphysics.TIME]
        # zip works with the shortest list, which is what we want here
        # i.e. if no entity was found then also no output_file will be
        # initialized which means that the loop body will never be executed
        for var_list,ent,coord,f in zip(self.output_variables, self.entity, self.area_coordinates, self.output_file):
            # not formatting time in order to not lead to problems with time recognition
            # in the file writer when restarting
            out = str(time)
            for var in var_list:
                value = Interpolate(var, ent, coord, self.historical_value)
                if IsArrayVariable(var):
                    out += " " + " ".join( format(v,self.format) for v in value )
                else:
                    out += " " + format(value,self.format)
            out += "\n"
            f.write(out)
    def ExecuteBeforeOutputStep(self):
        pass
    def ExecuteAfterOutputStep(self):
        pass
    def ExecuteFinalize(self):
        """Close the output file (list is empty on non-writing ranks)."""
        for f in self.output_file:
            f.close()
    def __CheckVariableIsSolutionStepVariable(self, var):
        """Raise if *var* is not a historical (solution step) variable of the ModelPart."""
        # if the requested Variable is a component we check the source Variable
        if type(var) == KratosMultiphysics.DoubleVariable: # TODO check this, might no longer work!
            var = var.GetSourceVariable()
        if not self.model_part.HasNodalSolutionStepVariable(var):
            err_msg  = 'ModelPart "' + self.model_part.Name + '" does not have'
            err_msg += ' "' + var.Name() + '" as SolutionStepVariable!'
            raise Exception(err_msg)
def GetFileHeader(entity_type, entity_id, point, output_variables):
    """Build the header block written at the top of a point-output file.

    The first line records the entity type, its id and the output position;
    the second line lists the column labels: time plus one column per scalar
    component of each requested variable.
    """
    header = '# Results for "{}" with Id # {} at position: '.format(entity_type, entity_id)
    header += 'x: {0:.12g}; y: {1:.12g}; z: {2:.12g}\n'.format(point.X, point.Y, point.Z)
    columns = ['# time']
    for var in output_variables:
        if IsArrayVariable(var):
            # array_1d<double,3> variables expand into one column per component
            columns.append(" {0}_X {0}_Y {0}_Z".format(var.Name()))
        else:
            columns.append(" " + var.Name())
    return header + "".join(columns) + "\n"
def Interpolate(variable, entity, sf_values, historical_value):
    """Return the value of *variable* at the output point.

    For a Node the nodal value is returned directly; for an element or
    condition the nodal values are blended with the shape-function values
    *sf_values* obtained from the point search.

    historical_value selects between the solution-step database and the
    non-historical data container.
    """
    def _nodal_value(node):
        # Single place that encodes the historical / non-historical choice,
        # instead of duplicating both interpolation loops.
        if historical_value:
            return node.GetSolutionStepValue(variable)
        return node.GetValue(variable)

    # isinstance is the Python idiom (and also accepts Node subclasses).
    if isinstance(entity, KratosMultiphysics.Node):
        return _nodal_value(entity)
    # entity is an element or condition: shape-function weighted sum.
    # Initializing 'value' from the first node keeps this type-agnostic
    # (works for scalar and array3 variables alike).
    nodes = entity.GetNodes()
    value = _nodal_value(nodes[0]) * sf_values[0]
    for node, weight in zip(nodes[1:], sf_values[1:]):
        value = value + weight * _nodal_value(node)
    return value
def IsArrayVariable(var):
    """Return True if *var* is an array_1d<double,3> variable (isinstance is
    the Python idiom for type checks)."""
    return isinstance(var, KratosMultiphysics.Array1DVariable3)
|
from google.cloud.aiplatform.helpers import value_converter

# __all__ entries must be strings naming the public attributes, not the
# objects themselves: a non-string in __all__ makes `from ... import *`
# raise "TypeError: Item in __all__ must be str".
__all__ = ("value_converter",)
|
# -*- coding: utf-8 -*-
"""NOTICE: This file was generated automatically by the command: xpoetry setup-py."""
from distutils.core import setup
# Packages and package data bundled into the distribution.
packages = ["flake8_nitpick", "flake8_nitpick.files"]
package_data = {"": ["*"]}
# Runtime dependencies of the plugin.
install_requires = ["attrs", "dictdiffer", "flake8>=3.0.0", "jmespath", "python-slugify", "pyyaml", "requests", "toml"]
# Registers the checker with flake8 via its plugin entry-point group.
entry_points = {"flake8.extension": ["NIP = flake8_nitpick.plugin:NitpickChecker"]}
setup_kwargs = {
"name": "flake8-nitpick",
"version": "0.10.3",
"description": "Flake8 plugin to enforce the same lint configuration (flake8, isort, mypy, pylint) across multiple Python projects",
"long_description": '# flake8-nitpick\n\n[](https://pypi.python.org/pypi/flake8-nitpick)\n[](https://travis-ci.com/andreoliwa/flake8-nitpick)\n[](https://flake8-nitpick.readthedocs.io/en/latest/?badge=latest)\n[](https://coveralls.io/github/andreoliwa/flake8-nitpick?branch=master)\n[](https://codeclimate.com/github/andreoliwa/flake8-nitpick/maintainability)\n[](https://codeclimate.com/github/andreoliwa/flake8-nitpick/test_coverage)\n[](https://pypi.org/project/flake8-nitpick/)\n[](https://pypi.org/project/flake8-nitpick/)\n[](https://github.com/ambv/black)\n\nFlake8 plugin to enforce the same lint configuration (flake8, isort, mypy, pylint) across multiple Python projects.\n\nA "nitpick code style" is a [TOML](https://github.com/toml-lang/toml) file with settings that should be present in config files from other tools. E.g.:\n\n- `pyproject.toml` and `setup.cfg` (used by [flake8](http://flake8.pycqa.org/), [black](https://black.readthedocs.io/), [isort](https://isort.readthedocs.io/), [mypy](https://mypy.readthedocs.io/));\n- `.pylintrc` (used by [pylint](https://pylint.readthedocs.io/) config);\n- more files to come.\n\n---\n\n- [Installation and usage](#installation-and-usage)\n- [Style file](#style-file)\n- [setup.cfg](#setupcfg)\n\n---\n\n## Installation and usage\n\nSimply install the package (in a virtualenv or globally, wherever) and run `flake8`:\n\n $ pip install -U flake8-nitpick\n $ flake8\n\nYou will see warnings if your project configuration is different than [the default style file](https://raw.githubusercontent.com/andreoliwa/flake8-nitpick/master/nitpick-style.toml).\n\n## Style file\n\n### Configure your own style file\n\nChange your project config on `pyproject.toml`, and configure your own style like this:\n\n [tool.nitpick]\n style = "https://raw.githubusercontent.com/andreoliwa/flake8-nitpick/master/nitpick-style.toml"\n\nYou can set `style` with any local file or URL. 
E.g.: you can use the raw URL of a [GitHub Gist](https://gist.github.com).\n\nYou can also use multiple styles and mix local files and URLs:\n\n [tool.nitpick]\n style = ["/path/to/first.toml", "/another/path/to/second.toml", "https://example.com/on/the/web/third.toml"]\n\nThe order is important: each style will override any keys that might be set by the previous .toml file.\nIf a key is defined in more than one file, the value from the last file will prevail. \n\n### Default search order for a style file\n\n1. A file or URL configured in the `pyproject.toml` file, `[tool.nitpick]` section, `style` key, as [described above](#configure-your-own-style-file).\n\n2. Any `nitpick-style.toml` file found in the current directory (the one in which `flake8` runs from) or above.\n\n3. If no style is found, then [the default style file from GitHub](https://raw.githubusercontent.com/andreoliwa/flake8-nitpick/master/nitpick-style.toml) is used.\n\n### Style file syntax\n\nA style file contains basically the configuration options you want to enforce in all your projects.\n\nThey are just the config to the tool, prefixed with the name of the config file.\n\nE.g.: To [configure the black formatter](https://github.com/ambv/black#configuration-format) with a line length of 120, you use this in your `pyproject.toml`:\n\n [tool.black]\n line-length = 120\n\nTo enforce that all your projects use this same line length, add this to your `nitpick-style.toml` file:\n\n ["pyproject.toml".tool.black]\n line-length = 120\n\nIt\'s the same exact section/key, just prefixed with the config file name (`"pyproject.toml".`)\n\nThe same works for `setup.cfg`.\nTo [configure mypy](https://mypy.readthedocs.io/en/latest/config_file.html#config-file-format) to ignore missing imports in your project:\n\n [mypy]\n ignore_missing_imports = true\n\nTo enforce all your projects to ignore missing imports, add this to your `nitpick-style.toml` file:\n\n ["setup.cfg".mypy]\n ignore_missing_imports = true\n\n### 
Absent files\n\nTo enforce that certain files should not exist in the project, you can add them to the style file.\n\n [[files.absent]]\n file = "myfile1.txt"\n\n [[files.absent]]\n file = "another_file.env"\n message = "This is an optional extra string to display after the warning"\n\nMultiple files can be configured as above.\nThe `message` is optional.\n\n## setup.cfg\n\n### Comma separated values\n\nOn `setup.cfg`, some keys are lists of multiple values separated by commas, like `flake8.ignore`.\n\nOn the style file, it\'s possible to indicate which key/value pairs should be treated as multiple values instead of an exact string.\nMultiple keys can be added.\n\n ["setup.cfg".nitpick]\n comma_separated_values = ["flake8.ignore", "isort.some_key", "another_section.another_key"]\n',
"author": "W. Augusto Andreoli",
"author_email": "andreoliwa@gmail.com",
"url": "https://github.com/andreoliwa/flake8-nitpick",
"packages": packages,
"package_data": package_data,
"install_requires": install_requires,
"entry_points": entry_points,
"python_requires": ">=3.6,<4.0",
}
setup(**setup_kwargs) # type: ignore
|
from typing import List, MutableMapping, NamedTuple, Tuple, Union
from enum import Enum, auto
from itertools import product
class Operator(Enum):
    """Button types available in the calculator game."""
    addition = auto()
    subtraction = auto()
    division = auto()
    multiplication = auto()
    pop = auto()          # drop the last digit of the display
    insert = auto()       # append digit(s) on the right of the display
    convert = auto()      # replace each occurrence of one digit string with another
    exponential = auto()  # raise the display to a power
    switch = auto()       # negate the display


class Token(NamedTuple):
    """One button: an operand (int, digit pair for convert, or None) plus its Operator."""
    value: Union[int, Tuple[int, int]]
    operation: Operator

    def __repr__(self):
        if self.operation == Operator.convert:
            return "{} => {}".format(*self.value)
        # Dispatch table instead of the original unreadable nested ternary
        # chain; operators without a symbol (insert) render as the bare value.
        symbols = {
            Operator.multiplication: "*",
            Operator.subtraction: "-",
            Operator.addition: "+",
            Operator.division: "/",
            Operator.pop: "<<",
            Operator.exponential: "^",
            Operator.switch: "+/-",
        }
        symbol = symbols.get(self.operation, "")
        return f"{symbol}{self.value if self.value is not None else ''}"


def solve(goal: int, moves: int, start: int, tokens: List[Token]) -> List[List[Token]]:
    """Brute-force the calculator game.

    Tries every sequence of *moves* button presses drawn from *tokens* (with
    repetition), starting from *start*, and returns the distinct button
    sequences that reach *goal*. A sequence may stop early once the goal is
    hit, so the returned lists can be shorter than *moves*.
    """
    winning_patterns: List[List[Token]] = []
    for possibility in product(tokens, repeat=moves):
        begin = start
        used = []
        solved = False
        # Multiplying or dividing a 0 display as the first move never helps.
        if possibility[0].operation in (Operator.multiplication, Operator.division) and begin == 0:
            continue
        for token in possibility:
            if begin == goal:
                solved = True
                break
            if token.operation == Operator.addition:
                begin += token.value
                used.append(token)
            elif token.operation == Operator.division:
                used.append(token)
                if token.value > begin:
                    break
                begin = int(begin / token.value)
            elif token.operation == Operator.multiplication:
                used.append(token)
                begin *= token.value
            elif token.operation == Operator.subtraction:
                used.append(token)
                begin -= token.value
            elif token.operation == Operator.pop:
                used.append(token)
                new_begin = str(begin)[:-1]
                # Popping the only digit (or a negative remainder) leaves 0.
                begin = int(new_begin) if new_begin.isdigit() else 0
            elif token.operation == Operator.insert:
                used.append(token)
                begin = int(f"{begin if begin != 0 else ''}{token.value}")
            elif token.operation == Operator.convert:
                used.append(token)
                begin = int(str(begin).replace(str(token.value[0]), str(token.value[1])))
            elif token.operation == Operator.exponential:
                used.append(token)
                begin **= token.value
            elif token.operation == Operator.switch:
                used.append(token)
                begin = -begin
            if begin == goal:
                solved = True
                break
        if solved:
            winning_patterns.append(used)
    # De-duplicate while preserving order. The original popped items from the
    # list while enumerating it, which skips elements after each removal and
    # can leave duplicates behind (e.g. with 4+ identical winning sequences).
    unique_patterns = []
    seen = set()
    for pattern in winning_patterns:
        key = tuple(pattern)  # Token is a hashable NamedTuple
        if key not in seen:
            seen.add(key)
            unique_patterns.append(pattern)
    return unique_patterns
# Game levels: each call enumerates the button sequences solving that level.
# Arguments: (goal, number of moves, starting value, available buttons).
level_one = solve(2, 2, 0, [Token(1, Operator.addition)])
level_two = solve(8, 3, 0, [Token(2, Operator.addition), Token(3, Operator.addition)])
level_three = solve(12, 3, 0, [Token(2, Operator.addition), Token(1, Operator.addition), Token(4, Operator.multiplication)])
level_four = solve(7, 3, 1, [Token(4, Operator.addition), Token(2, Operator.subtraction)])
level_five = solve(20, 3, 0, [Token(4, Operator.multiplication), Token(4, Operator.addition)])
level_six = solve(40, 4, 0, [Token(2, Operator.addition), Token(4, Operator.multiplication)])
level_seven = solve(10, 4, 100, [Token(3, Operator.addition), Token(5, Operator.division)])
level_eight = solve(4, 3, 4321, [Token(None, Operator.pop)])
level_nine = solve(4, 3, 0, [Token(None, Operator.pop), Token(8, Operator.addition), Token(5, Operator.multiplication)])
level_ten = solve(9, 4, 50, [Token(None, Operator.pop), Token(5, Operator.division), Token(3, Operator.multiplication)])
level_eleven = solve(100, 3, 99, [Token(None, Operator.pop), Token(8, Operator.subtraction), Token(11, Operator.multiplication)])
level_twelve = solve(404, 5, 0, [Token(8, Operator.addition), Token(10, Operator.multiplication), Token(2, Operator.division)])
level_thirteen = solve(23, 4, 171, [Token(None, Operator.pop), Token(2, Operator.multiplication), Token(9, Operator.subtraction)])
level_fourteen = solve(21, 5, 0, [Token(None, Operator.pop), Token(5, Operator.addition), Token(3, Operator.multiplication), Token(5, Operator.multiplication)])
level_fifteen = solve(50, 3, 10, [Token(3, Operator.multiplication), Token(2, Operator.multiplication), Token(5, Operator.subtraction)])
level_sixteen = solve(2, 5, 0, [Token(None, Operator.pop), Token(4, Operator.addition), Token(9, Operator.multiplication)])
level_seventeen = solve(11, 2, 0, [Token(1, Operator.insert)])
level_eighteen = solve(101, 3, 0, [Token(1, Operator.insert), Token(0, Operator.insert)])
level_nineteen = solve(44, 3, 0, [Token(2, Operator.insert), Token(2, Operator.multiplication)])
level_twenty = solve(35, 2, 0, [Token(3, Operator.addition), Token(5, Operator.insert)])
level_twenty_one = solve(56, 3, 0, [Token(1, Operator.insert), Token(5, Operator.addition)])
level_twenty_two = solve(9, 4, 0, [Token(2, Operator.addition), Token(3, Operator.division), Token(1, Operator.insert)])
level_twenty_three = solve(10, 4, 15, [Token(0, Operator.insert), Token(2, Operator.addition), Token(5, Operator.division)])
level_twenty_four = solve(210, 5, 0, [Token(5, Operator.addition), Token(5, Operator.subtraction), Token(5, Operator.insert), Token(2, Operator.insert)])
level_twenty_five = solve(2020, 4, 40, [Token(0, Operator.insert), Token(4, Operator.addition), Token(2, Operator.division)])
level_twenty_six = solve(11, 4, 0, [Token(12, Operator.insert), Token(None, Operator.pop)])
level_twenty_seven = solve(102, 4, 0, [Token(10, Operator.insert), Token(1, Operator.addition), Token(None, Operator.pop)])
level_twenty_eight = solve(222, 4, 0, [Token(1, Operator.insert), Token((1, 2), Operator.convert)])
level_twenty_nine = solve(93, 4, 0, [Token(6, Operator.addition), Token(7, Operator.multiplication), Token((6, 9), Operator.convert)])
level_thirty = solve(2321, 6, 0, [Token(1, Operator.insert), Token(2, Operator.insert), Token((1, 2), Operator.convert), Token((2, 3), Operator.convert)])
level_thirty_one = solve(24, 5, 0, [Token(9, Operator.addition), Token(2, Operator.multiplication), Token((8, 4), Operator.convert)])
level_thrity_two = solve(29, 5, 11, [Token(2, Operator.division), Token(3, Operator.addition), Token((1, 2), Operator.convert), Token((2, 9), Operator.convert)])
level_thirty_three = solve(20, 5, 36, [Token(3, Operator.addition), Token(3, Operator.division), Token((1, 2), Operator.convert)])
level_thirty_four = solve(15, 4, 2, [Token(3, Operator.division), Token(1, Operator.insert), Token(2, Operator.multiplication), Token((4, 5), Operator.convert)])
level_thirty_five = solve(414, 4, 1234, [Token((23, 41), Operator.convert), Token((24, 14), Operator.convert), Token((12, 24), Operator.convert), Token((14, 2), Operator.convert)])
level_thirty_six = solve(-85, 4, 0, [Token(7, Operator.subtraction), Token(6, Operator.addition), Token(5, Operator.insert)])
level_thirty_seven = solve(9, 3, 0, [Token(1, Operator.subtraction), Token(2, Operator.subtraction), Token(2, Operator.exponential)])
level_thirty_eight = solve(-120, 4, 0, [Token(5, Operator.multiplication), Token(6, Operator.subtraction), Token(4, Operator.insert)])
level_thirty_nine = solve(144, 3, 0, [Token(1, Operator.subtraction), Token(2, Operator.insert), Token(2, Operator.exponential)])
level_fourty = solve(5, 1, -5, [Token(None, Operator.switch)])
level_fourty_one = solve(-6, 3, 0, [Token(4, Operator.addition), Token(2, Operator.addition), Token(None, Operator.switch)])
level_fourty_two = solve(-13, 4, 0, [Token(3, Operator.addition), Token(7, Operator.subtraction), Token(None, Operator.switch)])
level_fourty_three = solve(60, 4, 0, [Token(5, Operator.addition), Token(10, Operator.subtraction), Token(4, Operator.multiplication), Token(None, Operator.switch)])
level_fourty_four = solve(52, 5, 44, [Token(9, Operator.addition), Token(2, Operator.division), Token(4, Operator.multiplication), Token(None, Operator.switch)])
level_fourty_five = solve(10, 5, 9, [Token(5, Operator.addition), Token(5, Operator.multiplication), Token(None, Operator.switch)])
level_fourty_six = solve(12, 5, 14, [Token(6, Operator.insert), Token(5, Operator.addition), Token(8, Operator.division), Token(None, Operator.switch)])
level_fourty_seven = solve(13, 4, 55, [Token(9, Operator.addition), Token(None, Operator.switch), Token(None, Operator.pop)])
level_fourty_eight = solve(245, 5, 0, [Token(3, Operator.subtraction), Token(5, Operator.insert), Token(4, Operator.multiplication), Token(None, Operator.switch)])
level_fourty_nine = solve(12, 4, 39, [Token(-3, Operator.multiplication), Token(3, Operator.division), Token(9, Operator.addition), Token(None, Operator.switch)])
level_fifty = solve(126, 6, 111, [Token(3, Operator.multiplication), Token(9, Operator.subtraction), Token(None, Operator.switch), Token(None, Operator.switch), Token(None, Operator.pop)])
print(level_fifty)
|
# job_runner.py: gis cloud test case,
# Get list of folders, and process each using another python script.
# Author: JMassey
import sys
import argparse
import boto3
import sys
import os
from timeit import default_timer as timer
batch = boto3.client('batch')
# Shared AWS Batch parameters for all Bayota steps.
_JOB_QUEUE = 'Modeling'
_JOB_DEFINITION = 'Modeling-Bayota:6'
_S3_WORKSPACE = 'optimization/ws_copies/bayota_ws_0.1b2'


def _submit_step(job_name, script, config_name, depends_on_job_id=None):
    """Submit one Bayota step to AWS Batch and return its job id.

    When depends_on_job_id is given, the job is chained with an N_TO_N
    dependency so it only starts after the previous step finishes.
    """
    kwargs = {
        'jobName': job_name,
        'jobQueue': _JOB_QUEUE,
        'jobDefinition': _JOB_DEFINITION,
        'containerOverrides': {
            "command": ['python', script, '-cn', config_name,
                        '--s3workspace', _S3_WORKSPACE, '--log_level=INFO'],
        },
    }
    if depends_on_job_id is not None:
        kwargs['dependsOn'] = [{'jobId': depends_on_job_id, 'type': 'N_TO_N'}]
    response = batch.submit_job(**kwargs)
    print("Job ID is {}.".format(response['jobId']))
    return response['jobId']


def main():
    """Submit the three chained Bayota modeling steps (generate, modify, solve).

    NOTE: the argparse/S3 folder-listing scaffolding this script grew out of
    was dead (commented-out) code and has been removed; all job parameters
    below are currently hard-coded.
    """
    step1_id = _submit_step('Bayota_Testing',
                            '/root/bayota/bin/run_steps/step1_generatemodel.py',
                            'step1_studycon1d6c68f6-326c-4487-8d85-5c5113f67bd7')
    step2_id = _submit_step('Bayota_Testing_step2',
                            '/root/bayota/bin/run_steps/step2_modifymodel.py',
                            'step3_expcon70ab6881-1c51-4f03-bcf2-fdef72e5662d',
                            depends_on_job_id=step1_id)
    _submit_step('Bayota_Testing_step3',
                 '/root/bayota/bin/run_steps/step3_solveonetrial.py',
                 'step4_trialcon2534e7f9-9e4b-4544-b351-4fe231bb4f0a',
                 depends_on_job_id=step2_id)


if __name__ == '__main__':
    main()
from datetime import datetime
from lib.resources import Resources
import logging
from PIL import Image, ImageDraw
import requests
class Weather:
    """E-ink style widget drawing a three-slot weather summary (current,
    next hour, and today/tomorrow) from the OpenWeatherMap "one call" API.
    """

    def __init__(self, config, resources: Resources):
        # config: mapping providing 'lat', 'long' and the API 'key'
        # (read in render); resources supplies fonts and weather icons.
        self.config = config
        self.resources = resources

    def render(self, image: Image):
        """Fetch the forecast and draw the full summary onto *image*."""
        response = requests.get("https://api.openweathermap.org/data/2.5/onecall", params={
            'lat': self.config['lat'],
            'lon': self.config['long'],
            'appid': self.config['key'],
            'units': 'metric'}).json()
        draw = ImageDraw.Draw(image)
        title_len = int(draw.textlength('Weather', font=self.resources.font_large()))
        draw.text(((image.width - title_len) // 2, 10), 'Weather', fill=0, font=self.resources.font_large())
        self._draw_weather('Current', response['current']['weather'][0]['id'], response['current']['temp'], 60, image, draw)
        self._draw_weather('Next Hour', response['hourly'][1]['weather'][0]['id'], response['hourly'][1]['temp'], 170, image, draw)
        # From 18:00 the day's own forecast is stale, so show tomorrow instead.
        if datetime.now().hour >= 18:
            self._draw_weather('Tomorrow', response['daily'][1]['weather'][0]['id'], response['daily'][1]['temp']['day'], 280, image, draw)
        else:
            self._draw_weather('Today', response['daily'][0]['weather'][0]['id'], response['daily'][0]['temp']['day'], 280, image, draw)

    # https://openweathermap.org/weather-conditions#Weather-Condition-Codes-2
    @staticmethod  # was a bare function in the class body; only worked because it is always called via the class
    def _get_icon(code: int) -> str:
        """Map an OpenWeatherMap condition code to an icon resource name."""
        # Fixes: logging.warn is deprecated (use warning), and the message
        # must use lazy %-style placeholders — "{}" is never substituted by
        # the logging module, so the code value was silently lost.
        if code < 200:
            logging.warning("Unknown weather code %s", code)
            return 'Moon-New'
        elif code < 300:
            return 'Cloud-Lightning'
        elif code < 400:
            return 'Cloud-Drizzle'
        elif code < 500:
            logging.warning("Unknown weather code %s", code)
            return 'Moon-New'
        elif code < 510:
            return 'Cloud-Rain'
        elif code < 520:
            return 'Cloud-Snow-Alt'
        elif code < 600:
            return 'Cloud-Rain-Sun'
        elif code < 700:
            return 'Cloud-Snow-Alt'
        elif code < 800:
            return 'Cloud-Fog'
        elif code == 800:
            return 'Sun'
        elif code < 804:
            return 'Cloud-Sun'
        elif code == 804:
            return 'Cloud'
        else:
            logging.warning("Unknown weather code %s", code)
            return 'Moon-New'

    def _draw_weather(self, label: str, weather_code: int, temp: float, y: int, image: Image, draw: ImageDraw):
        """Draw one forecast slot (icon, centered label, right-aligned temp) at row *y*."""
        icon = self.resources.icon(Weather._get_icon(weather_code))
        image.paste(icon, (10, y + 30))
        temp_text = "{}°C".format(round(temp))
        # Width is measured from a fixed sample string ("30°C"), presumably to
        # keep the temperature column position stable as the value changes.
        temp_len = int(draw.textlength("30°C", font=self.resources.font_medium()))
        draw.text((image.width - 10 - temp_len, y + 40), temp_text, fill=0, font=self.resources.font_medium())
        text_len = int(draw.textlength(label, font=self.resources.font_medium()))
        draw.text(((image.width - text_len) // 2, y), label, fill=0, font=self.resources.font_medium())
|
import staticfiles_tests.signals # noqa
|
# coding: utf-8
"""
Borrowed from https://github.com/1996scarlet/Dense-Head-Pose-Estimation/blob/main/service/CtypesMeshRender.py
To use this render, you should build the clib first:
```
cd utils/asset
gcc -shared -Wall -O3 render.c -o render.so -fPIC
cd ../..
```
"""
import sys
sys.path.append('../../3DDFA')
import os.path as osp
import cv2
import numpy as np
import ctypes
from utils.functions import plot_image
make_abs_path = lambda fn: osp.join(osp.dirname(osp.realpath(__file__)), fn)
class TrianglesMeshRender(object):
    """ctypes wrapper around the native triangle-mesh rasterizer (render.so).

    Lighting configuration (light position, directional and ambient color)
    is converted to ctypes float arrays once at construction and reused for
    every render call.
    """
    def __init__(
        self,
        clibs,
        light=(0, 0, 5),
        direction=(0.6, 0.6, 0.6),
        ambient=(0.3, 0.3, 0.3)
    ):
        """Load the compiled shared library and precompute lighting buffers.

        Raises with the exact gcc build command if the library is missing.
        """
        if not osp.exists(clibs):
            raise Exception(f'{clibs} not found, please build it first, by run '
                            f'"gcc -shared -Wall -O3 render.c -o render.so -fPIC" in utils/asset directory')
        self._clibs = ctypes.CDLL(clibs)
        # Keep the ctypes views (not the numpy arrays) — they are what the
        # C entry point consumes on every call.
        self._light = np.array(light, dtype=np.float32)
        self._light = np.ctypeslib.as_ctypes(self._light)
        self._direction = np.array(direction, dtype=np.float32)
        self._direction = np.ctypeslib.as_ctypes(self._direction)
        self._ambient = np.array(ambient, dtype=np.float32)
        self._ambient = np.ctypeslib.as_ctypes(self._ambient)
    def __call__(self, vertices, triangles, bg):
        """Rasterize the mesh into *bg* in place via the C `_render` function.

        NOTE(review): triangle indices are tripled before the call ("Attention"
        in the original) — presumably so the native code can address the flat
        (x, y, z) vertex buffer directly; confirm against render.c.
        """
        self.triangles = np.ctypeslib.as_ctypes(3 * triangles) # Attention
        self.tri_nums = triangles.shape[0]
        self._clibs._render(
            self.triangles, self.tri_nums,
            self._light, self._direction, self._ambient,
            np.ctypeslib.as_ctypes(vertices),
            vertices.shape[0],
            np.ctypeslib.as_ctypes(bg),
            bg.shape[0], bg.shape[1]
        )
render_app = TrianglesMeshRender(clibs=make_abs_path('asset/render.so'))
def render(img, ver_lst, tri, alpha=0.6, show_flag=False, wfp=None, with_bg_flag=True):
    """Rasterize every vertex set in ``ver_lst`` and blend onto ``img``.

    When ``with_bg_flag`` is true the mesh is alpha-blended over the input
    image; otherwise it is drawn on a black canvas. Optionally writes the
    result to ``wfp`` and/or displays it.
    """
    canvas = img.copy() if with_bg_flag else np.zeros_like(img)
    for vertex_set in ver_lst:
        # The renderer expects contiguous (N, 3) vertices, hence the transpose.
        render_app(np.ascontiguousarray(vertex_set.T), tri, bg=canvas)
    if with_bg_flag:
        result = cv2.addWeighted(img, 1 - alpha, canvas, alpha, 0)
    else:
        result = canvas
    if wfp is not None:
        cv2.imwrite(wfp, result)
        print(f'Save visualization result to {wfp}')
    if show_flag:
        plot_image(result)
    return result
|
"""Aimsun dummy server.
This script creates a dummy server mimicking the functionality in the Aimsun
runner script. Used for testing purposes.
"""
from thread import start_new_thread
import socket
import struct
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import flow.utils.aimsun.constants as ac # noqa
PORT = 9999
entered_vehicles = [1, 2, 3, 4, 5]
exited_vehicles = [6, 7, 8, 9, 10]
tl_ids = [1, 2, 3, 4, 5]
def send_message(conn, in_format, values):
    """Send a message to the client.

    If the message is a string, it is sent in segments of length 256 (if the
    string is longer than such) and concatenated on the client end.

    NOTE(review): this module targets Python 2 (``thread`` module import);
    strings are sent raw over the socket, which would need encoding on
    Python 3 — confirm the intended runtime.

    Parameters
    ----------
    conn : socket.socket
        socket for server connection
    in_format : str
        format of the input structure
    values : tuple of Any
        commands to be encoded and issued to the client
    """
    if in_format == 'str':
        packer = struct.Struct(format='i')
        values = values[0]
        # when the message is too large, send value in segments and inform the
        # client that additional information will be sent. The value will be
        # concatenated on the other end
        while len(values) > 256:
            # send the next set of data
            conn.send(values[:256])
            values = values[256:]
            # wait for a reply (recv blocks, so this loop runs at most once)
            data = None
            while data is None:
                data = conn.recv(2048)
            # send a not-done signal
            packed_data = packer.pack(*(1,))
            conn.send(packed_data)
        # send the remaining components of the message (which is of length less
        # than or equal to 256)
        conn.send(values)
        # wait for a reply
        data = None
        while data is None:
            data = conn.recv(2048)
        # send a done signal
        packed_data = packer.pack(*(0,))
        conn.send(packed_data)
    else:
        # fixed-format payload: pack once and send in a single call
        packer = struct.Struct(format=in_format)
        packed_data = packer.pack(*values)
        conn.send(packed_data)
def retrieve_message(conn, out_format):
    """Retrieve a message from the client.

    Parameters
    ----------
    conn : socket.socket
        socket for server connection
    out_format : str or None
        format of the output structure

    Returns
    -------
    Any
        received message, unpacked according to ``out_format``
    """
    # The original wrapped the recv/unpack in ``try: ... finally: pass`` —
    # a no-op that additionally left ``unpacked_data`` unbound (NameError)
    # if recv failed. Read and unpack directly instead.
    unpacker = struct.Struct(format=out_format)
    data = conn.recv(unpacker.size)
    return unpacker.unpack(data)
def threaded_client(conn):
    """Serve one Flow client connection, mimicking the Aimsun runner protocol.

    Loops forever: reads a command code, acks it with ``0``, then answers
    with canned data for the recognized constants (entered/exited vehicle
    ids, static/tracking tuples, traffic-light ids). Unknown commands get
    ``-1001``. The id lists are module-level globals that are drained
    (reset to ``[]``) after first retrieval.

    NOTE(review): Python 2 semantics — ``conn.send('Ready.')`` sends a str;
    on Python 3 this would need bytes.
    """
    # send feedback that the connection is active
    conn.send('Ready.')
    done = False
    while not done:
        # receive the next message
        data = conn.recv(256)
        if data is not None:
            # if the message is empty, search for the next message
            if data == '':
                continue
            # convert to integer
            data = int(data)
            if data == ac.VEH_GET_ENTERED_IDS:
                # ack, wait for the client's go-ahead, then send the id list
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global entered_vehicles
                if len(entered_vehicles) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in entered_vehicles])
                send_message(conn, in_format='str', values=(output,))
                # drained after first read, like the real runner
                entered_vehicles = []
            elif data == ac.VEH_GET_EXITED_IDS:
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global exited_vehicles
                if len(exited_vehicles) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in exited_vehicles])
                send_message(conn, in_format='str', values=(output,))
                exited_vehicles = []
            elif data == ac.VEH_GET_STATIC:
                # ack, read the vehicle id, reply with a fixed static tuple
                send_message(conn, in_format='i', values=(0,))
                retrieve_message(conn, 'i')
                output = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                          16, False, 18, 19, 20, 21, 22, 23, 24, 25, 26)
                send_message(conn,
                             in_format='i i i f f f f f f f f f f i i i ? '
                                       'f f f f f i i i i',
                             values=output)
            elif data == ac.VEH_GET_TRACKING:
                send_message(conn, in_format='i', values=(0,))
                retrieve_message(conn, 'i')
                output = (4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 17, 18, 19, 20, 21,
                          22, 23, 24, 25, 26, 27)
                send_message(conn,
                             in_format='f f f f f f f f f f f f f i i i i i i '
                                       'i i',
                             values=output)
            elif data == ac.TL_GET_IDS:
                send_message(conn, in_format='i', values=(0,))
                data = None
                while data is None:
                    data = conn.recv(256)
                global tl_ids
                if len(tl_ids) == 0:
                    output = '-1'
                else:
                    output = ':'.join([str(e) for e in tl_ids])
                send_message(conn, in_format='str', values=(output,))
                tl_ids = []
            # in case the message is unknown, return -1001
            else:
                send_message(conn, in_format='i', values=(-1001,))
# Accept loop: one new listening socket + handler thread per connection.
# NOTE(review): a brand-new socket is created and bound to the same port on
# every iteration while earlier sockets may still be open — this relies on
# SO_REUSEADDR and looks fragile (EADDRINUSE risk); confirm intent.
while True:
    # tcp/ip connection from the aimsun process
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('localhost', PORT))
    # connect to the Flow instance
    server_socket.listen(10)
    c, address = server_socket.accept()
    # start the threaded process
    start_new_thread(threaded_client, (c,))
|
__version__ = '0.14.0'
|
from __future__ import division
import matplotlib as mpl
# Set correct backend
mpl.use('tkagg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import random
import itertools
import threading
import time
import mplcursors
import sys
from hrm import *
from matplotlib.widgets import Button
# Polar H6
macAddress = "00:22:D0:AE:60:06"
fps = 25
class PulseFunction(object):
    """Piecewise-linear ECG-like pulse shape on the interval [startX, endX].

    The curve rises linearly to ``pHeight`` at the quarter point, falls to
    ``-pHeight`` at the three-quarter point, and returns to 0 at ``endX``.
    Outside [startX, endX] the value is 0.
    """
    startX = 0
    endX = 0.2
    pHeight = 0.9

    def __init__(self):
        super(PulseFunction, self).__init__()

    def getValue(self, x):
        """Evaluate the pulse shape at ``x`` (0 outside the pulse window)."""
        peak_x = (self.endX - self.startX) / 4
        trough_x = 3 * (self.endX - self.startX) / 4
        if x <= peak_x:
            return self.pUp(x)
        if x <= trough_x:
            return self.pDown(x)
        if x <= self.endX:
            return self.toZero(x)
        return 0

    def pUp(self, x):
        """Rising edge: 0 at startX up to pHeight at the quarter point."""
        peak_x = (self.endX - self.startX) / 4
        slope = self.pHeight / (peak_x - self.startX)
        return slope * x

    def pDown(self, x):
        """Falling edge: pHeight at the peak down to -pHeight at the trough."""
        peak_x = (self.endX - self.startX) / 4
        trough_x = 3 * (self.endX - self.startX) / 4
        slope = - 2 * self.pHeight / (trough_x - peak_x)
        return slope * (x - peak_x) + self.pHeight

    def toZero(self, x):
        """Recovery: -pHeight at the trough back up to 0 at endX."""
        trough_x = 3 * (self.endX - self.startX) / 4
        slope = self.pHeight / (self.endX - trough_x)
        return slope * (x - trough_x) - self.pHeight
class PulseCurve(object):
    """Scrolling pulse trace buffer driven by the current heart rate.

    ``getData`` advances a ring buffer of ``figWidth`` samples by
    ``pixelsInIteration`` pixels per animation frame, filling them from
    :class:`PulseFunction` scaled to the current pulse period.

    NOTE(review): ``pixelsInIteration`` depends on the module-level ``fps``
    at class-creation time.
    """
    # the shape generator for a single heartbeat
    pulseFunction = None
    # duration of one beat in ms (recomputed by update())
    pulseTime = 0
    # Figure width in pixels
    figWidth = 320
    # Current pixels from left
    t = 0
    # Current position in curve of a single pulse
    i = 0
    # latest heart rate in bpm (0 = flat line)
    pulse = 0
    ys = None
    # width in px for pulse 60
    pulse60Width = 100
    # time between pulses in milliseconds
    pulse60Time = 1000
    msBetweenPixels = pulse60Time / pulse60Width
    pixelsInIteration = int(pulse60Width / fps)
    """docstring for Pulse"""
    def __init__(self):
        super(PulseCurve, self).__init__()
        self.pulseFunction = PulseFunction()
        self.ys = np.zeros(self.figWidth, dtype=float)
    def setPulse(self, newPulse):
        # called from the HRM reader thread with the latest bpm value
        self.pulse = newPulse
    def update(self):
        # recompute the beat period from the current bpm
        if self.pulse == 0:
            # Default time for flat line
            self.pulseTime = self.pulse60Time
        else:
            self.pulseTime = np.round(self.pulse60Time * 60 / self.pulse)
    def getData(self):
        """Advance the ring buffer one frame and return it."""
        # start a new beat once the previous one has fully elapsed
        if self.i >= self.pulseTime:
            self.update();
            self.i = 0
        for j in np.arange(0, self.pixelsInIteration):
            # how many % of pulse has been drawn
            v = self.i / self.pulseTime
            if self.pulse == 0 or v >= 1:
                val = 0
            else:
                val = self.pulseFunction.getValue(v)
            self.ys[(self.t + j) % self.figWidth] = val
            self.i += self.msBetweenPixels
        self.t += self.pixelsInIteration
        self.t = self.t % self.figWidth
        return self.ys
class PulseThread(threading.Thread):
    """Background reader that polls the Bluetooth HRM and feeds ``pc``.

    NOTE(review): ``run`` references the module-level ``pc`` and
    ``macAddress`` globals; ``pc`` must exist before the thread starts.
    """
    def __init__(self):
        super(PulseThread, self).__init__()
        # cooperative shutdown flag checked once per poll
        self._stop_event = threading.Event()
    def stop(self):
        """Request the polling loop to exit."""
        self._stop_event.set()
    def stopped(self):
        return self._stop_event.is_set()
    def run(self):
        # connect to the heart-rate monitor and poll until stopped
        hrm = Hrm(macAddress)
        while not self.stopped():
            hrm.peripheral.waitForNotifications(1.0)
            p = hrm.getPulse()
            pc.setPulse(p)
# Hide toolbar
mpl.rcParams['toolbar'] = 'None'
# BUG FIX: ``pc`` must be created BEFORE the reader thread starts —
# PulseThread.run() references the global ``pc``, so the original order
# (start() first, ``pc = PulseCurve()`` afterwards) raced and could raise
# NameError inside the thread.
pc = PulseCurve()
pulseThread = PulseThread()
pulseThread.start()
fig = plt.figure(facecolor='black')
ax = fig.add_subplot(111, xlim=(0, pc.figWidth), ylim=(-1, 1), facecolor='black')
# Add pulse label to right top corner
text = ax.text(pc.figWidth - 5, 0.9, "0", color='g', fontsize=24, ha='right')
# No margins
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
linecolor = 'g'
linewidth = 3
# Two line segments so a visual gap can scroll across the trace.
line1, = ax.plot(np.arange(0, pc.figWidth), np.zeros(pc.figWidth), linecolor, linewidth=linewidth)
line2, = ax.plot(np.arange(0, pc.figWidth), np.zeros(pc.figWidth), linecolor, linewidth=linewidth)
xs = np.arange(0, pc.figWidth, 1)
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
def animate(j):
    """Advance the trace one frame: update data, label and color."""
    ys = pc.getData()
    text.set_text(pc.pulse)
    # color-code by heart-rate zone
    if pc.pulse >= 90:
        linecolor = 'r'
    elif pc.pulse >= 80:
        linecolor = 'y'
    else:
        linecolor = 'g'
    line1.set_color(linecolor)
    line2.set_color(linecolor)
    text.set_color(linecolor)
    # leave a scrolling gap of ``gapwidth`` pixels at the write position
    gapwidth = 10
    line1.set_xdata(xs[:pc.t])
    line1.set_ydata(ys[:pc.t])
    line2.set_xdata(xs[pc.t+gapwidth:])
    line2.set_ydata(ys[pc.t+gapwidth:])
    return line1,line2,text
def closeAll(args):
    """Stop the HRM thread, close all figures and exit the process."""
    pulseThread.stop()
    plt.close('all')
    sys.exit(0)
ani = animation.FuncAnimation(fig, animate, None,
    interval=1000/fps, blit=True)
# Add close button to top left corner
buttonAxes = plt.axes([0, 0.9, 0.1, 0.1])
closeButton = Button(buttonAxes, 'Close', color='black')
closeButton.on_clicked(closeAll)
plt.show()
# Save animation to file
# Writer = animation.writers['ffmpeg']
# writer = Writer(metadata=dict(artist='irne'), fps=25)
# ani.save("pulse.mp4", writer=writer)
|
from django.shortcuts import render
from rest_framework import status
# Create your views here.
from rest_framework.response import Response
from django.contrib.auth.models import User
from rest_framework import viewsets
from rest_framework import serializers
from accounts.models import Customer, Vendor
from accounts.serializers import UserSerializer, CustomerSerializer, VendorSerializer
from rest_framework.generics import CreateAPIView
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    Users are listed newest-first (ordered by ``-date_joined``).
    """
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class CustomerViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows customers to be viewed or edited.
    """
    queryset = Customer.objects.all()
    serializer_class = CustomerSerializer
class VendorViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows vendors to be viewed or edited.
    """
    queryset = Vendor.objects.all()
    serializer_class = VendorSerializer
class UserRegisterView(viewsets.ViewSet):
    """Register a user (get-or-create by email) plus a Customer profile.

    Expects JSON with keys ``name``, ``email``, ``age`` and ``gender``;
    missing keys yield a ValidationError.
    """
    queryset = User.objects.all()
    serializer_class = UserSerializer

    def create(self, request, *args, **kwargs):
        try:
            data = {
                'username': request.data['name'],
                'email': request.data['email']
            }
            try:
                user = User.objects.get(email=data['email'])
            # BUG FIX: the original used the Python-2-only, over-broad
            # ``except Exception, e`` — any DB error (not just a missing
            # user) silently created a duplicate account. Catch only the
            # "no such user" case.
            except User.DoesNotExist:
                user = User(**data)
                user.save()
                customer = Customer(user=user, age=request.data['age'], gender=request.data['gender'])
                customer.save()
            response = UserSerializer(user).data
            return Response(
                response,
                status=status.HTTP_201_CREATED)
        except KeyError:
            raise serializers.ValidationError({"Invalid JSON": "Blah"})
    # def get_serializer(self, *args, **kwargs):
    #     return super(UserRegisterView, self).get_serializer(*args, **kwargs)
|
#!/usr/bin/env python3
#coding: utf-8
### 1st line allows to execute this script by typing only its name in terminal, with no need to precede it with the python command
### 2nd line declaring source code charset should be not necessary but for exemple pydoc request it
__doc__ = "this module provide support for temporary files and directories"#information describing the purpose of this module
__status__ = "Development"#should be one of 'Prototype' 'Development' 'Production' 'Deprecated' 'Release'
__version__ = "1.0.0"# version number,date or about last modification made compared to the previous version
__license__ = "public domain"# ref to an official existing License
__date__ = "2020"#started creation date / year month day
__author__ = "N-zo syslog@laposte.net"#the creator origin of this prog,
__maintainer__ = "Nzo"#person who curently makes improvements, replacing the author
__credits__ = []#passed mainteners and any other helpers
__contact__ = "syslog@laposte.net"# current contact adress for more info about this file
### import the required modules
import tempfile #for creating and handling temporary files and folders
import os
from os import path
def get_secure_directory():
    """Create and return a fresh private (mode 0700) temporary directory."""
    return tempfile.mkdtemp(suffix='', prefix='')
def get_secure_file(ext='', temp_dir=None):
    """Create a secure temporary file and return its path.

    Parameters
    ----------
    ext : str
        optional extension, without the leading separator
    temp_dir : str or None
        directory to create the file in (default: the system temp dir)

    Returns
    -------
    str
        path of the newly created file
    """
    if ext:
        ext = os.extsep + ext
    # BUG FIX: mkstemp returns an *open* OS-level file descriptor along
    # with the path; the original discarded it with ``[1]``, leaking one
    # descriptor per call. Close it before returning the path.
    fd, pathname = tempfile.mkstemp(dir=temp_dir, prefix='', suffix=ext)
    os.close(fd)
    return pathname
def get_named_file(name, ext='', sub_dir=''):
    """Return a path for a named file inside the system temp directory.

    The file itself is NOT created. Raises FileExistsError if the target
    path already exists.
    """
    filename = name + os.extsep + ext if ext else name
    candidate = os.path.join(tempfile.gettempdir(), sub_dir, filename)
    if os.path.exists(candidate):
        raise FileExistsError(candidate)
    return candidate
|
from .space import *
from .task import *
from .decorator import *
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/11
# @Author : Fred liuchuanhao
# @File : .py
# @Role :
import fire
from libs.aws.com_ebs import ComplianceEbsApi
from models.ebs import DB
from libs.web_logs import ins_log
from libs.db_context import DBContext
from libs.aws.session import get_aws_session
from settings import settings
def sync_cmdb(api):
    """
    Persist EBS volume information into the CMDB database.

    Fetches the volume list from ``api``, wipes the existing DB table, then
    inserts one row per volume.
    :return: None
    """
    ebs_list = api.main()
    with DBContext('w') as session:
        # Clear existing rows first (full refresh, not incremental sync).
        try:
            session.query(DB).delete()
            session.commit()
        except:
            # NOTE(review): bare except silently ignores delete failures
            # and proceeds to insert anyway — confirm this is intended.
            session.rollback()
        # Insert the fresh rows.
        for rds in ebs_list:
            ins_log.read_log('info', 'ebs信息:{}'.format(rds))
            new_db = DB(Attachments=rds.get('Attachments'),
                        AvailabilityZone=rds.get('AvailabilityZone', ),
                        CreateTime=rds.get('CreateTime'),
                        Encrypted=rds.get('Encrypted'),
                        Size=rds.get('Size'),
                        SnapshotId=rds.get('SnapshotId'),
                        State=rds.get('State'),
                        VolumeId=rds.get('VolumeId'),
                        Iops=rds.get('Iops'),
                        VolumeType=rds.get('VolumeType'),
                        Snapshot_overtime=rds.get('Snapshot_overtime'),
                        update_time=rds.get('update_time'),
                        )
            session.add(new_db)
        session.commit()
    ins_log.read_log('info', 'ebs写入数据库共{}条'.format(len(ebs_list)))
def main():
    """
    Build an AWS session from the configured credentials and sync
    EBS volume data into the CMDB.
    :return: None
    """
    session = get_aws_session(**settings.get("aws_key"))
    ebs_api = ComplianceEbsApi(session)
    sync_cmdb(ebs_api)
if __name__ == '__main__':
    # Expose main() as a CLI via python-fire.
    fire.Fire(main)
|
# coding: utf-8
from pprint import pformat
from ..utils import to_dict
class PublicProfile(object):
    """Public-facing subset of a user profile (id, name, avatar).

    Note: ``__init__`` assigns the private fields directly, so the
    not-None validation in the setters is NOT applied to constructor
    arguments (all of which default to ``None``).
    """
    def __init__(self, id=None, first_name=None, last_name=None, profile_image=None):
        self._id = id
        self._first_name = first_name
        self._last_name = last_name
        self._profile_image = profile_image
    @property
    def id(self):
        """Profile identifier."""
        return self._id
    @id.setter
    def id(self, id):
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")
        self._id = id
    @property
    def first_name(self):
        """Given name."""
        return self._first_name
    @first_name.setter
    def first_name(self, first_name):
        if first_name is None:
            raise ValueError("Invalid value for `first_name`, must not be `None`")
        self._first_name = first_name
    @property
    def last_name(self):
        """Family name."""
        return self._last_name
    @last_name.setter
    def last_name(self, last_name):
        if last_name is None:
            raise ValueError("Invalid value for `last_name`, must not be `None`")
        self._last_name = last_name
    @property
    def profile_image(self):
        """Avatar image reference."""
        return self._profile_image
    @profile_image.setter
    def profile_image(self, profile_image):
        if profile_image is None:
            raise ValueError("Invalid value for `profile_image`, must not be `None`")
        self._profile_image = profile_image
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        return to_dict(self.__dict__)
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        BUG FIX: the original accessed ``other.__dict__`` unconditionally,
        raising AttributeError when compared to e.g. an int; return
        NotImplemented for foreign types instead.
        """
        if not isinstance(other, PublicProfile):
            return NotImplemented
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
import random
import numpy as np
"""
Problem Generation:
This file includes all methods used for generating problems
"""
class Problem:
    """Bundle of parameters describing one generated problem instance."""

    def __init__(self, param_fragment_size, param_queries, param_query_frequency,
                 param_query_cost, param_query_ids, total_nr_queries):
        """Store the generated parameters verbatim as attributes."""
        for attr_name, attr_value in (
                ('param_fragment_size', param_fragment_size),
                ('param_queries', param_queries),
                ('param_query_frequency', param_query_frequency),
                ('param_query_cost', param_query_cost),
                ('param_query_ids', param_query_ids),
                ('total_nr_queries', total_nr_queries)):
            setattr(self, attr_name, attr_value)
def generate_queries(num_queries, num_fragments):
    """Generate ``num_queries`` binary fragment-incidence vectors.

    The first ``num_queries - 1`` queries each touch a random subset of
    fragments (binomial-sized, chosen without replacement); the final query
    covers exactly the fragments no earlier query touched, with fragment 0
    always included so it is never empty.
    """
    queries = []
    touched = [0] * num_fragments
    for _ in range(num_queries - 1):
        subset_size = np.random.binomial(num_fragments - 1, 0.3) + 1
        picked = np.random.choice(num_fragments, subset_size, replace=False)
        picked_set = {int(f) for f in picked}
        for idx in picked_set:
            touched[idx] = 1
        queries.append([1 if i in picked_set else 0 for i in range(num_fragments)])
    touched[0] = 0  # guarantee the complement query is non-empty
    queries.append([0 if flag else 1 for flag in touched])
    return queries
def generate_problems(num_epochs, min_fragments, max_fragments, min_queries, max_queries,
                      num_workloads):
    """Generate one random problem instance per epoch.

    Fragment and query counts are drawn uniformly from
    [min_fragments, max_fragments) and [min_queries, max_queries).
    """
    problems = []
    for _ in range(num_epochs):
        # random.sample(range(...), 1)[0] is kept so the RNG call sequence
        # matches the original exactly.
        nr_fragments = random.sample(range(min_fragments, max_fragments), 1)[0]
        nr_queries = random.sample(range(min_queries, max_queries), 1)[0]
        problems.append(add_problem_properties(nr_fragments, nr_queries, num_workloads))
    return problems
def add_problem_properties(param_num_fragments, param_num_queries, workloads):
    """Assemble a Problem with random fragment sizes, frequencies and costs."""
    fragment_sizes = random.choices(range(1, 3000), k=param_num_fragments)
    queries = generate_queries(param_num_queries, param_num_fragments)
    # one frequency vector per workload
    frequencies = [random.choices(range(1, 100), k=param_num_queries)
                   for _ in range(workloads)]
    costs = random.choices(range(1, 100), k=param_num_queries)
    query_ids = list(range(len(costs)))
    return Problem(fragment_sizes, queries, frequencies, costs, query_ids,
                   len(query_ids))
|
"""A classification example where we we only have asymptotically 1/4th of parameterized gates."""
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit.library import ZFeatureMap, RealAmplitudes
from qiskit.circuit.parametertable import ParameterTable
from qiskit.opflow import H
from .benchmark import Benchmark
class Classification(QuantumCircuit):
    """UCCSD with the same API as Qiskit's circuit library.

    Combines a bound ZFeatureMap (random input data) with a RealAmplitudes
    ansatz, so only the ansatz parameters remain free.
    """
    def __init__(self, num_qubits, reps=3):
        self._reps = reps
        super().__init__(num_qubits)
        self._build()
    @property
    def reps(self):
        """Get the number of repetitions of the circuit."""
        return self._reps
    @reps.setter
    def reps(self, value):
        """Set the number of repetitions. Rebuilds the circuit."""
        self._reps = value
        self._build()  # rebuild
    def assign_parameters(self, params, inplace=False):
        """Assign parameters; a plain sequence is zipped onto the ansatz
        parameters in their stored order."""
        if isinstance(params, (list, np.ndarray)):
            params = dict(zip(self._params[:], params))
        return super().assign_parameters(params, inplace=inplace)
    @property
    def ordered_parameters(self):
        # NOTE(review): relies on the private ``_parameter_table`` attribute
        # of QuantumCircuit — fragile across Qiskit versions.
        return list(self._parameter_table.keys())
    def _build(self):
        """(Re)construct the circuit: bound feature map + free ansatz."""
        # wipe current state — again via private QuantumCircuit attributes
        self._data = []
        self._parameter_table = ParameterTable()
        # get UCCSD circuit
        featmap = ZFeatureMap(self.num_qubits, reps=self.reps)
        ansatz = RealAmplitudes(self.num_qubits, reps=self.reps, entanglement='circular')
        # store the parameters in a list for assigning them
        self._params = ansatz.ordered_parameters
        # set the data circuit with some input data
        featmap.assign_parameters(np.random.random(featmap.num_parameters), inplace=True)
        # combine the circuit
        self.compose(featmap, inplace=True)
        self.compose(ansatz, inplace=True)
def run_featuremap():
    """Benchmark the 4-qubit Classification circuit and plot the results."""
    circuit = Classification(4)
    # sweep sizes 4..128 (powers of two) against the H observable, 24 reps
    benchmark = Benchmark(2 ** np.arange(2, 8), H, 24)
    benchmark.run_benchmark(circuit, 'free')
    benchmark.plot(show=True)
|
import sys
sys.path.append("xicd")
import tkinter as tk
import time
from xicdcfg import Config
class App(tk.Frame):
    """Tk window that loads a configurable overlay module and runs it.

    The overlay is a plain Python module (named in config.json) that must
    expose ``__overlay_init__(options, app)`` and ``__overlay_update__()``.
    """
    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self._canvas = None
        self._config = Config("config.json")
        self._config.setup()
        # polled by loop(); cleared when the window is closed
        self._running = True
        self._init_window()
        self._init_overlay()
    def _exit_window(self):
        # window-close handler: stop the main loop instead of destroying
        self._running = False
    def _init_window(self):
        """Configure the fixed-size main window."""
        self.master.title("Xicd")
        self.master.resizable(False, False)
        self.master.protocol("WM_DELETE_WINDOW", self._exit_window)
        self.pack(fill=tk.BOTH, expand=True)
    def _init_overlay(self):
        """Import the configured overlay module and initialize it."""
        overlay = self._config._config["overlay"]
        # turn a file path like "overlays/foo.py" into a module path
        overlay_script = self._config._config["overlays"][overlay]["file"].replace('.py', '').replace('/', '.')
        print(f"Script: {overlay_script}")
        self._overlay = __import__(overlay_script, fromlist=["__overlay_init__", "__overlay_update__"])
        self._overlay.__overlay_init__(self._config._config["overlays"][overlay]["options"], self)
    def loop(self):
        """Manual event loop: tick the overlay and Tk at ~100 Hz."""
        while self._running:
            self._overlay.__overlay_update__()
            self.update()
            time.sleep(0.01)
|
from backbone import *
def cnn(backbone, nb_classes=20, patch_size=224, activation='softmax', pretrain='imagenet', trainable=False, isbackbone=False):
    """Build a classification CNN on the named backbone.

    Parameters mirror the backbone builders; ``trainable`` freezes or
    unfreezes every backbone layer.

    Raises
    ------
    ValueError
        for an unsupported backbone name (the original fell through and
        crashed with an unbound-local NameError).
    """
    builders = {
        'resnet50': resnet50,
        'nasnet': nasnet,
        'vggnet16': vggnet16,
        'inceptionv3': inceptionv3,
    }
    if backbone not in builders:
        raise ValueError('unsupported backbone: {!r}'.format(backbone))
    base_model = builders[backbone](nb_classes, patch_size, activation, pretrain, isbackbone)
    # Freeze (or unfreeze) every backbone layer. (The unused ``feat_channel``
    # locals of the original were dropped; use sel_dim() for that value.)
    for layer in base_model.layers:
        layer.trainable = trainable
    return Model(base_model.input, base_model.output, name='cnn')
def sel_dim(backbone):
    """Return the feature-map channel count for the named backbone.

    Raises
    ------
    ValueError
        for an unsupported backbone name (the original crashed with an
        unbound-local NameError instead).
    """
    channels = {
        'resnet50': 2048,
        'nasnet': 1056,
        'vggnet16': 512,
        'inceptionv3': 2048,
    }
    try:
        return channels[backbone]
    except KeyError:
        raise ValueError('unsupported backbone: {!r}'.format(backbone))
|
def char_mask(pat, alphabet):
    """Build Shift-Or character bitmasks for pattern ``pat``.

    Returns a dict mapping each character ``c`` of ``alphabet`` to a
    bitarray of ``len(pat)`` bits where bit ``m - i - 1`` is 0 iff
    ``pat[i] == c`` (all other bits are 1).
    """
    # BUG FIX: the original read an undefined global ``m`` (NameError);
    # the mask length is the pattern length.
    m = len(pat)
    ret = {}
    for c in alphabet:
        ret[c] = bitarray(m * '1')
    for i in range(m):
        ret[pat[i]][m - i - 1] = 0
    return ret
def scan(txt, pat, err, alphabet):
    """Approximate (Shift-Or style) pattern search with up to ``err`` errors.

    Returns the list of positions ``j`` in ``txt`` where an occurrence of
    ``pat`` with at most ``err`` errors ends.
    """
    n = len(txt)
    m = len(pat)
    occ = []
    chmasks = char_mask(pat, alphabet)
    # (removed leftover Python-2 debug statement ``print chmasks``, which
    # polluted stdout and is a syntax error on Python 3)
    # S[q] = match state allowing exactly q errors
    S = [bitarray((m-q) * '1' + q*'0') for q in range(0, err+1)]
    for j in range(n):
        chmsk = chmasks[txt[j]]
        Sjminusoneqminusone = S[0]
        S[0] = (S[0] << 1) | chmsk
        for q in range(1, err+1):
            Sjminusoneq = S[q]
            # NOTE(review): ``shift`` is not defined in this chunk —
            # presumably a bit-shift helper defined elsewhere; confirm.
            S[q] = shift(S[q] | chmsk)\
                & shift(Sjminusoneqminusone)\
                & shift(S[q-1])\
                & Sjminusoneqminusone
            Sjminusoneqminusone = Sjminusoneq
        if not S[0][0]:  # most significant bit is 0 -> occurrence ends at j
            occ.append(j)
    return occ
def main():
    """Demo: search "cada" in "abadac" with up to 2 errors and print hits."""
    txt = "abadac"
    pat = "cada"
    err = 2
    alphabet = "abcd"
    occ = scan(txt, pat, err, alphabet)
    # Python 2 print statement — this module targets Python 2.
    print occ
if __name__ == '__main__':
    main()
from app.main.model.review_dao import ReviewDAO as model_review
from app.main.model.congestion_dao import CongestionDAO as model_congestion
from app.main.model.place_dao import PlaceDAO as model_place
from app.main.model import *
from ..service.search import get_score, get_top_tag, get_matching_rate
from ..service.user import get_likelist
from sqlalchemy.orm import sessionmaker
from flask import *
import pandas as pd
from app.config import Config
from .congestion import Visitor
import datetime
import logging
# Place detail view
def get_detail(param):
    """Build the JSON payload for a place detail page.

    ``param`` must contain exactly the key ``content_id``; otherwise the
    caller is redirected to /main. Collects place info, average review
    star, nearby attractions, congestion series, personalized tags and
    (if logged in) like-list and matching rate.
    """
    if len(param) == 0 or str(list(param.keys())[0]) != 'content_id':
        return redirect('/main', code=302)
    else:
        req_contentid = param['content_id']
    params = {}
    '''
    # SELECT AVG(star) FROM review WHERE place_id IN(
    # SELECT id FROM place WHERE content_id = 특정 장소의 content_id);
    '''
    Session = sessionmaker(bind=client)
    session_ = Session()
    try:
        if req_contentid is not None:
            place_info = session_.query(model_place).filter(model_place.content_id == int(req_contentid)).all()
            review_query = model_review.query.with_entities(func.avg(model_review.star).label('avg_star')).filter(
                model_review.place_id == req_contentid).all()
            if review_query[0][0] is None:
                avg_star = 0
            else:
                avg_star = round(float(review_query[0][0]), 2)
            local_obj = get_local(place_info[0].sigungu_code)
            congestion_obj = get_congestion(place_info[0].sigungu_code)
            params['congestion_obj'] = congestion_obj
            params['place_info'] = place_info[0]
            # keep at most 5 detail images (comma-separated string in DB)
            params['place_info'].detail_image = str(place_info[0].detail_image).split(',')[:5]
            params['avg_star'] = avg_star
            params['local_info'] = local_obj if local_obj is not None else None
            params['algo_star'], params['algo_score'] = get_score(place_info[0].content_id)
            params['tag'], params['size'] = get_top_tag(int(req_contentid), 5)
            try:
                params['user_name'] = session['name']
            except KeyError:
                # fall back to the generic Korean label for "user"
                params['user_name'] = '사용자'
            logging.info('----[' + str(datetime.datetime.now()) + ' get_detail() : 200]----')
            params['code'] = 200
            # Personalized data: skipped when the user is not logged in.
            try:
                if session['access_token']:
                    params['like'] = get_likelist()['like']
                    params['match_pct'], params['user_point'] = get_matching_rate(int(req_contentid))
            except:
                # NOTE(review): broad except also hides real errors, not
                # just the missing-session case — confirm intent.
                pass
            finally:
                session_.close()
    except:
        # any failure above is collapsed into a 500 code in the payload
        logging.error('----[' + str(datetime.datetime.now()) + ' get_detail() : 500]----')
        params['code'] = 500
    finally:
        # NOTE(review): the session may already be closed by the inner
        # finally; the second close() is harmless but redundant.
        session_.close()
    return jsonify(params)
# Nearby attractions and festivals for a district
def get_local(sigungu_code):
    """Return the top places (place_num 1 or 2) in a sigungu district,
    ordered by read count, limited to ``Config.LIMIT`` rows; None when
    ``sigungu_code`` is None (or on any DB error — see NOTE below)."""
    Session = sessionmaker(bind=client)
    session_ = Session()
    try:
        if sigungu_code is not None:
            '''
            # SELECT * FROM place WHERE (place_num = 1 OR place_num = 2 )
            # AND sigungu_code = 47130 ORDER BY readcount DESC LIMIT 5;
            '''
            query = session_.query(model_place).filter(or_(model_place.place_num == 1, model_place.place_num == 2) &
                                                       (model_place.sigungu_code == int(sigungu_code))
                                                       ).order_by(model_place.readcount.desc()).limit(Config.LIMIT).all()
            return query
        else:
            return None
    except:
        # NOTE(review): bare except swallows DB errors and implicitly
        # returns None — callers cannot distinguish "no data" from failure.
        pass
    finally:
        session_.close()
# Past congestion history for one place
def get_past_congestion(content_id):
    """Return congestion rows for ``content_id`` covering the last
    ``Config.DATE_RANGE`` days up to and including today; None when
    ``content_id`` is None.
    """
    Session = sessionmaker(bind=client)
    session_ = Session()
    try:
        if content_id is not None:
            # Intended SQL (see original comment):
            #   BETWEEN date(past) AND date(today) + 1
            # BUG FIX: the original wrote ``.between(past, base) + 1`` which
            # adds 1 to the whole BETWEEN clause instead of extending the
            # upper date bound. Shift the upper bound forward one day here.
            upper = (datetime.datetime.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d 00:00:00')
            past = (datetime.datetime.now() - datetime.timedelta(days=Config.DATE_RANGE)).strftime('%Y-%m-%d 00:00:00')
            query = model_congestion.query.filter(model_congestion.base_ymd.between(past, upper),
                                                  model_congestion.content_id == int(content_id)).all()
            return query
        else:
            return None
    except:
        # NOTE(review): bare except silently returns None on DB errors.
        pass
    finally:
        session_.close()
# Congestion series (district vs. nationwide average)
def get_congestion(sigungu_code):
    """Build chart-ready congestion series for one sigungu district.

    Returns a dict with the date labels, the nationwide average visitor
    series (scaled x10) and the district's own visitor series.
    """
    frame = Visitor().visitor_final(int(sigungu_code))
    # dates become the string x-axis labels
    frame.index = frame.index.map(str)
    average_series = pd.to_numeric(frame['avg_visitor']).values.tolist()
    return {
        'base_ymd': frame.index.tolist(),
        'avg_visitor': [10 * value for value in average_series],
        'sgg_visitor': pd.to_numeric(frame['sgg_visitor']).values.tolist(),
    }
# json serialize date to str
def json_default(query_obj):
    """Split congestion rows into parallel (date string, float value) lists."""
    dates = []
    values = []
    for row in query_obj:
        # '2021-01-02 00:00:00' -> '2021-01-02'
        dates.append(str(row.base_ymd).split(' 00:00:00')[0])
        values.append(float(row.congestion))
    return dates, values
from api.schemas import AllowedBlurType, AllowedFilter, CompressMethod
from api.schemas.schemas import PlacePosition, Position
from api.utils.convert import sizeof_fmt, to_dict
from api.utils.executor import executor
from api.utils.image import *
from fastapi import APIRouter, File, Form, UploadFile, params
from fastapi.exceptions import HTTPException
from fastapi.responses import Response
from pydantic.color import Color
from pydantic.types import NonNegativeInt, PositiveFloat, PositiveInt
router = APIRouter(tags=["tools"])
@router.post("/crop")
async def crop(
    image: UploadFile = File(...),
    x1: PositiveInt = Form(...),
    y1: PositiveInt = Form(...),
    x2: PositiveInt = Form(...),
    y2: PositiveInt = Form(...),
):
    """Crop the uploaded image to the rectangle (x1, y1)-(x2, y2).

    Responds 422 when the crop helper returns nothing (invalid rectangle).
    NOTE(review): PositiveInt rejects 0, so crops starting at the top-left
    pixel are impossible — confirm whether NonNegativeInt was intended.
    """
    content_type = image.content_type
    params = {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=crop_image,
        sr_image=image,
        params=params,
    )
    if not content:
        raise HTTPException(
            status_code=422,
            # fix: dropped the needless f-prefix (no placeholders, F541)
            detail="Crop is not possible with your coordinates.",
        )
    else:
        return Response(content, media_type=content_type)
@router.post("/scale")
async def scale(
    image: UploadFile = File(...),
    width: Optional[PositiveInt] = Form(default=None),
    height: Optional[PositiveInt] = Form(default=None),
):
    """Scale the uploaded image; at least one of width/height is required
    (the missing dimension is left to the scale helper)."""
    if not (width or height):
        # fix: dropped the needless f-prefix (no placeholders, F541)
        raise HTTPException(status_code=422, detail="Width or height is required!")
    else:
        content_type = image.content_type
        params = {"width": width, "height": height}
        content = await executor.run_task(
            func=process_image_pillow,
            proc_func=scale_image,
            sr_image=image,
            params=params,
        )
        return Response(content, media_type=content_type)
@router.post("/rotate")
async def rotate(
    image: UploadFile = File(...),
    angle: float = Form(...),
):
    """Rotate the uploaded image by ``angle`` degrees."""
    rotated = await executor.run_task(
        func=process_image_pillow,
        proc_func=rotate_image,
        sr_image=image,
        params={"angle": angle},
    )
    return Response(rotated, media_type=image.content_type)
@router.post("/resize")
async def resize(
    image: UploadFile = File(...),
    width: PositiveInt = Form(...),
    height: PositiveInt = Form(...),
):
    """Resize the uploaded image to exactly width x height."""
    content_type = image.content_type
    params = {"width": width, "height": height}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=resize_image,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/thumbnail")
async def thumbnail(
    image: UploadFile = File(...),
    width: PositiveInt = Form(...),
    height: PositiveInt = Form(...),
    round: bool = Form(default=False),
    r_radius: PositiveInt = Form(default=5),
):
    """Generate a thumbnail, optionally with rounded corners.

    NOTE(review): the ``round`` parameter shadows the builtin — kept, since
    renaming would change the public form-field name.
    """
    content_type = image.content_type
    params = {
        "width": width,
        "height": height,
        "round": round,
        "r_radius": r_radius,
    }
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=thumbnail_generator,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/filter")
async def filter(
    image: UploadFile = File(...),
    filter_name: AllowedFilter = Form(...),
):
    """Apply one of the allowed Pillow filters to the uploaded image."""
    content_type = image.content_type
    params = {"filter_name": filter_name}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=apply_filter,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/compress")
async def compress(
    image: UploadFile = File(...),
    quality: PositiveInt = Form(default=70),
    compress_method: CompressMethod = Form(default=CompressMethod.default),
    compress_level: PositiveInt = Form(default=9),
):
    """Compress the uploaded image.

    The ``jpegoptim`` method shells out to the external jpegoptim binary;
    every other method goes through the Pillow-based helper.
    """
    content_type = image.content_type
    params = {
        "quality": quality,
        "compress_method": compress_method,
        "compress_level": compress_level,
    }
    if compress_method == CompressMethod.jpegoptim:
        image_bytes = await image.read()
        content = await executor.run_in_shell(
            command=f"jpegoptim --strip-all --max {params['quality']} -",
            input_file=image_bytes,
        )
    else:
        content = await executor.run_task(
            func=process_image_pillow,
            proc_func=compress_image,
            sr_image=image,
            params=params,
        )
    return Response(content, media_type=content_type)
@router.post("/blur")
async def blur(
    image: UploadFile = File(...),
    blur_type: AllowedBlurType = Form(default=AllowedBlurType.gaussian),
    radius: NonNegativeInt = Form(default=5),
):
    """Blur the uploaded image with the chosen blur type and radius."""
    content_type = image.content_type
    params = {"blur_type": blur_type, "radius": radius}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=apply_blur,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/getColorPalette")
async def color_palette(
    image: UploadFile = File(...),
    max_colors: PositiveInt = Form(default=5),
    display_hex_values: bool = Form(default=True),
):
    """Render a palette image of the dominant colors (always PNG output)."""
    params = {"max_colors": max_colors, "display_hex_values": display_hex_values}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=color_palette_generator,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type="image/png")
@router.post("/getImageInfo")
async def get_image_info(
    image: UploadFile = File(...),
    extract_exif: bool = Form(default=True),
):
    """Return exiftool metadata plus file size for the uploaded image.

    NOTE(review): ``extract_exif`` is declared but never used by the body —
    confirm whether it should gate the exiftool call.
    """
    # sniff the magic bytes first to reject non-image uploads early
    image_first_bytes = await image.read(size=8)
    if is_image_content(image_first_bytes):
        await image.seek(0)
        image_bytes = await image.read()
        content = await executor.run_in_shell(
            command="exiftool -c '%.6f' -S -",
            input_file=image_bytes,
            convert_output_func=to_dict,
        )
        content["ImageFileSize"] = sizeof_fmt(len(image_bytes))
    else:
        raise HTTPException(
            status_code=422,
            detail="Unable to determine file type. Supported formats: jpg, jpeg, png",
        )
    return content
@router.post("/concat")
async def concat(
    image_1: UploadFile = File(...),
    image_2: UploadFile = File(...),
    position: Position = Form(default=Position.vertical),
):
    """Concatenate two uploads vertically or horizontally.

    The response uses the first image's content type.
    """
    content_type = image_1.content_type
    params = {"position": position}
    content = await executor.run_task(
        func=process_images,
        proc_func=concat_images,
        sr_image_1=image_1,
        sr_image_2=image_2,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/watermark")
async def watermark(
    image_1: UploadFile = File(...),
    watermark: UploadFile = File(...),
    position: PlacePosition = Form(default=PlacePosition.center),
    repeat: Optional[bool] = Form(default=False),
):
    """Overlay *watermark* on *image_1* at the given position.

    With ``repeat=True`` the watermark is tiled across the image.
    """
    content_type = image_1.content_type
    params = {"position": position, "repeat": repeat}
    content = await executor.run_task(
        func=process_images,
        proc_func=place_watermark,
        sr_image_1=image_1,
        sr_image_2=watermark,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/addText")
async def add_text(
    image: UploadFile = File(...),
    text: str = Form(...),
    size: PositiveInt = Form(...),
    color: Color = Form(default="pink"),
    position: PlacePosition = Form(default=PlacePosition.center),
):
    """Draw *text* onto the uploaded image at the given position."""
    content_type = image.content_type
    # Color is normalized to its hex form before being handed to Pillow.
    params = {"text": text, "size": size, "color": color.as_hex(), "position": position}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=draw_text,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/grayscale")
async def grayscale(
    image: UploadFile = File(...),
):
    """Convert the uploaded image to grayscale.

    BUG FIX: this handler was named ``get_image_info``, colliding with the
    /getImageInfo handler defined earlier in the module. The duplicate
    definition shadowed the first at module level and broke name-based
    route lookups (e.g. ``url_for``); renamed to match its route.
    """
    content_type = image.content_type
    # Pillow mode "L" = 8-bit single-channel luminance.
    params = {"mode": "L"}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=set_grayscale,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/mirror")
async def mirror(
    image: UploadFile = File(...),
):
    """Mirror (flip horizontally) the uploaded image."""
    content_type = image.content_type
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=mirror_apply,
        sr_image=image,
    )
    return Response(content, media_type=content_type)
@router.post("/fit")
async def fit(
    image: UploadFile = File(...),
    width: PositiveInt = Form(...),
    height: PositiveInt = Form(...),
):
    """Resize the uploaded image to fit within width x height."""
    content_type = image.content_type
    params = {"width": width, "height": height}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=image_fit,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/brightness")
async def brightness(
    image: UploadFile = File(...),
    factor: PositiveFloat = Form(...),
):
    """Scale the image brightness by *factor* (1.0 = unchanged)."""
    content_type = image.content_type
    params = {"factor": factor}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=set_brightness,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/invert")
async def invert(
    image: UploadFile = File(...),
):
    """Invert the colors of the uploaded image."""
    content_type = image.content_type
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=invert_apply,
        sr_image=image,
    )
    return Response(content, media_type=content_type)
@router.post("/addBorder")
async def border(
    image: UploadFile = File(...),
    size: int = Form(...),
    color: Color = Form(...),
):
    """Add a solid border of *size* pixels and *color* around the image.

    NOTE(review): ``size`` is a plain int, unlike the PositiveInt used by
    sibling endpoints — confirm whether negative sizes are intentional.
    """
    content_type = image.content_type
    params = {"size": size, "color": color.as_hex()}
    content = await executor.run_task(
        func=process_image_pillow,
        proc_func=add_border,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
@router.post("/addNoise")
async def noise(
    image: UploadFile = File(...),
    variance: PositiveFloat = Form(...),
):
    """Add random noise with the given variance to the uploaded image.

    Uses the OpenCV pipeline (process_image_cv), unlike the Pillow-based
    siblings.
    """
    content_type = image.content_type
    params = {"variance": variance}
    content = await executor.run_task(
        func=process_image_cv,
        proc_func=add_noise,
        sr_image=image,
        params=params,
    )
    return Response(content, media_type=content_type)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.settings import AttributeProxyProperty
from indico.modules.events.models.reviews import ProposalMixin
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.util.locators import locator_property
from indico.util.string import return_ascii
class Paper(ProposalMixin):
    """Proxy class to facilitate access to all paper-related properties."""

    # Attribute of this proxy holding the wrapped object; AttributeProxyProperty
    # descriptors below resolve against it.
    proxied_attr = 'contribution'
    # Proposal mixin properties
    proposal_type = 'paper'
    call_for_proposals_attr = 'cfp'
    revisions_enabled = True

    def __init__(self, contribution):
        # The wrapped Contribution model instance.
        self.contribution = contribution

    @return_ascii
    def __repr__(self):
        # A paper with no revisions yet has no meaningful state.
        state = self.state.name if self.last_revision else None
        return '<Paper(contribution_id={}, state={})>'.format(self.contribution.id, state)

    @locator_property
    def locator(self):
        """URL locator, delegated to the wrapped contribution."""
        return self.contribution.locator

    # Contribution-related
    event = AttributeProxyProperty('event')
    title = AttributeProxyProperty('title')
    verbose_title = AttributeProxyProperty('verbose_title')

    # Paper-related
    revisions = AttributeProxyProperty('_paper_revisions')
    last_revision = AttributeProxyProperty('_paper_last_revision')
    accepted_revision = AttributeProxyProperty('_accepted_paper_revision')
    revision_count = AttributeProxyProperty('_paper_revision_count')
    files = AttributeProxyProperty('_paper_files')

    @property
    def state(self):
        """State of the paper, i.e. the state of its latest revision."""
        return self.last_revision.state

    @state.setter
    def state(self, state):
        self.last_revision.state = state

    @property
    def judgment_comment(self):
        return self.last_revision._judgment_comment

    @property
    def is_in_final_state(self):
        """Whether the paper has been accepted or rejected."""
        return self.state in {PaperRevisionState.accepted, PaperRevisionState.rejected}

    def can_comment(self, user, check_state=False):
        """Whether *user* may comment (submitters, judges and reviewers)."""
        if not user:
            return False
        if check_state and self.is_in_final_state:
            return False
        return self.can_submit(user) or self.can_judge(user) or self.can_review(user)

    def can_submit(self, user):
        """Whether *user* may submit paper proceedings."""
        return self.contribution.can_submit_proceedings(user)

    def can_manage(self, user):
        """Whether *user* can manage the event this paper belongs to."""
        if not user:
            return False
        return self.event.can_manage(user)

    def can_judge(self, user, check_state=False):
        """Whether *user* may judge the paper (managers always can)."""
        if not user:
            return False
        elif check_state and self.is_in_final_state:
            return False
        elif self.can_manage(user):
            return True
        return user in self.contribution.paper_judges

    def can_review(self, user, check_state=False):
        """Whether *user* may review the paper (managers always can)."""
        if not user:
            return False
        elif check_state and self.is_in_final_state:
            return False
        elif self.can_manage(user):
            return True
        return self.contribution.is_paper_reviewer(user)

    def get_revisions(self):
        return self.revisions

    def get_last_revision(self):
        return self.last_revision

    def reset_state(self):
        """Return the latest revision to 'submitted' and clear judgment data."""
        self.last_revision.state = PaperRevisionState.submitted
        self.last_revision.judgment_comment = ''
        self.last_revision.judge = None
        self.last_revision.judgment_dt = None
|
from main import config
def md5_with_salt(text: str, salt: str) -> str:
    """Return the hex MD5 digest of *text* with *salt* appended."""
    import hashlib
    digest = hashlib.md5((text + salt).encode())
    return digest.hexdigest()
def encode_json(obj):
    """Serialize *obj* as JSON; non-serializable values fall back to str()."""
    import json
    return json.dumps(obj, default=str)
def decode_json(obj):
    """Parse a JSON string into the corresponding Python value."""
    import json
    return json.loads(obj)
def make_response(code, **data):
    """Build a JSON response string with a ``code`` field plus extras."""
    body = dict(**{
        "code": code
    }, **data)
    return encode_json(body)
def generate_file_list(pid: int) -> list:
    """List uploaded files for problem *pid*.

    Only files with a companion ``<name>.lock`` file are included; the lock
    file stores the last-modified timestamp as a float string. Returns a
    list of dicts with ``name``, ``last_modified_time`` and ``size``.
    """
    import os
    from main import basedir
    upload_path = os.path.join(basedir, f"{config.UPLOAD_DIR}/" + str(pid))
    os.makedirs(upload_path, exist_ok=True)
    # Exclude the lock files themselves...
    files = filter(lambda x: not x.endswith(".lock"), os.listdir(upload_path))
    # ...and keep only entries whose lock file exists.
    files = filter(lambda x: os.path.exists(
        os.path.join(upload_path, x+".lock")), files)
    def read_file(x):
        # Read the raw text of a file (used for the lock timestamp).
        with open(x, "r") as f:
            return f.read()
    return list(map(lambda x: {"name": x, "last_modified_time": float(read_file(os.path.join(upload_path, x)+".lock")), "size": os.path.getsize(os.path.join(upload_path, x))}, files))
def send_mail(content: str, subject: str, target: str, receiver_username=""):
    """Send a plain-text e-mail via the SMTP server configured in ``config``.

    Returns None on success, or a JSON error response string (built with
    make_response) when the SMTP transaction fails — matching the previous
    behavior, which the ``-> None`` annotation mis-stated.

    BUG FIX: the SMTP connection was leaked when login() or sendmail()
    raised; close() now runs in a ``finally`` block.
    """
    import smtplib
    from email.mime.text import MIMEText
    from email.header import Header
    from email.utils import parseaddr, formataddr

    def my_format(s):
        # Encode the display name as UTF-8 while keeping the address ASCII.
        name, addr = parseaddr(s)
        return formataddr((Header(name, "utf-8").encode("utf-8"), addr))

    message = MIMEText(content, "plain", "utf-8")
    message["Subject"] = Header(subject, "utf-8")
    message["From"] = my_format(f"HelloJudgeV2 <{config.EMAIL_SENDER}>")
    message["To"] = my_format(f"{receiver_username} <{target}>")
    if config.SMTP_USING_SSL:
        smtp_client = smtplib.SMTP_SSL(config.SMTP_SERVER, config.SMTP_PORT)
    else:
        smtp_client = smtplib.SMTP(config.SMTP_SERVER, config.SMTP_PORT)
    try:
        smtp_client.login(config.SMTP_USER, config.SMTP_PASSWORD)
        smtp_client.sendmail(config.EMAIL_SENDER, target,
                             message.as_string())
    except smtplib.SMTPException as ex:
        return make_response(-1, message="发送失败!\n"+str(ex))
    finally:
        # Always release the connection, even when sending fails.
        smtp_client.close()
|
#coding=utf-8
'''
Created on 2016-12-5
@author: Administrator
'''
from doraemon.project import models
from url_filter.filtersets.django import ModelFilterSet
class IssueDailyStatisticsFilterSet(ModelFilterSet):
    """url_filter FilterSet exposing ProjectIssue fields for daily issue statistics queries."""
    class Meta(object):
        # Fields that may appear as query-string filters.
        model = models.ProjectIssue
        fields = ['Project','Version','Status','Module','Processor','IssueCategory','Solution','Severity','Creator','CreationTime','ClosedTime','ResolvedTime']
|
import colorsys
import re
from app.nanoleaf.model import AuroraObject
from app.nanoleaf.exceptions import BadRequestException
class State(AuroraObject):
    """Wrapper around the Nanoleaf Aurora ``state`` endpoints.

    Exposes power, brightness, hue, saturation, color temperature and
    RGB helpers as Python properties backed by GET/PUT requests.
    """

    def __init__(self, requester):
        super().__init__(requester)

    @property
    def color_mode(self):
        """Returns the current color mode."""
        return self._requester.request(method="GET", endpoint="state/colorMode")

    @property
    def on(self):
        """Returns True if the device is on, False if it's off"""
        return self._requester.request(method="GET", endpoint="state/on/value")

    @on.setter
    def on(self, value: bool):
        """Turns the device on/off. True = on, False = off"""
        data = {"on": value}
        self._requester.request(method="PUT", endpoint="state", data=data)

    @property
    def off(self):
        """Returns True if the device is off, False if it's on"""
        return not self.on

    @off.setter
    def off(self, value: bool):
        """Turns the device on/off. True = off, False = on"""
        self.on = not value

    def on_toggle(self):
        """Switches the on/off state of the device"""
        self.on = not self.on

    @property
    def brightness(self):
        """Returns the brightness of the device (0-100)"""
        return self._requester.request(method="GET", endpoint="state/brightness/value")

    @brightness.setter
    def brightness(self, level):
        """Sets the brightness to the given level (0-100)"""
        data = {"brightness": {"value": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    @property
    def brightness_min(self):
        """Returns the minimum brightness possible. (This always returns 0)"""
        return self._requester.request(method="GET", endpoint="state/brightness/min")

    @property
    def brightness_max(self):
        """Returns the maximum brightness possible. (This always returns 100)"""
        return self._requester.request(method="GET", endpoint="state/brightness/max")

    def brightness_raise(self, level):
        """Raise the brightness of the device by a relative amount (negative lowers brightness)"""
        data = {"brightness": {"increment": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    def brightness_lower(self, level):
        """Lower the brightness of the device by a relative amount (negative raises brightness)"""
        self.brightness_raise(-level)

    @property
    def hue(self):
        """Returns the hue of the device (0-360)"""
        return self._requester.request(method="GET", endpoint="state/hue/value")

    @hue.setter
    def hue(self, level):
        """Sets the hue to the given level (0-360)"""
        data = {"hue": {"value": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    @property
    def hue_min(self):
        """Returns the minimum hue possible. (This always returns 0)"""
        return self._requester.request(method="GET", endpoint="state/hue/min")

    @property
    def hue_max(self):
        """Returns the maximum hue possible. (This always returns 360)"""
        return self._requester.request(method="GET", endpoint="state/hue/max")

    def hue_raise(self, level):
        """Raise the hue of the device by a relative amount (negative lowers hue)"""
        data = {"hue": {"increment": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    def hue_lower(self, level):
        """Lower the hue of the device by a relative amount (negative raises hue)"""
        self.hue_raise(-level)

    @property
    def saturation(self):
        """Returns the saturation of the device (0-100)"""
        return self._requester.request(method="GET", endpoint="state/sat/value")

    @saturation.setter
    def saturation(self, level):
        """Sets the saturation to the given level (0-100)"""
        data = {"sat": {"value": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    @property
    def saturation_min(self):
        """Returns the minimum saturation possible. (This always returns 0)"""
        # BUG FIX: the request result was previously dropped (missing return).
        return self._requester.request(method="GET", endpoint="state/sat/min")

    @property
    def saturation_max(self):
        """Returns the maximum saturation possible. (This always returns 100)"""
        # BUG FIX: the request result was previously dropped (missing return).
        return self._requester.request(method="GET", endpoint="state/sat/max")

    def saturation_raise(self, level):
        """Raise the saturation of the device by a relative amount (negative lowers saturation)"""
        data = {"sat": {"increment": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    def saturation_lower(self, level):
        """Lower the saturation of the device by a relative amount (negative raises saturation)"""
        self.saturation_raise(-level)

    @property
    def color_temperature(self):
        """Returns the color temperature of the device (0-100)"""
        return self._requester.request(method="GET", endpoint="state/ct/value")

    @color_temperature.setter
    def color_temperature(self, level):
        """Sets the color temperature to the given level (0-100)"""
        data = {"ct": {"value": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    @property
    def color_temperature_min(self):
        """Returns the minimum color temperature possible. (This always returns 1200)"""
        return self._requester.request(method="GET", endpoint="state/ct/min")

    @property
    def color_temperature_max(self):
        """Returns the maximum color temperature possible. (This always returns 6500)"""
        return self._requester.request(method="GET", endpoint="state/ct/max")

    def color_temperature_raise(self, level):
        """Raise the color temperature of the device by a relative amount (negative lowers color temperature)"""
        data = {"ct": {"increment": level}}
        self._requester.request(method="PUT", endpoint="state", data=data)

    def color_temperature_lower(self, level):
        """Lower the color temperature of the device by a relative amount (negative raises color temperature)"""
        self.color_temperature_raise(-level)

    # TODO: Shame on all these magic numbers. SHAME.
    @property
    def rgb(self):
        """The color of the device, as represented by 0-255 RGB values"""
        hue = self.hue
        saturation = self.saturation
        brightness = self.brightness
        if hue is None or saturation is None or brightness is None:
            return None
        rgb = colorsys.hsv_to_rgb(hue / 360, saturation / 100, brightness / 100)
        return [int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)]

    @rgb.setter
    def rgb(self, color):
        """Set the color of the device, as represented by either a hex string or a list of 0-255 RGB values"""
        try:
            red, green, blue = color
        except ValueError:
            # Not a 3-item sequence; assume a 6-char hex string like "ff00aa".
            try:
                hexcolor = color
                reg_match = re.match("^([A-Fa-f0-9]{6})$", hexcolor)
                if reg_match:
                    red = int(hexcolor[:2], 16)
                    green = int(hexcolor[2:-2], 16)
                    blue = int(hexcolor[-2:], 16)
                else:
                    print("Error: Color must be in valid hex format.")
                    return
            except ValueError:
                print("Error: Color must have one hex value or three 0-255 values.")
                return
        if not 0 <= red <= 255:
            print("Error: Red value out of range! (0-255)")
            return
        if not 0 <= green <= 255:
            print("Error: Green value out of range! (0-255)")
            return
        if not 0 <= blue <= 255:
            print("Error: Blue value out of range! (0-255)")
            return
        # Convert to HSV and push all three components in one request.
        hsv = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
        hue = int(hsv[0] * 360)
        saturation = int(hsv[1] * 100)
        brightness = int(hsv[2] * 100)
        data = {"hue": {"value": hue}, "sat": {"value": saturation}, "brightness": {"value": brightness}}
        self._requester.request(method="PUT", endpoint="state", data=data)
|
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for NN models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import absl.flags as flags
import models.resnet
import models.vggnet
FLAGS = flags.FLAGS
def get_net(num_classes=None):  # pylint: disable=missing-docstring
    """Build a partially-applied network constructor from FLAGS.architecture.

    Supports vgg19 and the resnet50/revnet50 families; raises ValueError
    for anything else. Returns a functools.partial ready to be called as
    the model function.
    """
    architecture = FLAGS.architecture
    if 'vgg19' in architecture:
        net = functools.partial(
            models.vggnet.vgg19,
            filters_factor=FLAGS.get_flag_value('filters_factor', 8))
    else:
        if 'resnet50' in architecture:
            net = models.resnet.resnet50
        elif 'revnet50' in architecture:
            net = models.resnet.revnet50
        else:
            raise ValueError('Unsupported architecture: %s' % architecture)
        net = functools.partial(
            net,
            filters_factor=FLAGS.get_flag_value('filters_factor', 4),
            last_relu=FLAGS.get_flag_value('last_relu', True),
            mode=FLAGS.get_flag_value('mode', 'v2'))
    # Patch-based pretext tasks use smaller inputs, so reduce striding.
    if FLAGS.task in ('jigsaw', 'relative_patch_location'):
        net = functools.partial(net, root_conv_stride=1, strides=(2, 2, 1))
    # Few things that are common across all models.
    net = functools.partial(
        net, num_classes=num_classes,
        weight_decay=FLAGS.get_flag_value('weight_decay', 1e-4))
    return net
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from urllib.parse import urlparse
from base64 import b64decode
from itertools import count
import os
import sys
import json
import shutil
try:
import ijson.backends.yajl2_cffi as ijson
except ImportError:
try:
import ijson.backends.yajl2 as ijson
except ImportError:
try:
import ijson
except ImportError:
ijson = None
__appname__ = 'har-extractor'
__version__ = '1.0.1'
NAME_VERSION = '%s %s' % (__appname__, __version__)
SIZE_UNITS = 'BKMGT'
def format_size(size):
    """Render a byte count as a human-readable string (B/K/M/G/T)."""
    if size < 0:
        return '<invalid size>'
    unit = 'B'
    scale = 1
    # Find the largest unit that keeps the value below 1024.
    for unit in SIZE_UNITS:
        if size < 1024 * scale:
            break
        scale *= 1024
    if size % scale:
        return '%.2f%s' % (size / scale, unit)
    return '%d%s' % (size // scale, unit)
def get_unused_name(path):
    """Return *path* if free, else the first 'name.N.ext' variant that does not exist."""
    if not os.path.exists(path):
        return path
    stem, ext = os.path.splitext(path)
    for i in count(1):
        candidate = '%s.%d%s' % (stem, i, ext)
        if not os.path.exists(candidate):
            return candidate
def write(content, fname):
    """Write *content* to *fname*, picking binary or text mode by type."""
    mode = 'wb' if isinstance(content, bytes) else 'w'
    with open(fname, mode) as fp:
        fp.write(content)
def format_entry(entry):
    """One-line human-readable summary of a HAR entry (method, url, status, type, size)."""
    request = entry.get('request', {})
    response = entry.get('response', {})
    content = response.get('content', {})
    parts = (
        request.get('method', '<no method>'),
        request.get('url', '<no url>'),
        response.get('status', '<no status>'),
        response.get('statusText', '<no status text>'),
        content.get('mimeType', '<no mime type>'),
        format_size(content.get('size', -1)),
    )
    return '%s %s -> %s %s %s %s' % parts
def get_entry_content(entry):
    """Extract a HAR entry's response body.

    Returns a str, bytes (when base64-encoded), or None when the body is
    missing or empty. Raises ValueError for an unknown content encoding.
    """
    try:
        content = entry['response']['content']
        text = content['text']
    except KeyError:
        return None
    if not text:
        return None
    if 'encoding' not in content:
        return text
    if content['encoding'] == 'base64':
        return b64decode(text)
    raise ValueError(
        '\tUnknown content encoding: "%s"' % content['encoding']
    )
def get_entry_path(entry, subdirs=False):
    """Map a HAR entry's request URL to a relative output file path.

    With subdirs=True the host name becomes the leading directory;
    otherwise only the basename is used. An empty URL path maps to
    'index.html'. Raises ValueError when the entry has no request URL.
    """
    try:
        url = urlparse(entry['request']['url'])
    except KeyError:
        raise ValueError('Invalid entry: missing request URL: %s' % repr(entry))
    fname = url.path.strip('/') or 'index.html'
    if subdirs:
        return os.path.join(url.netloc, fname)
    return os.path.basename(fname)
def get_entries(fp, iterative=True):
    """Return an iterable of HAR entries from file object *fp*.

    Uses the streaming ijson parser when available and requested; stdin is
    always read iteratively via its binary buffer. Falls back to loading
    the whole document with json.
    """
    if fp is sys.stdin:
        iterative = True
        fp = fp.buffer
    if ijson is not None and iterative:
        return ijson.items(fp, 'log.entries.item')
    raw = fp.read()
    if isinstance(raw, bytes):
        raw = raw.decode('utf-8')
    return json.loads(raw)['log']['entries']
def get_out_dir(path, default):
    """Resolve the output directory.

    Empty path -> *default*; an existing directory -> path/default;
    a non-existent path is returned as-is. Raises ValueError when *path*
    exists but is not a directory.
    """
    if not path:
        return default
    if not os.path.exists(path):
        return path
    if os.path.isdir(path):
        return os.path.join(path, default)
    raise ValueError('"%s" is not a directory' % path)
def dirnames(entry, root):
    """List the parent directories of *entry* under *root*, deepest first.

    Each element is *root* joined with a successively shorter relative
    parent path of *entry*.
    """
    parents = []
    rel = os.path.dirname(os.path.relpath(entry, root))
    while rel:
        parents.append(os.path.join(root, rel))
        rel = os.path.dirname(rel)
    return parents
def move_files_to_dir(path, first):
    """Move a file and its numbered duplicates into directory *path*.

    *first* becomes path/index.html; siblings named 'name.1.ext',
    'name.2.ext', ... become 'index.1.html', 'index.2.html', ... stopping
    at the first gap in the numbering.
    """
    dirname, name = os.path.split(path)
    name, ext = os.path.splitext(name)
    shutil.move(first, os.path.join(path, 'index.html'))
    for i in count(1):
        fpath = os.path.join(dirname, '%s.%d%s' % (name, i, ext))
        if not os.path.exists(fpath):
            return
        fname = 'index.%d.html' % i
        shutil.move(fpath, os.path.join(path, fname))
def make_entry_dirs(root, entry):
    """Create every directory needed for file *entry* under *root*.

    Fast path: a single makedirs call. If that fails because a path
    component already exists as a regular *file*, walk down from *root*,
    moving each conflicting file aside, creating the directory, and
    relocating the displaced file(s) inside it.
    """
    try:
        os.makedirs(os.path.dirname(entry), exist_ok=True)
        return
    except OSError:
        for path in reversed(dirnames(entry, root)):
            if not os.path.exists(path):
                os.mkdir(path)
            elif not os.path.isdir(path):
                # A file occupies the directory name: move it out of the
                # way, create the directory, then pull the file back in.
                tmp = get_unused_name(path)
                shutil.move(path, tmp)
                os.mkdir(path)
                move_files_to_dir(path, tmp)
def extract(entries, outdir=None,
            subdirs=False, verbose=False, exit_on_error=True):
    """Write the response bodies of HAR *entries* into *outdir*.

    With outdir=None only a listing is printed. subdirs=True recreates the
    URL directory structure; exit_on_error=True re-raises the first write
    or entry error instead of just logging it to stderr.
    """
    if outdir is not None:
        os.makedirs(outdir, exist_ok=True)
    for entry in entries:
        try:
            if verbose or outdir is None:
                print(format_entry(entry))
            if outdir is None:
                # Listing mode: nothing to write.
                continue
            content = get_entry_content(entry)
            if content is None:
                if verbose:
                    print('\t----> <no content>')
                continue
            fname = get_entry_path(entry, subdirs)
            fname = os.path.join(outdir, fname)
            # Never overwrite: pick a numbered variant if needed.
            fname = get_unused_name(fname)
            if verbose:
                print('\t---->', fname)
            try:
                if subdirs:
                    make_entry_dirs(outdir, fname)
                write(content, fname)
            except (OSError, IOError) as err:
                msg = 'Could not write "%s": %s' % (fname, repr(err))
                if exit_on_error:
                    raise IOError(msg)
                else:
                    print(msg, file=sys.stderr)
        except (KeyError, ValueError) as err:
            msg = 'Invalid entry: %s: %s' % (repr(entry), repr(err))
            if exit_on_error:
                raise ValueError(msg)
            else:
                print(msg, file=sys.stderr)
def main(args=None):
    """Command-line entry point.

    Parses arguments, then either lists the HAR file contents (-l) or
    extracts them into the output directory. Returns a process exit code.

    BUG FIX: ``ArgumentParser(args)`` passed the argument list as the
    parser's ``prog`` name; the list belongs to ``parse_args()``. Also
    guards the strict-mode cleanup so ``shutil.rmtree`` is never called
    with None (listing mode) or a directory that was never created.
    """
    parser = ArgumentParser()
    parser.add_argument('file', metavar='FILE', help='HAR file')
    parser.add_argument('-V', '--version',
                        action='version', version=NAME_VERSION)
    parser.add_argument('-l', '--list', action='store_true',
                        help='list the contents of input file')
    parser.add_argument('-o', '--output',
                        metavar='DIRECTORY', default=None,
                        help='set output directory (default: ./<filename>.d)')
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true',
                        help='turn on verbose output (default)')
    parser.add_argument('-nv', '--no-verbose',
                        dest='verbose',
                        action='store_false',
                        help='turn off verbose output')
    parser.add_argument('-i', '--iterative',
                        dest='iterative',
                        action='store_true',
                        help='use iterative json parser')
    parser.add_argument('-ni', '--no-iterative',
                        dest='iterative',
                        action='store_false',
                        help='do not use iterative json parser (default)')
    parser.add_argument('-s', '--strict',
                        dest='strict',
                        action='store_true',
                        help='exit and delete extracted data after first error')
    parser.add_argument('-ns', '--no-strict',
                        dest='strict',
                        action='store_false',
                        help='ignore errors (default)')
    parser.add_argument('-d', '--directories',
                        dest='directories',
                        action='store_true',
                        help='create url directories (default)')
    parser.add_argument('-nd', '--no-directories',
                        dest='directories',
                        action='store_false',
                        help='do not create url directories')
    parser.set_defaults(
        iterative=False,
        directories=True,
        strict=False,
        verbose=True
    )
    # parse_args(None) falls back to sys.argv automatically.
    args = parser.parse_args(args)
    if args.file is None:
        # 'file' is a required positional; this is only a safety net.
        return 1
    if args.list:
        outdir = None
    else:
        try:
            outdir = get_out_dir(args.output,
                                 os.path.basename(args.file) + '.d')
        except ValueError as err:
            print(err, file=sys.stderr)
            return 1
    try:
        with open(args.file, 'rb') as fp:
            entries = get_entries(fp, args.iterative)
            extract(entries, outdir,
                    args.directories, args.verbose, args.strict)
    except (ValueError, IOError) as err:
        # Strict mode removes partially extracted data; skip when listing
        # (outdir is None) or when nothing was ever created.
        if args.strict and outdir is not None and os.path.isdir(outdir):
            shutil.rmtree(outdir)
        print(err, file=sys.stderr)
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
|
# Copyright (C) 2012 Yaco Sistemas (http://www.yaco.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django.contrib.auth.models import Group
from saml2.s_utils import UnknownSystemEntity
from . import settings as saml_settings
from .models import LongGroupName
from django.db.models import F
# import logging
import requests
# logger = logging.getLogger()
# hdlr = logging.FileHandler('/logs/penndjangosaml2.log')
# formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
# hdlr.setFormatter(formatter)
# logger.addHandler(hdlr)
# logger.setLevel(logging.WARNING)
# logger.error(saml_settings.INCLUDE_PENN_GROUPS)
def build_user_groups(user):
    """Synchronize a Django user's groups with their Penn WISP groups.

    Queries the WISP penngroups API, adds any allowed group the user is
    missing, tracks over-long group names in LongGroupName, and removes
    groups the user no longer belongs to. Returns the (saved) user.
    """
    # Strip off .upenn.edu part of username
    pennkey = user.username[:-10]
    headers = {'Authorization': 'Token %s' % saml_settings.WISP_TOKEN}
    try:
        response = requests.get(
            'https://apps.wharton.upenn.edu/wisp/api/v1/penngroups/' + pennkey,
            headers=headers).json()
        # NOTE(review): hasattr on a dict is always False — presumably this
        # intends to detect an API error payload; verify against the API.
        if hasattr(response, 'error'):
            return user
    except ValueError as err:
        raise Exception(
            'WISP did not return valid JSON. This may be due to WISP API being down.'
        ) from err
    groups = response.get('groups')
    include_penn_groups = saml_settings.INCLUDE_PENN_GROUPS
    for penn_group in groups:
        if penn_group in include_penn_groups:
            if len(penn_group) <= saml_settings.MAX_GROUP_NAME_LENGTH:
                group, created = Group.objects.get_or_create(name=str(penn_group))
                if not user.groups.filter(name=str(penn_group)).exists():
                    user.groups.add(group)
            else:
                # Name too long for the Group table; count it separately.
                g, created = LongGroupName.objects.get_or_create(group_name=penn_group)
                g.count = g.count + 1
                g.save()
    # Drop any local group no longer present upstream.
    for group in user.groups.all():
        if group.name not in groups:
            user.groups.remove(group)
    user.save()
    return user
def get_custom_setting(name, default=None):
    """Look up *name* in the SAML settings module for SAML_ATTRIBUTE_MAPPING,
    otherwise in the regular Django settings."""
    source = saml_settings if name == 'SAML_ATTRIBUTE_MAPPING' else settings
    return getattr(source, name, default)
def available_idps(config, langpref=None):
    """Map every IdP entity id found in the metadata store to its display name."""
    if langpref is None:
        langpref = "en"
    idps = set()
    for _metadata_name, metadata in config.metadata.metadata.items():
        sso = metadata.any('idpsso_descriptor', 'single_sign_on_service')
        if sso:
            idps.update(sso.keys())
    return {idp: config.metadata.name(idp, langpref) for idp in idps}
def get_idp_sso_supported_bindings(idp_entity_id=None, config=None):
    """Returns the list of bindings supported by an IDP
    This is not clear in the pysaml2 code, so wrapping it in a util"""
    if config is None:
        # avoid circular import
        from penndjangosaml2.conf import get_config
        config = get_config()
    # load metadata store from config
    meta = getattr(config, 'metadata', {})
    # if idp is None, assume only one exists so just use that
    if idp_entity_id is None:
        # .keys() returns dict_keys in python3.5+
        idp_entity_id = list(available_idps(config).keys()).pop()
    try:
        # Unknown entity ids yield an empty binding list instead of raising.
        return meta.service(idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service').keys()
    except UnknownSystemEntity:
        return []
def get_location(http_info):
    """Extract the redirect URL from a pysaml2 http_info object.

    Expects exactly one header, and that it is 'Location'.
    """
    assert 'headers' in http_info
    headers = http_info['headers']
    assert len(headers) == 1
    name, value = headers[0]
    assert name == 'Location'
    return value
|
import keras
import numpy as np
import sys
import time
PUNISHMENT_FOR_VISIT = 0.2
MAX_COUNTER = 1000
def test_with_data(model):
    """Evaluate *model* against the saved IDA* solution files and print accuracy.

    Reads ../moves/bin-moves/sol_ida_problem_<i>_.txt for i in 0..20. Each
    line is a flattened board followed by a 4-character move label.

    BUG FIX: the inner loop over the prediction rows reused the outer loop
    variable ``i``, clobbering the file index mid-iteration; the inner
    variable is now named ``row``.
    """
    print("TEST")
    counter = 0
    total = 0
    for i in range(21):
        path = "../moves/bin-moves/sol_ida_problem_" + str(i) + "_.txt"
        file = open(path, 'r')
        for line in file.readlines():
            line = line.replace(" ", "")
            solution = line[-5:].strip()  # last 4 chars
            line = line[:-5]  # all line except the solution
            x_pred = np.array([list(line)]).astype('f')
            y_pred = model.predict(x_pred)
            s = ""
            for row in y_pred:
                s += str(row)
            if s == solution:
                counter += 1
            total += 1
        file.close()
    print(total)
    print(counter)
    print(float(counter) / float(total))
def find_pos(line):
    """Locate the blank tile (one-hot '1' in the first bit) in the 4x4 board.

    Returns (row, col); if the blank is missing, prints a message and exits.
    """
    blank = "1000000000000000"
    for row in range(4):
        for col in range(4):
            if line[row][col] == blank:
                return (row, col)
    print("No encontro la posicion del 0 upss..")
    sys.exit()
    return None
def get_matrix(line):
    """Split a 256-char state string into a 4x4 matrix of 16-char one-hot cells."""
    return [
        [line[(r * 4 + c) * 16:(r * 4 + c) * 16 + 16] for c in range(4)]
        for r in range(4)
    ]
def print_matrix(matrix):
    """Pretty-print the 4x4 board as tile numbers (index of the '1' bit)."""
    for r in range(4):
        line = ""
        for c in range(4):
            tile = matrix[r][c].find("1")
            cell = str(tile) + " "
            if tile < 10:
                cell += " "  # pad single digits to align columns
            line += cell
        print(line)
def different(matrix1, matrix2):
    """Return True when any corresponding cell of the two 4x4 boards differs."""
    return any(
        matrix1[r][c] != matrix2[r][c]
        for r in range(4)
        for c in range(4)
    )
def get_x_array(matrix):
    """Flatten the 4x4 board into a (1, 256) float32 array, one float per char."""
    flat = "".join(matrix[r][c] for r in range(4) for c in range(4))
    return np.array([list(flat)]).astype('f')
def move_down(state, pos):
    """Swap the blank at *pos* with the tile below it, in place.

    Returns the mutated state, or False when the blank is on the bottom row.
    """
    x, y = pos
    if x == 3:
        return False
    state[x][y], state[x + 1][y] = state[x + 1][y], state[x][y]
    return state
def move_up(state, pos):
    """Swap the blank at *pos* with the tile above it, in place.

    Returns the mutated state, or False when the blank is on the top row.
    """
    x, y = pos
    if x == 0:
        return False
    state[x][y], state[x - 1][y] = state[x - 1][y], state[x][y]
    return state
def move_right(state, pos):
    """Swap the blank at *pos* with the tile to its right, in place.

    Returns the mutated state, or False when the blank is in the last column.
    """
    x, y = pos
    if y == 3:
        return False
    state[x][y], state[x][y + 1] = state[x][y + 1], state[x][y]
    return state
def move_left(state, pos):
    """Swap the blank at *pos* with the tile to its left, in place.

    Returns the mutated state, or False when the blank is in the first column.
    """
    x, y = pos
    if y == 0:
        return False
    state[x][y], state[x][y - 1] = state[x][y - 1], state[x][y]
    return state
def get_decision(model, state, pos, states_visited, counter_of_visits):
    """Pick the next move from the model's prediction and apply it in place.

    Sorts the four move probabilities, penalizing moves that lead to
    already-visited states (PUNISHMENT_FOR_VISIT per prior visit) and
    skipping moves that fall off the board. Mutates *state* to the chosen
    successor and returns it; exits the process if no move is possible.
    """
    x_pred = get_x_array(state)
    y_pred = model.predict(x_pred)
    y_pred = y_pred[0]
    # [(probability, move), ...] — moves: 0=left, 1=down, 2=right, 3=up.
    ans = [(y_pred[i], i) for i in range(4)]
    # Adjust probabilities based on visited states, then pick the best
    # remaining legal option.
    index = 0
    already_punished = set()
    while index < 4:
        ans.sort(reverse=True)
        best_option = ans[index][1]
        # Apply the candidate move to a deep-ish copy of the board.
        state_copy = [[x for x in st] for st in state]
        if best_option == 0:
            possible_state = move_left(state_copy, pos)
        elif best_option == 1:
            possible_state = move_down(state_copy, pos)
        elif best_option == 2:
            possible_state = move_right(state_copy, pos)
        elif best_option == 3:
            possible_state = move_up(state_copy, pos)
        else:
            # Should be unreachable: the move is always 0..3.
            print("weird best option, siempre debiese ser un movimiento permitido entre 0 y 3")
            sys.exit()
        # Check whether this successor has been visited before.
        if possible_state:
            if (not best_option in already_punished) and (possible_state in states_visited):
                index_visited = states_visited.index(possible_state)  # TODO: avoid repeating this search!
                count = counter_of_visits[index_visited]
                # Lower this move's probability in proportion to how often
                # the successor was already visited, then re-rank from the top.
                x, y = ans[index]
                x -= PUNISHMENT_FOR_VISIT * count
                ans[index] = (x, y)
                already_punished.add(best_option)
                index = 0
                continue
            break
        # Illegal move (off the board): try the next-ranked option.
        index += 1
    if not possible_state:
        # No legal move found — should never happen on a 15-puzzle board.
        print("no se encontro ninguna opcion de movimiento posible, muy raro esto")
        sys.exit()
    # Copy the chosen successor back into the caller's board in place.
    for i in range(len(possible_state)):
        for j in range(len(possible_state[i])):
            state[i][j] = possible_state[i][j]
    return state
def read_states(path='problems.txt'):
    """Read puzzle start states from *path* (default keeps old behavior).

    Each input line looks like "<id> p0 p1 ... p15"; the leading id is
    dropped and every position index i is encoded as a 16-bit one-hot
    string ("0"*i + "1" + "0"*(15-i)).  Returns a list with one
    space-joined one-hot string per input line.
    """
    states = []
    # "with" guarantees the file handle is closed even if a line fails
    # to parse (the original leaked it on exceptions).
    with open(path) as fo:
        for line in fo:
            fields = line.strip().split(" ")[1:]
            indices = [int(x) for x in fields]
            one_hot = ["0" * i + "1" + "0" * (15 - i) for i in indices]
            states.append(" ".join(one_hot))
    return states
import random
def test(model=""):
    """Run the trained solver over every problem in problems.txt.

    For each start state, repeatedly asks get_decision() for moves until
    the solved configuration is reached, printing per-problem step count
    and elapsed time, then a summary.  Loop detection rewinds the step
    counter to the first visit of a repeated state.
    """
    print("TEST")
    # Solved configuration: tile i one-hot encoded at position i.
    last = "1000000000000000 0100000000000000 0010000000000000 0001000000000000 0000100000000000 0000010000000000 0000001000000000 0000000100000000 0000000010000000 0000000001000000 0000000000100000 0000000000010000 0000000000001000 0000000000000100 0000000000000010 0000000000000001"
    states = read_states()
    last = last.replace(" ", "")
    last = get_matrix(last)
    state_number = 1
    length_all_solution = 0
    total_time = 0
    for state in states:
        state_number += 1
        state = state.replace(" ", "")
        state = get_matrix(state)
        counter = 0
        initial_time = time.time()
        states_visited = []  # board matrices already seen for this problem
        counter_of_visits = {}  # {index in states_visited: visit count}
        steps_to_state = {}  # {index in states_visited: min steps to reach it}
        while different(state, last):
            counter += 1
            # NOTE(review): "and False" disables the MAX_COUNTER cut-off,
            # so this branch never triggers.
            if counter > MAX_COUNTER and False:
                print("mayor a", MAX_COUNTER, ", no sigue")
                break
            pos = find_pos(state)
            state = get_decision(model, state, pos, states_visited, counter_of_visits)
            if state is False:
                print("se callo el juego, movida prohibida, esto no deberia pasar")
                sys.exit()
            state_copy = [[x for x in st] for st in state]
            if state_copy in states_visited:
                # Revisit: bump the visit counter and rewind the step count
                # to the first time this state was reached (ignore the loop).
                index = states_visited.index(state_copy)
                counter_of_visits[index] += 1
                counter = steps_to_state[index]
            else:
                states_visited.append(state_copy)
                counter_of_visits[len(states_visited) - 1] = 1
                steps_to_state[len(states_visited) - 1] = counter
        if not different(state, last):
            iteration_time = time.time() - initial_time
            print(state_number - 1, counter, iteration_time)
            length_all_solution += counter
            total_time += iteration_time
    print("RESUMEN")
    print("length: ", length_all_solution)
    print("total_time", total_time)
if __name__ == '__main__':
    # Load the pre-trained solver network and run the benchmark suite.
    model = keras.models.load_model('15puzzle_solver_model.h5')
    test(model)
    #test()
|
#! /usr/bin/python
#
'''
Version : 0.1
Author : Arian Pasquali
Summary : This class defines generic datasource interface
'''
class DataSource:  # define parent class
    """Generic datasource interface.

    Concrete datasources override these hooks; the defaults only log
    what would happen.  Ported from Python 2 print statements to
    print() calls so the module runs on Python 3; the "Openning" typo
    in the log message is also fixed.
    """

    def open_connection(self, options):
        """Open a connection configured by *options* (unused by this stub)."""
        print('Opening connection')

    def index(self, object):
        """Index *object* into the datasource."""
        print('Indexing')

    def close_connection(self):
        """Close the connection."""
        print('Closing connection')
import sys
from abc import ABC
from typing import final
from extutils import exec_timing_result
from mongodb.factory import SINGLE_DB_NAME, MONGO_CLIENT
from tests.base import TestCase
# Refuse to run the suite without a dedicated single test database:
# these tests create/destroy data, so a real multi-DB deployment must
# never be targeted.
if not SINGLE_DB_NAME:
    print("Utilize single DB by setting `MONGO_DB` in environment variables "
          "to prevent the possible data corruption.")
    sys.exit(1)
class TestDatabaseMixin(TestCase, ABC):
    """
    This class should be used if the test case will make use of database.
    This class sets a single database at the beginning of the test case and destroy them after each test case.
    This class provides functionality to get the database ping.
    """

    @staticmethod
    @final
    def get_mongo_client():
        # Shared client instance configured in mongodb.factory.
        return MONGO_CLIENT

    @staticmethod
    @final
    def get_db_name():
        # Name of the single test database (non-empty: enforced by the
        # module-level check when this module is imported).
        return SINGLE_DB_NAME

    @classmethod
    @final
    def get_collection(cls, col_name):
        """Return collection *col_name* from the single test database."""
        client = cls.get_mongo_client()
        db = cls.get_db_name()
        return client.get_database(db).get_collection(col_name)

    @classmethod
    def db_ping_ms(cls) -> float:
        """Round-trip time of a Mongo ``ping`` command, in milliseconds."""
        return exec_timing_result(MONGO_CLIENT.get_database(SINGLE_DB_NAME or "admin").command, "ping").execution_ms
|
# -*- coding: utf-8 -*-
from django.template import Library
from django.utils.translation import ugettext as _
# Template-filter registry for this module; the functions below attach
# to it via the @register.filter decorator.
register = Library()
@register.filter
def get_range(value):
    """
    Filter - returns a list containing range made from given value
    Usage (in template):
    <ul>{% for i in 3|get_range %}
      <li>{{ i }}. Do something</li>
    {% endfor %}</ul>
    Results with the HTML:
    <ul>
      <li>0. Do something</li>
      <li>1. Do something</li>
      <li>2. Do something</li>
    </ul>
    Instead of 3 one may use a variable set in the views
    """
    return range(value)
@register.filter
def get_range1(value):
    """Like ``get_range`` but 1-based: yields 1 .. value inclusive."""
    return range(1, value + 1)
@register.filter
def get_user(tickets, user):
    # Look up *user*'s ticket in the mapping and return its type
    # (KeyError if the user has no ticket).
    return tickets[user].ticket_type
@register.filter
def get_user_seat(dictionary, key):
    # dict.get: returns None instead of raising when *key* is missing.
    return dictionary.get(key)
@register.filter
def get_seating_url(dictionary, key):
    # NOTE(review): raises AttributeError when *key* is missing, since
    # dictionary.get(key) is then None — assumes the key always exists.
    return dictionary.get(key).seating.get_absolute_url()
@register.filter
def get_seat_placement(dictionary, key):
    # NOTE(review): like get_seating_url, assumes *key* exists.
    return dictionary.get(key).placement
@register.filter
def get_seat_info(dictionary, key):
    """Translated, human-readable seat description: "<seating>, seat <n>"."""
    seat = dictionary.get(key)
    return _(u'{0}, seat {1}').format(seat.seating.title, seat.placement)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_pagedown import PageDown
from config import config
# Flask extensions are created unbound here and attached to a concrete
# app inside create_app() (application-factory pattern).
db = SQLAlchemy()
bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
pagedown = PageDown()
login_manager = LoginManager()
login_manager.session_protection = 'strong'  # invalidate sessions on client changes
login_manager.login_view = 'auth.login'  # endpoint for @login_required redirects
def create_app(config_name):
    """Application factory: build and configure a Flask app.

    config_name: key into the ``config`` dict selecting the settings
    class to load.  Returns the fully wired Flask application.
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind the module-level extension singletons to this app instance.
    db.init_app(app)
    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    login_manager.init_app(app)
    pagedown.init_app(app)
    # Blueprints are imported here rather than at module top —
    # presumably to avoid circular imports with the extension objects
    # above (verify against the blueprint modules).
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')
    from .api_1_0 import api as api_1_0_blueprint
    app.register_blueprint(api_1_0_blueprint, url_prefix='/api/v1.0')
    return app
# Imported at the bottom of the module — presumably to avoid a circular
# import with .models (verify).  Makes unauthenticated requests use the
# project's AnonymousUser instead of Flask-Login's default.
from .models import AnonymousUser
login_manager.anonymous_user = AnonymousUser
|
# Auto-generated-style cell description for TBUFX20 (pins A, OE, Y;
# the three_state arcs mark it as an output-enable controlled buffer).
Desc = cellDescClass("TBUFX20")
Desc.properties["cell_leakage_power"] = "6496.329600"
Desc.properties["cell_footprint"] = "tbuf"
Desc.properties["area"] = "76.507200"
Desc.pinOrder = ['A', 'OE', 'Y']
# Timing arcs: A combinationally drives Y; OE tri-states Y.
Desc.add_arc("A","Y","combi")
Desc.add_arc("OE","Y","three_state_disable")
Desc.add_arc("OE","Y","three_state_enable")
Desc.add_param("area",76.507200);
Desc.add_pin("A","input")
Desc.add_pin("Y","output")
Desc.add_pin_func("Y","unknown")
Desc.add_pin("OE","input")
# Register the cell in the global library under its name.
CellLib["TBUFX20"]=Desc
|
# TODO: Implement genetic algorithms
|
from filesystem import InjectText, CopyFile, CreateLink, CreateDirectory
|
class DbDig:
    """Facade that dispatches schema-inspection calls to a DB-specific
    implementation (DbDigOracle or DbDigPostgres) via __getattr__.

    conn: a DB-API connection for the chosen backend.
    dbtype: "Oracle" selects DbDigOracle; anything else gets Postgres.
    """

    def __init__(self, conn, dbtype="Postgres"):
        # Bug fix: the original assigned ``self.Imp = None`` (misspelled),
        # leaving a stray dead attribute; initialize the real one instead.
        self.Impl = None
        self.Type = dbtype
        if dbtype == "Oracle":
            self.Impl = DbDigOracle(conn)
        else:  # assume Postgres
            self.Impl = DbDigPostgres(conn)

    def __getattr__(self, name):
        # Delegate every unknown attribute to the concrete implementation.
        return getattr(self.Impl, name)
class DbDigImpl:
    """Helpers shared by the DB-specific schema diggers."""

    def getTableNameSpace(self, nspace, table):
        """Support "schema.table" in *table*: an embedded schema overrides
        *nspace*.  Returns the (namespace, table) pair."""
        parts = table.split('.', 1)
        if len(parts) < 2:
            return nspace, table
        return parts[0], parts[1]
class DbDigOracle(DbDigImpl):
    """Oracle implementation: reads column metadata from ALL_TAB_COLUMNS."""

    def __init__(self, conn):
        # conn: DB-API connection to an Oracle database.
        self.Conn = conn

    def columns(self, nspace, table):
        """Return (column_name, data_type) rows for nspace.table."""
        c = self.Conn.cursor()
        nspace, table = self.getTableNameSpace(nspace, table)
        # NOTE(review): SQL built with %-interpolation of identifiers —
        # safe only for trusted input (injection risk otherwise).
        c.execute("""select column_name, data_type
            from all_tab_columns
            where upper(owner)=upper('%s') and upper(table_name)=upper('%s')""" %
            (nspace, table))
        return c.fetchall()
class DbDigPostgres(DbDigImpl):
    """PostgreSQL schema digger.

    Answers metadata questions (databases, namespaces, tables, columns,
    indexes, primary/foreign keys) by querying pg_catalog and
    information_schema through a psycopg2-style connection.
    NOTE(review): SQL throughout is built with %-interpolation of
    identifiers; safe only for trusted input.
    """

    def __init__(self, conn):
        """Keep the connection and parse its DSN string into self.Dsn
        (the password entry is deliberately dropped)."""
        self.Conn = conn
        self.Dsn = {}
        dsn = self.Conn.dsn
        dsn = dsn.split(' ')
        for v in dsn:
            v = v.split('=')
            if v[0] == 'password':
                # Never keep the password around.
                continue
            self.Dsn[v[0]] = v[1]

    def dsn(self):
        # Parsed DSN dict (without the password).
        return self.Dsn

    def dbases(self):
        """
        Find all databases
        """
        c = self.Conn.cursor()
        sql = """select datname from %s""" % ('pg_database',)
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            return [d[0] for d in dd]
        else:
            # NOTE(review): returns None here while nspaces()/tables()
            # return [] — inconsistent empty results.
            return None

    def nspaces(self):
        """
        Find all namespaces
        """
        c = self.Conn.cursor()
        # Excludes pg_* system schemas and information_schema.
        sql = """select nspname from pg_namespace where nspname !~ 'pg_' and nspname != 'information_schema'"""
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            return [d[0] for d in dd]
        else:
            return []

    def tables(self, nspace):
        """
        Find all tables
        """
        c = self.Conn.cursor()
        # relkind='r': ordinary tables only.
        sql = """select relname from pg_class where relnamespace=(select OID from pg_namespace where nspname='%s') and relkind='r'""" % (nspace,)
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            return [d[0] for d in dd]
        else:
            return []

    def columns(self, nspace, table):
        """
        Find all columns
        """
        nspace, table = self.getTableNameSpace(nspace, table)
        c = self.Conn.cursor()
        # One row per live (attnum > 0, not dropped) column:
        # (name, formatted type, default expression, NOT NULL flag, comment).
        sql = """SELECT a.attname as "Column",
            pg_catalog.format_type(a.atttypid, a.atttypmod) as "Type",
            (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)
            FROM pg_catalog.pg_attrdef d
            WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) as "Modifiers",
            a.attnotnull as "Not NULL", pg_catalog.col_description(a.attrelid, a.attnum) as "Description"
            FROM pg_catalog.pg_attribute a
            WHERE a.attrelid = (select OID from pg_catalog.pg_class where relnamespace=(select OID from pg_namespace where nspname='%s') and relname='%s')
            AND a.attnum > 0 AND NOT a.attisdropped
            ORDER BY a.attnum;
            """ % (nspace, table, )
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            return dd
            ### return pp(c, dd)
        else:
            return []

    def indexes(self, nspace, table):
        """
        Find indexes
        """
        nspace, table = self.getTableNameSpace(nspace, table)
        c = self.Conn.cursor()
        sql = """SELECT indexname,indexdef FROM pg_indexes WHERE schemaname='%s' AND tablename = '%s'
            """ % (nspace, table, )
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            return dd
        else:
            return []

    def pKey(self, nspace, table):
        """
        Find primary keys
        """
        nspace, table = self.getTableNameSpace(nspace, table)
        c = self.Conn.cursor()
        sql = """SELECT constraint_name FROM information_schema.table_constraints WHERE table_schema='%s' AND table_name='%s' AND constraint_type='PRIMARY KEY'
            """ % (nspace, table, )
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            #return dd
            return self.keyDef(nspace, dd[0][0])[-1]  # Return only column list
        else:
            return None

    def fKeys(self, nspace, table):
        """
        Find foreign keys
        """
        nspace, table = self.getTableNameSpace(nspace, table)
        c = self.Conn.cursor()
        sql = """SELECT constraint_name FROM information_schema.table_constraints WHERE table_schema='%s' AND table_name='%s' AND constraint_type='FOREIGN KEY'
            """ % (nspace, table, )
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            # One keyDef() 4-tuple per foreign-key constraint.
            reply = []
            for kn in dd:
                reply.append(self.keyDef(nspace, kn[0]))
            return reply
        else:
            return None

    def referringKeys(self, nspace, table):
        """
        Find all keys
        """
        nspace, table = self.getTableNameSpace(nspace, table)
        c = self.Conn.cursor()
        # Foreign keys declared in OTHER tables that reference *table*.
        sql = """SELECT constraint_name FROM information_schema.table_constraints WHERE
            table_schema='%s' AND constraint_type='FOREIGN KEY' AND constraint_name IN
            (SELECT constraint_name FROM information_schema.constraint_table_usage WHERE table_schema='%s' AND table_name='%s') ORDER BY table_name
            """ % (nspace, nspace, table, )
        c.execute(sql)
        dd = c.fetchall()
        c.close()
        if dd:
            reply = []
            for kn in dd:
                reply.append(self.keyDef(nspace, kn[0]))
            return reply
        else:
            return []

    def keyDef(self, nspace, kname):
        """
        Find key details
        """
        # Referring table side of the constraint.
        c = self.Conn.cursor()
        sql = """SELECT table_name, column_name FROM information_schema.key_column_usage
            WHERE table_schema='%s' AND constraint_name='%s'
            """ % (nspace, kname, )
        c.execute(sql)
        rr = c.fetchall()
        if rr:
            rtname = rr[0][0]
            rcname = tuple([v[1] for v in rr])
        else:
            return None
        # Referred table side; queries pg_catalog directly instead of
        # information_schema.constraint_column_usage.
        sql = """select table_name, column_name from
            (SELECT nr.nspname, r.relname, a.attname, c.conname
            FROM pg_namespace nr, pg_class r, pg_attribute a,
            pg_namespace nc, pg_constraint c
            WHERE nr.oid = r.relnamespace AND r.oid = a.attrelid AND
            nc.oid = c.connamespace AND
            CASE
            WHEN c.contype = 'f'::"char" THEN r.oid =
            c.confrelid AND (a.attnum = ANY (c.confkey))
            ELSE r.oid = c.conrelid AND (a.attnum = ANY (c.conkey))
            END AND NOT a.attisdropped AND (c.contype = ANY
            (ARRAY['p'::"char", 'u'::"char", 'f'::"char"])) AND r.relkind =
            'r'::"char")
            as x(namespace, table_name, column_name, constraint_name)
            where namespace='%s' and constraint_name='%s'""" % (nspace, kname,)
        c.execute(sql)
        pp = c.fetchall()
        if pp:
            ptname = pp[0][0]
            pcname = tuple([v[1] for v in pp])
        else:
            return None
        c.close()
        # Combined result: (referring table, its columns,
        #                   referred table, its columns).
        return (rtname, rcname, ptname, pcname)
def pp(cursor, data=None, rowlens=0):
    """Pretty-print a DB-API result set as an aligned text table.

    cursor: DB-API cursor whose .description is populated.
    data: rows to print; fetched from *cursor* when omitted/empty.
    rowlens: when truthy, widen columns to fit the actual row values.
    Returns the table as a single newline-joined string, or a
    "#### NO RESULTS ###" marker when the cursor has no description.
    """
    d = cursor.description
    if not d:
        return "#### NO RESULTS ###"
    names = []
    lengths = []
    rules = []
    if not data:
        # Bug fix: the original assigned the fetched rows to an unused
        # local ``t``, leaving ``data`` as None and crashing below.
        data = cursor.fetchall()
    for dd in d:  # iterate over column descriptions
        # display_size (index 2) gives the column width; fall back to 12.
        l = dd[2]
        if not l:
            l = 12  # or default arg ...
        l = max(l, len(dd[0]))  # handle long names
        names.append(dd[0])
        lengths.append(l)
    for col in range(len(lengths)):
        if rowlens:
            rls = [len(str(row[col])) for row in data if row[col]]
            lengths[col] = max([lengths[col]] + rls)
        rules.append("-" * lengths[col])
    # Renamed from ``format`` to avoid shadowing the builtin.
    fmt = " ".join(["%%-%ss" % l for l in lengths])
    result = [fmt % tuple(names)]
    result.append(fmt % tuple(rules))
    for row in data:
        result.append(fmt % row)
    return "\n".join(result)
if __name__ == '__main__':
    # CLI usage:
    #   script [-p port] [-h host] [-U user] [-W password] [db [nspace [table]]]
    # 0 positional args -> list databases, 1 -> namespaces, 2 -> tables,
    # 3 -> full detail (columns, primary key, indexes, FKs, referring keys).
    import psycopg2
    import getopt, sys
    opts, args = getopt.getopt(sys.argv[1:], 'p:h:U:W:')
    # Defaults for a local stock PostgreSQL install.
    port = 5432
    dbname = 'postgres'
    user = 'postgres'
    host = 'localhost'
    password = ''
    for opt, val in opts:
        if opt=='-p':
            port = val
        if opt=='-U':
            user = val
        if opt=='-h':
            host = val
        if opt=='-W':
            password = val
    if args:
        dbname = args[0]
    dsn = "dbname='%s' user='%s' host='%s' port=%s password='%s'" % (dbname, user, host, port, password)
    ### print "DSN: %s" % "dbname='%s' user='%s' host='%s' port=%s password='************'" % (dbname, user, host, port, )
    try:
        conn = psycopg2.connect(dsn)
    except:
        # NOTE(review): bare except hides the actual connection error.
        print("I am unable to connect to the database")
        sys.exit(1)
    ### c = conn.cursor()
    ### c.execute("select * from %s where 1=0" % ('pg_database',))
    ### print c.description
    ### print
    ### c.close()
    dddd = DbDig(conn)
    ### print dddd.dsn()
    if len(args)==0:
        dd = dddd.dbases()
        print("Databases:")
        if dd:
            for d in dd:
                print(d)
        sys.exit(0)
    if len(args)==1:
        dd = dddd.nspaces()
        print("Namespaces:")
        if dd:
            for d in dd:
                print(d)
        sys.exit(0)
    if len(args)==2:
        nspace = args[1]
        dd = dddd.tables(nspace)
        print("Tables:")
        if dd:
            for d in dd:
                print(d)
        sys.exit(0)
    if len(args)==3:
        nspace = args[1]
        tname = args[2]
        dd = dddd.columns(nspace, tname)
        print("Columns:")
        if dd:
            for d in dd:
                print(d)
        dd = dddd.pKey(nspace, tname)
        print("Primary Key:")
        if dd:
            print(dd)
        dd = dddd.indexes(nspace, tname)
        print("Indexes:")
        if dd:
            for d in dd:
                print(d)
        dd = dddd.fKeys(nspace, tname)
        print("Foreign Keys:")
        if dd:
            for d in dd:
                print(d)
        dd = dddd.referringKeys(nspace, tname)
        print("Referring Keys:")
        if dd:
            for d in dd:
                print(d)
|
"""
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/word-search-ii
"""
import collections
from typing import List
# Neighbour offsets (up, down, left, right) and the trie end marker.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
END_OF_WORD = "#"


class Solution(object):
    """LeetCode 212 (word search II): find every word from *words* that
    can be traced on *board* through 4-adjacent cells, using each cell at
    most once per word."""

    def __init__(self):
        self.n = None
        self.m = None
        self.result = None

    def find_words(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return the matched words (order unspecified)."""
        if not board or not board[0]:
            return []
        if not words:
            return []
        self.result = set()
        # Build a trie out of plain nested dicts; END_OF_WORD marks a
        # complete word.
        root = {}
        for word in words:
            node = root
            for ch in word:
                node = node.setdefault(ch, {})
            node[END_OF_WORD] = END_OF_WORD
        self.m, self.n = len(board), len(board[0])
        for row in range(self.m):
            for col in range(self.n):
                if board[row][col] in root:
                    self._dfs(board, row, col, "", root)
        return list(self.result)

    def _dfs(self, board, i, j, prefix, trie_node):
        """Depth-first walk from (i, j), extending *prefix* along the trie."""
        prefix += board[i][j]
        trie_node = trie_node[board[i][j]]
        if END_OF_WORD in trie_node:
            self.result.add(prefix)
        saved, board[i][j] = board[i][j], '@'  # '@' marks the cell as in use
        for step in range(4):
            r, c = i + dx[step], j + dy[step]
            inside = 0 <= r < self.m and 0 <= c < self.n
            if inside and board[r][c] != '@' and board[r][c] in trie_node:
                self._dfs(board, r, c, prefix, trie_node)
        board[i][j] = saved  # restore for sibling paths
if __name__ == '__main__':
    # Smoke test: expect {"oath", "eat", "klf"} in some order.
    _board = [['o', 'a', 'a', 'n'], ['e', 't', 'a', 'e'], ['i', 'h', 'k', 'r'], ['i', 'f', 'l', 'v']]
    _words = ["oath", "pea", "eat", "rain", "klf"]
    solution = Solution()
    print(solution.find_words(_board, _words))
|
import os
import re
import boto3
import requests
def download_from_s3():
    """Mirror the 'heartrate-power-streams' S3 bucket into ./data/.

    Every "<athlete>/<activity>.json" object is saved under
    data/<athlete>/, and for each athlete seen, "<athlete>.csv" is saved
    as data/<athlete>_activities.csv.
    """
    s3_bucket_name = 'heartrate-power-streams'
    s3_base_url = 'https://s3-eu-west-1.amazonaws.com/heartrate-power-streams/{key}'
    s3 = boto3.resource('s3')
    s3_bucket = s3.Bucket(s3_bucket_name)
    athletes = set()
    for obj in s3_bucket.objects.all():
        # Raw string + escaped dot: the original '.json$' also matched
        # keys such as "foo/barxjson".
        match = re.search(r'^(?P<athlete>.*)/(?P<activity>.*)\.json$', obj.key)
        if match:
            athlete = match.group('athlete')
            activity = match.group('activity')
            directory = os.path.join('data', athlete)
            # exist_ok avoids the check-then-create race of the original
            # os.path.exists()/makedirs() pair.
            os.makedirs(directory, exist_ok=True)
            athletes.add(athlete)
            r = requests.get(s3_base_url.format(key=obj.key))
            r.raise_for_status()  # fail loudly instead of saving an error page
            with open(os.path.join(directory, activity + '.json'), "w") as f:
                f.write(r.content.decode('utf-8'))
    for athlete in athletes:
        r = requests.get(s3_base_url.format(key=athlete + '.csv'))
        r.raise_for_status()
        with open(os.path.join('data', athlete + '_activities.csv'), 'w') as f:
            f.write(r.content.decode('utf-8'))
if __name__ == "__main__":
    # Run the full bucket mirror when invoked as a script.
    download_from_s3()
|
from django.urls import path
from . import views
from .views import buchung, bausparvertrag, sparbeitrag, sparbeitrag_job
from .views.index import IndexView
# URL scheme: bausparvertrag is the root resource; buchung, sparbeitrag
# and sparbeitrag jobs nest under it.  Converter names such as
# <int:bausparvertrag__id> match the keyword arguments the class-based
# views expect — presumably used as queryset filter lookups (verify in
# the view classes).
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('bausparvertrag/', bausparvertrag.ListView.as_view(), name='bausparvertrag_list'),
    path('bausparvertrag/new', bausparvertrag.CreateView.as_view(), name='bausparvertrag_create'),
    path('bausparvertrag/<int:pk>/', bausparvertrag.DetailView.as_view(), name='bausparvertrag_detail'),
    path('bausparvertrag/<int:bausparvertrag__id>/buchung/', buchung.ListView.as_view(), name='buchung_list'),
    path('bausparvertrag/<int:bausparvertrag__id>/buchung/new', buchung.CreateView.as_view(), name='buchung_create'),
    path('bausparvertrag/<int:bausparvertrag__id>/sparbeitrag/', sparbeitrag.ListView.as_view(), name='sparbeitrag_list'),
    path('bausparvertrag/<int:bausparvertrag__id>/sparbeitrag/new', sparbeitrag.CreateView.as_view(), name='sparbeitrag_create'),
    path('bausparvertrag/<int:bausparvertrag__id>/sparbeitrag/<int:sparbeitrag__id>/job/', sparbeitrag_job.ListView.as_view(), name='sparbeitrag_job_list'),
    path('bausparvertrag/<int:bausparvertrag__id>/sparbeitrag/<int:sparbeitrag__id>/job/new', sparbeitrag_job.CreateView.as_view(), name='sparbeitrag_job_create'),
]
|
import collections
class Solution:
    # Multi-source BFS from every initially-rotten orange (similar to 554).
    def orangesRotting(self, grid) -> int:
        """Minutes until no fresh orange remains, or -1 if impossible.

        grid: matrix of 0 (empty) / 1 (fresh) / 2 (rotten); mutated in
        place as oranges rot.
        """
        if not grid:
            return -1
        rows, cols = len(grid), len(grid[0])
        sources = [(r, c)
                   for r in range(rows)
                   for c in range(cols)
                   if grid[r][c] == 2]
        frontier = collections.deque(sources)
        visited = set(sources)
        minutes = 0
        while frontier:
            # Drain exactly one BFS layer per minute.
            for _ in range(len(frontier)):
                r, c = frontier.popleft()
                for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                    if 0 <= nr < rows and 0 <= nc < cols and (nr, nc) not in visited:
                        visited.add((nr, nc))
                        if grid[nr][nc] == 1:
                            frontier.append((nr, nc))
                            grid[nr][nc] = 2
            if frontier:
                minutes += 1
        # Any fresh orange left is unreachable.
        if any(1 in row for row in grid):
            return -1
        return minutes
mat = [[2, 1, 1], [1, 1, 0], [0, 1, 1]]
# NOTE(review): the assignment above is immediately overwritten; only
# the second grid is actually tested (expected output: -1).
mat = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
solution = Solution()
orangesRotting = solution.orangesRotting(mat)
print(orangesRotting)
|
# Bump the next-to-last dot-separated component of the version stored in
# twelve_step/version.txt.  NOTE(review): for an "X.Y.Z" string index -2
# is the MIDDLE component, despite the variable name "patch" — TODO
# confirm which component is intended.
# Plain "r"/"w" modes replace the original "r+"/"w+": neither the extra
# write nor the extra read access was used.
with open("./twelve_step/version.txt", "r") as version_file:
    version = version_file.readline().replace("\n", "")
with open("./twelve_step/version.txt", "w") as version_file:
    version_parts = version.split(".")
    patch = version_parts[-2]
    version_parts[-2] = str(int(patch) + 1)
    version_file.write(".".join(version_parts))
|
import time
import srt
import re
import datetime
from mqtthandler import MQTTHandler
# Template for the status payload published over MQTT: which video is
# playing, the event timestamp (seconds), and how many events fired.
INIT_STATUS={
    "video": {
        "title": None,
        "series_title": None,
        "season": None,
        "episode": None
    },
    "time": None,
    "events": None
}
class SubtitleHandler:
    """Scan SRT subtitles for configured phrases and publish a status
    message over MQTT for every subtitle that matches."""

    def __init__(self, broker):
        # Bug fix: subtitles/phrases used to be mutable CLASS attributes,
        # silently shared by every instance; make them per-instance.
        self.subtitles = []
        self.phrases = []
        self.mqtt = MQTTHandler(broker)

    def parseSRT(self, srt_filename):
        """Load *srt_filename* and cache its subtitles as a list."""
        # "with" closes the file even when srt.parse() raises.
        with open(srt_filename, "r") as f:
            self.subtitles = list(srt.parse(f.read()))
        return self.subtitles

    def parsePhrases(self, phrase_filename):
        """Load "<phrase>/<events>" lines from *phrase_filename* and
        append them (split on "/") to self.phrases."""
        with open(phrase_filename, "r") as f:
            for line in f.readlines():
                phrase = line.rstrip("\n\r").split("/")
                self.phrases.append(phrase)
        return self.phrases

    def isPhraseInLine(self, phrase, sub, content):
        """Count non-overlapping occurrences of *phrase* in *content*.

        Both sides are lowercased and stripped of punctuation first.
        (*sub* is unused; kept for interface compatibility.)
        """
        sub_line = re.sub('[^A-Za-z0-9\s]+', '', str(content)).lower()
        phrase = re.sub('[^A-Za-z0-9\s]+', '', str(phrase)).lower()
        count = 0
        while bool(re.search(phrase, sub_line)):
            count += 1
            sub_line = sub_line.replace(phrase, '', 1)
        return count

    def getEventTime(self, sub):
        """Midpoint of the subtitle's display window, in whole seconds."""
        middle = sub.end - sub.start
        between_sec = datetime.timedelta.total_seconds(middle) / 2
        sec = between_sec + datetime.timedelta.total_seconds(sub.start)
        return int(sec)

    def matchEventToMovie(self, movie, subtitles, phrases, time_offset):
        """Publish a status message for every subtitle matching a phrase.

        phrases: iterable of (phrase, events) pairs.  *time_offset* is
        added to each event timestamp.
        """
        import copy
        # Bug fix: the original aliased and mutated the global
        # INIT_STATUS template; work on a private deep copy instead.
        status = copy.deepcopy(INIT_STATUS)
        status["video"]["title"] = movie
        for sub in subtitles:
            mult = 0
            for phrase in phrases:
                line = phrase[0]
                events = phrase[1]
                mult += self.isPhraseInLine(line, sub, sub.content)
            # TODO(review): a subtitle spanning two spoken lines gets one
            # combined timestamp; the original's commented-out draft tried
            # splitting the content in half to time each half separately.
            if mult > 0:
                # NOTE(review): 'events' is whatever the LAST phrase in the
                # list set — matches the original behavior, but looks
                # unintended when multiple phrases match.
                status["time"] = self.getEventTime(sub) + time_offset
                status["events"] = int(events) * mult
                self.sendMessage(status)

    def sendMessage(self, msg):
        """Publish *msg* via MQTT, echo it to stdout, and return it."""
        self.mqtt.send(msg)
        print(msg)
        return msg

    def isDone(self):
        return True
import pytest
from _midynet.prior import sbm
from midynet.config import Wrapper
@pytest.fixture
def wrapper():
    """A Wrapper around a BlockUniformPrior, with the underlying
    BlockCountUniformPrior exposed as the "block_count" extra."""
    size = 100
    max_block_count = 10
    block_count = sbm.BlockCountUniformPrior(max_block_count)
    blocks = sbm.BlockUniformPrior(size, block_count)
    return Wrapper(
        blocks,
        block_count=block_count,
    )
def test_access_wrapped_method(wrapper):
    # Attribute access falls through the Wrapper to the wrapped prior.
    assert wrapper.get_size() == 100
    wrapper.sample_priors()
def test_get_wrap(wrapper):
    # get_wrap() exposes the wrapped object itself.
    assert isinstance(wrapper.get_wrap(), sbm.BlockUniformPrior)
def test_get_others(wrapper):
    # Extras passed as keyword args to Wrapper are retrievable by name.
    assert isinstance(
        wrapper.get_other("block_count"), sbm.BlockCountUniformPrior
    )
def test_correct_setup(wrapper):
    # The wrapped prior's block-count prior and the "block_count" extra
    # must be the very same instance, not copies.
    assert id(wrapper.get_block_count_prior()) == id(
        wrapper.get_others()["block_count"]
    )
if __name__ == "__main__":
    # Tests are meant to be run through pytest; direct execution is a no-op.
    pass
|
# -*- coding: utf-8 -*-
class Solution(object):
    """LeetCode 279: least number of perfect squares summing to n.

    Ported from Python 2 (``xrange`` and ``/`` integer division) so the
    class runs on Python 3; behavior is unchanged.
    """

    def numSquares(self, n):
        """Bottom-up DP; dp[i] = least number of squares summing to i."""
        dp = [float("inf") for x in range(n + 1)]
        dp[0] = 0
        i = 0
        while i <= n:
            j = 1
            while j * j <= i:
                # dp[i]: integer i can be consist of least number of square
                dp[i] = min(dp[i], dp[i - j * j] + 1)
                j += 1
            i += 1
        return dp[n]

    def numSquares2(self, n):
        """Recursion: for every square <= n, take as many copies as fit
        and recurse on the remainder."""
        res = n
        # num = 1 is meaningless (res = n)
        num = 2
        while num * num <= n:
            a = n // (num * num)  # '//' keeps Python 2's integer division
            b = n % (num * num)
            res = min(res, a + self.numSquares2(b))
            num += 1
        return res

    def numSquares3(self, n):
        """Recursion: starting from each base, at every step either keep
        the current root or advance it by one, subtracting its square
        from the remainder."""
        self.res = n

        def helper(curr, num, remain):
            square = num * num
            # Exit: remainder too small for this square.
            if remain < square:
                return
            if remain - square == 0:
                self.res = min(self.res, curr + 1)
                return
            else:
                tmp = remain - square
                helper(curr + 1, num, tmp)
                helper(curr + 1, num + 1, tmp)

        for i in range(1, n):
            helper(0, i, n)
        return self.res
# Smoke test: 12 = 4 + 4 + 4 -> expect 3.
# print() call replaces the Python 2 print statement.
test = Solution()
print(test.numSquares3(12))
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
"""Common functions used for odps input."""
from easy_rec.python.protos.dataset_pb2 import DatasetConfig
def is_type_compatiable(odps_type, input_type):
    """Check that odps_type is compatible with input_type.

    odps_type: ODPS column type name ('bigint', 'string' or 'double').
    input_type: a DatasetConfig field-type value.
    Exact matches pass; otherwise any float type matches any float type
    and any int type matches any int type.  (Function name keeps the
    original misspelling for API compatibility.)
    """
    type_map = {
        'bigint': DatasetConfig.INT64,
        'string': DatasetConfig.STRING,
        'double': DatasetConfig.DOUBLE
    }
    # KeyError here means an ODPS type outside the three supported ones.
    tmp_type = type_map[odps_type]
    if tmp_type == input_type:
        return True
    else:
        float_types = [DatasetConfig.FLOAT, DatasetConfig.DOUBLE]
        int_types = [DatasetConfig.INT32, DatasetConfig.INT64]
        if tmp_type in float_types and input_type in float_types:
            return True
        elif tmp_type in int_types and input_type in int_types:
            return True
        else:
            return False
def check_input_field_and_types(data_config):
    """Check compatibility of input in data_config.

    check that data_config.input_fields are compatible with
    data_config.selected_cols and data_config.selected_types.

    Args:
      data_config: instance of DatasetConfig

    Raises:
      AssertionError: when a field is missing from selected_cols or its
        type is incompatible with the declared column type.
    """
    input_fields = [x.input_name for x in data_config.input_fields]
    input_field_types = [x.input_type for x in data_config.input_fields]
    # Normalize empty/falsy proto fields to None.
    selected_cols = data_config.selected_cols if data_config.selected_cols else None
    selected_col_types = data_config.selected_col_types if data_config.selected_col_types else None
    if not selected_cols:
        # Nothing selected: nothing to validate.
        return
    selected_cols = selected_cols.split(',')
    for x in input_fields:
        assert x in selected_cols, 'column %s is not in table' % x
    if selected_col_types:
        selected_types = selected_col_types.split(',')
        # Column name -> declared ODPS type (parallel comma lists).
        type_map = {x: y for x, y in zip(selected_cols, selected_types)}
        for x, y in zip(input_fields, input_field_types):
            tmp_type = type_map[x]
            assert is_type_compatiable(tmp_type, y), \
                'feature[%s] type error: odps %s is not compatible with input_type %s' % (
                    x, tmp_type, DatasetConfig.FieldType.Name(y))
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from python_qt_binding.QtCore import QAbstractTableModel, QModelIndex, Qt, qWarning
from python_qt_binding.QtGui import QBrush, QIcon
from .message import Message
from .message_list import MessageList
class MessageDataModel(QAbstractTableModel):
# the column names must match the message attributes
columns = ['message', 'severity', 'node', 'stamp', 'topics', 'location']
severity_colors = {
Message.DEBUG: QBrush(Qt.darkCyan),
Message.INFO: QBrush(Qt.darkBlue),
Message.WARN: QBrush(Qt.darkYellow),
Message.ERROR: QBrush(Qt.darkRed),
Message.FATAL: QBrush(Qt.red),
}
def __init__(self):
super(MessageDataModel, self).__init__()
self._messages = MessageList()
self._message_limit = 20000
self._info_icon = QIcon.fromTheme('dialog-information')
self._warning_icon = QIcon.fromTheme('dialog-warning')
self._error_icon = QIcon.fromTheme('dialog-error')
# BEGIN Required implementations of QAbstractTableModel functions
def rowCount(self, parent=None):
return len(self._messages)
def columnCount(self, parent=None):
return len(MessageDataModel.columns) + 1
def data(self, index, role=None):
if role is None:
role = Qt.DisplayRole
if index.row() >= 0 and index.row() < len(self._messages):
msg = self._messages[index.row()]
if index.column() == 0:
if role == Qt.DisplayRole:
return '#%d' % msg.id
elif index.column() > 0 and index.column() < len(MessageDataModel.columns) + 1:
column = MessageDataModel.columns[index.column() - 1]
if role == Qt.DisplayRole or role == Qt.UserRole:
if column == 'stamp':
if role != Qt.UserRole:
data = msg.get_stamp_string()
else:
data = msg.get_stamp_for_compare()
else:
data = getattr(msg, column)
# map severity enum to label
if role == Qt.DisplayRole and column == 'severity':
data = Message.SEVERITY_LABELS[data]
# implode topic names
if column == 'topics':
data = ', '.join(data)
# append row number to define strict order
if role == Qt.UserRole:
# append row number to define strict order
# shortest string representation to compare stamps
# print(column, data, str(index.row()).zfill(len(str(len(self._messages)))))
data = str(data) + ' %08x' % index.row()
return data
# decorate message column with severity icon
if role == Qt.DecorationRole and column == 'message':
if msg.severity in [Message.DEBUG, Message.INFO]:
return self._info_icon
elif msg.severity in [Message.WARN]:
return self._warning_icon
elif msg.severity in [Message.ERROR, Message.FATAL]:
return self._error_icon
# colorize severity label
if role == Qt.ForegroundRole and column == 'severity':
assert msg.severity in MessageDataModel.severity_colors, \
'Unknown severity type: %s' % msg.severity
return MessageDataModel.severity_colors[msg.severity]
if role == Qt.ToolTipRole and column != 'severity':
if column == 'stamp':
data = msg.get_stamp_string()
elif column == 'topics':
data = ', '.join(msg.topics)
else:
data = getattr(msg, column)
# <font> tag enables word wrap by forcing rich text
return '<font>' + data + '<br/><br/>' + \
self.tr('Right click for menu.') + '</font>'
def headerData(self, section, orientation, role=None):
if role is None:
role = Qt.DisplayRole
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
if section == 0:
return self.tr('#')
else:
return MessageDataModel.columns[section - 1].capitalize()
if role == Qt.ToolTipRole:
if section == 0:
return self.tr('Sort the rows by serial number in descendig order')
else:
return self.tr(
'Sorting the table by a column other then the serial number slows down the '
'interaction especially when recording high frequency data')
# END Required implementations of QAbstractTableModel functions
def get_message_limit(self):
return self._message_limit
def set_message_limit(self, new_limit):
self._message_limit = new_limit
self._enforce_message_limit(self._message_limit)
def _enforce_message_limit(self, limit):
if len(self._messages) > limit:
self.beginRemoveRows(QModelIndex(), limit, len(self._messages) - 1)
del self._messages[limit:len(self._messages)]
self.endRemoveRows()
def insert_rows(self, msgs):
# never try to insert more message than the limit
if len(msgs) > self._message_limit:
msgs = msgs[-self._message_limit:]
# reduce model before insert
limit = self._message_limit - len(msgs)
self._enforce_message_limit(limit)
# insert newest messages
self.beginInsertRows(QModelIndex(), 0, len(msgs) - 1)
self._messages.extend(msgs)
self.endInsertRows()
def remove_rows(self, rowlist):
"""
:param rowlist: list of row indexes, ''list(int)''
:returns: True if the indexes were removed successfully, ''bool''
"""
if len(rowlist) == 0:
if len(self._messages) > 0:
try:
self.beginRemoveRows(QModelIndex(), 0, len(self._messages))
del self._messages[0:len(self._messages)]
self.endRemoveRows()
except:
return False
else:
rowlist = list(set(rowlist))
rowlist.sort(reverse=True)
dellist = [rowlist[0]]
for row in rowlist[1:]:
if dellist[-1] - 1 > row:
try:
self.beginRemoveRows(QModelIndex(), dellist[-1], dellist[0])
del self._messages[dellist[-1]:dellist[0] + 1]
self.endRemoveRows()
except:
return False
dellist = []
dellist.append(row)
if len(dellist) > 0:
try:
self.beginRemoveRows(QModelIndex(), dellist[-1], dellist[0])
del self._messages[dellist[-1]:dellist[0] + 1]
self.endRemoveRows()
except:
return False
return True
def get_selected_text(self, rowlist):
"""
Returns an easily readable block of text for the currently selected rows
:param rowlist: list of row indexes, ''list(int)''
:returns: the text from those indexes, ''str''
"""
text = None
if len(rowlist) != 0:
text = ''
rowlist = list(set(rowlist))
for row in rowlist:
text += self._messages[row].pretty_print()
return text
def get_time_range(self, rowlist):
"""
:param rowlist: a list of row indexes, ''list''
:returns: a tuple of min and max times in a rowlist in
'(unix timestamp).(fraction of second)' format, ''tuple(str,str)''
"""
min_ = float("inf")
max_ = float("-inf")
for row in rowlist:
item = self._messages[row].time_as_datestamp()
if float(item) > float(max_):
max_ = item
if float(item) < float(min_):
min_ = item
return min_, max_
def get_unique_nodes(self):
nodes = set()
for message in self._messages:
nodes.add(message.node)
return nodes
def get_unique_severities(self):
severities = set()
for message in self._messages:
severities.add(message.severity)
return severities
def get_unique_topics(self):
topics = set()
for message in self._messages:
for topic in message.topics:
topics.add(topic)
return topics
def get_severity_dict(self):
return Message.SEVERITY_LABELS
def get_message_between(self, start_time, end_time=None):
"""
:param start_time: time to start in timestamp form (including decimal
fractions of a second is acceptable, ''unixtimestamp''
:param end_time: time to end in timestamp form (including decimal
fractions of a second is acceptable, ''unixtimestamp'' (Optional)
:returns: list of messages in the time range ''list[message]''
"""
msgs = []
for message in self._messages:
msg_time = message.stamp[0] + float(message.stamp[1]) / 10**9
if msg_time >= float(start_time) and (end_time is None or msg_time <= float(end_time)):
msgs.append(message)
return msgs
|
from django.db import transaction
from django.template.response import TemplateResponse
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from bambu_webhooks.forms import ReceiverForm
@login_required
@permission_required('webhooks.change_webhook')
def webhooks(request):
    """
    The webhook management view. It autodiscovers the registered webhooks and creates a form with a separate
    URL field for each one. Once validated, the form is saved and the user redirected back to the same page,
    with a message letting them know the process was a success.
    This view requires a valid user account with ``webhooks.change_webhook`` permission.
    """
    form = ReceiverForm(
        request.POST or None,
        user = request.user
    )
    if request.method == 'POST' and form.is_valid():
        # Save inside a transaction so partial webhook updates never persist.
        with transaction.commit_on_success():
            form.save()
        messages.success(request, u'Your webhook settings have been saved successfully.')
        return HttpResponseRedirect(
            reverse('webhooks_manage')
        )
    return TemplateResponse(
        request,
        'webhooks/manage.html',
        {
            'form': form,
            # Fixed: 'menu_selection' appeared twice in this dict literal; the
            # earlier 'profile' value was silently overwritten by
            # 'profile:webhooks', so only the effective entry is kept.
            'menu_selection': 'profile:webhooks',
            'title_parts': ('Webhooks', 'My profile'),
            'breadcrumb_trail': (
                ('../../', u'Home'),
                ('../', u'My profile'),
                ('', u'Webhooks')
            )
        }
    )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial schema for the ``content`` app: a translatable Content model
    # split across two tables (base fields plus per-language translations).

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Content',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('is_page', models.BooleanField(default=True)),
                ('created', models.DateTimeField(verbose_name='created', auto_now_add=True)),
                ('published', models.DateTimeField(blank=True, null=True, verbose_name='published')),
                ('allow_comments', models.BooleanField(verbose_name='allow comments', default=False)),
                ('template_name', models.CharField(blank=True, max_length=70, verbose_name='template name', help_text="Example: 'content/contact_page.html'. If this isn't provided, the system will use 'content/default.html'.")),
                ('registration_required', models.BooleanField(verbose_name='registration required', default=False)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ContentTranslation',
            fields=[
                ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
                ('title', models.CharField(max_length=255, verbose_name='title')),
                ('url', models.CharField(max_length=255, verbose_name='url')),
                ('text', models.TextField(blank=True, verbose_name='content')),
                ('language_code', models.CharField(max_length=15, db_index=True)),
                # Link back to the base Content row, one translation per language.
                ('master', models.ForeignKey(editable=False, null=True, related_name='translations', to='content.Content')),
            ],
            options={
                # Explicit table name instead of the default app_model naming.
                'db_table': 'content_content_translation',
                # NOTE(review): Django permission tuples are (codename, name);
                # this pair looks reversed -- confirm against the model's
                # Meta.permissions before changing, since migrations must match.
                'permissions': (('Can list all content', 'list_contents'),),
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Each translation is unique per (language, master) and per (url, language).
        migrations.AlterUniqueTogether(
            name='contenttranslation',
            unique_together=set([('language_code', 'master'), ('url', 'language_code')]),
        ),
    ]
|
from scipy.special import j1,jn_zeros
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    plt.style.use('mint')

    # Sample the jinc-squared function (2*J1(x)/x)^2 on [0, 4*pi].
    x = np.linspace(0, 4 * np.pi, 10000)
    # Fixed: at x == 0 the expression is 0/0 and produced NaN for the first
    # sample; substitute the analytic limit (2*J1(x)/x -> 1 as x -> 0) and
    # silence the transient divide/invalid warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
        y = np.where(x == 0, 1.0, (2 * j1(x) / x)**2)

    golden_mean = (np.sqrt(5) - 1) / 2  # Aesthetic ratio
    fig_width_pt = 400  # column width
    inches_per_pt = 1 / 72.27  # Convert pt to inches
    fig_width = fig_width_pt * inches_per_pt
    fig_height = fig_width_pt * inches_per_pt * golden_mean  # height in inches
    figsize = (fig_width, fig_height)
    fig, ax = plt.subplots(figsize=figsize)
    ax.set_xlabel(r'$x$')
    ax.set_ylim(0, 1)
    ax.set_xlim(0, 4*np.pi)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_xticks([0, np.pi, 2*np.pi, 3*np.pi, 4*np.pi])
    ax.set_xticklabels([r'$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
    ax.set_yticks([0, .25, .5, .75, 1.0])
    ax.set_yticklabels(['', '', r'$0.5$', '', r'$1.0$'])
    ax.annotate(r"$\left(2\frac{J_1(x)}{x} \right)^2$",
                xytext=(1.5 * np.pi, .65), xycoords='data',
                xy=(0.7 * np.pi, (2 * j1(0.7 * np.pi) / (0.7 * np.pi))**2), textcoords='data',
                arrowprops=dict(arrowstyle="->",
                                lw=1,
                                connectionstyle="arc3, rad=0.3"),
                )
    # Mark the first three zeros of J1 (the jinc side-lobe nulls).
    for i, z in enumerate(jn_zeros(1, 3)):
        ax.axvline(x=z, linestyle='--', lw=2, color="C0{0}".format(i+2), ymin=0, ymax=0.20,
                   label=r'$({0:.2f}\pi, 0)$'.format(z / np.pi))
    ax.plot(x, y, lw=2, zorder=10, color="C01")
    ax.legend()
    fig.savefig("jinc.png",
                bbox_inches='tight',
                pad_inches=0,
                transparent=False)
    plt.show()
|
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
)
import os
import telegram
import logging
import logging.config
import redis_client as rds
DEFAULT_SEARCH_RADIUS = 100
logging.config.fileConfig("logging.conf")
log = logging.getLogger("grassroot")
def start(update: telegram.Update, ctx: CallbackContext):
    """Handle /start: ask the user to share their location first."""
    update.message.reply_text("First, share your location, please")
def process_location(update: telegram.Update, ctx: CallbackContext):
    """Handle an incoming location message: store it and reply with help text.

    Fixed a coordinate swap: the original assigned
    ``message.location.latitude`` to ``longitude`` and vice versa, so every
    stored location had its axes transposed.
    """
    message: telegram.Message = update.message
    chat_id = message.chat_id
    longitude = message.location.longitude
    latitude = message.location.latitude
    msg = location(chat_id, longitude, latitude)
    return message.reply_text(msg)
def location(chat_id, longitude, latitude):
    """Persist a user's coordinates and return the bot's reply text.

    :param chat_id: Telegram chat id used as the storage key
    :param longitude: longitude to store
    :param latitude: latitude to store
    :returns: the command help text on success, or an error string
    """
    log.info(
        "[id=%d] location(chat_id=%d longitude=%f latitude=%f)",
        chat_id,
        chat_id,
        longitude,
        latitude,
    )
    if rds.set_location(chat_id, longitude, latitude) < 0:
        return "Error while saving location! Sorry.."
    return (
        "Good, now you can use next commands:\n\n"
        + "/list {radius} - show groups within your location radius (meters). 100m by default.\n"
        + "/link {group} {description} - link a group to your location.\n"
        + "/join {group} - request to join the group\n"
        + "/delete_link {group} - delete the link for Bot"
    )
def process_list_groups(update: telegram.Update, ctx: CallbackContext):
    """Handle /list: reply with the groups within the requested radius."""
    message: telegram.Message = update.message
    args = ctx.args
    # Fall back to the default search radius when no argument was given.
    radius = DEFAULT_SEARCH_RADIUS if len(args) < 1 else args[0]
    resp = list_groups(chat_id=message.chat_id, radius=radius)
    return message.reply_text(resp)
def list_groups(chat_id, radius):
    """ radius - radius to search groups """
    loc = rds.get_location(chat_id)
    if loc == 0:
        return "Error: cannot find your current location. Could you, please, send it again.."
    longitude, latitude = loc[0], loc[1]
    groups_in_radius = rds.search_groups_within_radius(
        chat_id, longitude, latitude, radius
    )
    if groups_in_radius == 0:
        return "Error while searching groups! Sorry.."
    log.info(
        "[id=%d] list_groups(chat_id=%d radius=%s) => groups in radius: %s",
        chat_id,
        chat_id,
        radius,
        groups_in_radius,
    )
    # Header line, then one CSV row per group.  A distinct name is used for
    # the per-group distance so it does not shadow the search radius.
    lines = [
        "Groups within your location radius ({}m):\n\nname,distance,description".format(
            radius
        )
    ]
    for entry in groups_in_radius:
        group_name, distance = entry[0], entry[1]
        group_desc = rds.get_description(chat_id, group_name)
        lines.append("{},{},{}".format(group_name, distance, group_desc))
    return "\n".join(lines)
def process_link_group(update: telegram.Update, ctx: CallbackContext):
    """Handle /link: link the sender's group to their stored location."""
    args = ctx.args
    message: telegram.Message = update.message
    user: telegram.User = message.from_user
    if len(args) < 1:
        return message.reply_text(
            "Invalid arguments number={} but required={}".format(len(args), 1)
        )
    group = args[0]
    # Re-assemble the free-form description.  The leading space is deliberate:
    # it matches how the words were accumulated word-by-word originally.
    description = ""
    if len(args) > 1:
        description = " " + " ".join(args[1:])
    msg = link_group(user.id, group, description)
    return message.reply_text(msg)
def link_group(chat_id, group, description):
    """Link ``group`` (with description) to the stored location of ``chat_id``.

    :param chat_id: Telegram id of the admin creating the link
    :param group: group name to link
    :param description: free-form description; spaces are encoded as '%'
    :returns: status text for the bot to send back
    """
    description = description.replace(" ", "%")
    location = rds.get_location(chat_id)
    # Fixed: the None check must run before len(location); the original order
    # raised TypeError whenever rds.get_location returned None.
    # NOTE(review): list_groups treats a 0 return from rds.get_location as
    # "not found" -- confirm whether that case needs handling here as well.
    if location is None or len(location) < 2:
        return "Error: group {} not found".format(group)
    longitude = location[0]
    latitude = location[1]
    log.info(
        "[id=%d] link_group(chat_id=%d group=%s description=%s) longitude=%s latitude=%s",
        chat_id,
        chat_id,
        group,
        description,
        longitude,
        latitude,
    )
    link_res = rds.link_group(group, description, chat_id, longitude, latitude)
    if link_res < 0:
        return "Error while creating group!"
    if link_res == 0:
        return "Group {} is already exists!".format(group)
    return "#link You have linked the group `{}` to the location: longitude={} latitude={}".format(
        group, longitude, latitude
    )
def process_join_group(update: telegram.Update, ctx: CallbackContext):
    """Handle /join: notify the group's admins that the user wants to join.

    :returns: result of the confirmation reply sent to the requester
    """
    args = ctx.args
    message: telegram.Message = update.message
    user: telegram.User = message.from_user
    chat_id = user.id
    username = user.username
    # Fixed: guard against a missing group argument, consistent with the other
    # handlers; previously args[0] raised IndexError on a bare /join.
    if len(args) < 1:
        return message.reply_text("Please, define group")
    group = args[0]
    admins_ids = rds.get_admins_ids_by(chat_id, group)
    # Fixed: use a distinct loop variable -- the original reused chat_id,
    # clobbering the requester's id inside the loop.
    for admin_id in admins_ids:
        log.info(
            "[id=%s] join_group: chat_id=%s group=%s sending join notification of %s",
            admin_id,
            admin_id,
            group,
            username,
        )
        message.bot.send_message(
            chat_id=int(admin_id),
            text="#join User @{} wants to join group `{}`".format(username, group),
        )
    return message.reply_text(
        "We have notified admins of group `{}`. They will add you soon.".format(group)
    )
# required token
# def add_admin(update: telegram.Update, ctx: CallbackContext):
# message: telegram.Message = update.message
# user: telegram.User = message.from_user
# user_id = user.id
# chat_id = message.chat_id
# group = args[0]
# new_admin = args[1]
# access_token = args[2]
# check_token
# txt = 'add yet another admin={} to group={}'.format(new_admin, group)
# rds.add_admin(group, new_admin)
# required token
# def delete_admin(update: telegram.Update, ctx: CallbackContext):
# message: telegram.Message = update.message
# user: telegram.User = message.from_user
# user_id = user.id
# chat_id = message.chat_id
# group = args[0]
# admin = args[1]
# access_token = args[2]
# check_token
def process_delete_group_link(update: telegram.Update, ctx: CallbackContext):
    """Handle /delete_link: remove the sender's group-to-location link."""
    message: telegram.Message = update.message
    args = ctx.args
    if len(args) < 1:
        return message.reply_text("Please, define group")
    admin_id = message.from_user.id
    message.reply_text(delete_group_link(admin_id, args[0]))
def delete_group_link(admin_id, group_name):
    """Delete the link for ``group_name`` owned by ``admin_id``.

    :returns: status text for the bot to send back
    """
    if rds.delete_group_link(group_name, admin_id) < 1:
        return "Error: problems with deleting group={} from cache".format(group_name)
    return "#delete_link Group={} is deleted.".format(group_name)
def main():
    """Wire up all bot handlers and run the long-polling loop."""
    updater = Updater(os.getenv("BOT_TOKEN"))
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("start", start))
    dispatcher.add_handler(MessageHandler(Filters.location, process_location))
    # All argument-taking commands share the same registration shape.
    for command, callback in (
        ("list", process_list_groups),
        ("link", process_link_group),
        ("join", process_join_group),
        ("delete_link", process_delete_group_link),
    ):
        dispatcher.add_handler(CommandHandler(command, callback, pass_args=True))
    # TODO: event loop it to process a lot of requests
    updater.start_polling()
    updater.idle()
# Run the bot only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
|
from django.contrib import admin

from .models import Item, User, Group, Receipt, ItemOnReceipt

# Register every receipt-tracking model with the default admin site so they
# can be managed through the Django admin UI.
for model in (Item, User, Group, Receipt, ItemOnReceipt):
    admin.site.register(model)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class QNetwork(nn.Module):
    """Q-network: a wrapped feature extractor followed by a small classifier.

    The extractor's 1000-dim output is mapped to 3 log-probabilities.
    """

    def __init__(self, my_pretrained_model, seed):
        """
        :param my_pretrained_model: module producing 1000 output features
        :param seed: manual torch seed for reproducible weight initialization
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.pretrained = my_pretrained_model
        # Head: 1000 features -> 512 hidden -> 3 outputs with LogSoftmax.
        # (The first Linear was changed from 2048 to 1000 inputs upstream.)
        self.fc = nn.Sequential(
            nn.Linear(1000, 512),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(512, 3),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Run the feature extractor, then the classifier head."""
        features = self.pretrained(x)
        return self.fc(features)
#!/usr/bin/env python
#
# Copyright 2018 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import builtins
import os.path as op
import distutils.sysconfig
class ResourceTracker:
def __init__(self):
self.filelist = set()
self.modulelist = set()
self._builtin_open = builtins.open
self._pysysdir = op.realpath(distutils.sysconfig.get_python_lib(standard_lib=True))
def _open_hook(self, name, *args, **kwargs):
ret = self._builtin_open(name, *args, **kwargs)
if op.isfile(name):
self.filelist.update([op.realpath(name)])
return ret
def _get_trackable_files(self):
files = set()
for mod in sys.modules.values():
if not hasattr(mod, '__file__') or mod.__file__ is None:
continue
path = op.realpath(mod.__file__)
modpath = op.dirname(path)
if modpath.startswith(self._pysysdir):
continue
if path.endswith('.pyc'):
path = path[:-1]
elif not path.endswith('.py'):
continue
files.update([path])
return files
def start_hooking(self):
self._start_modules = set(sys.modules.keys())
self._start_files = self._get_trackable_files()
builtins.open = self._open_hook
def end_hooking(self):
new_modules = set(sys.modules.keys()) - self._start_modules
self.modulelist.update(new_modules)
new_files = self._get_trackable_files() - self._start_files
self.filelist.update(new_files)
builtins.open = self._builtin_open
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pyspark.sql import SparkSession
from pyschema2 import JSONConverter, py_type_from_json_schema, array_of, nullable, \
STRING_TYPE, LONG_TYPE, BOOLEAN_TYPE, TIMESTAMP_TYPE, print_pyschema
def get_spark():
    """Create (or reuse) the SparkSession used by this test script."""
    # Memory limits can be tuned here if needed, e.g.:
    #   .config("spark.executor.memory", "2g")
    #   .config("spark.driver.memory", "2g")
    return SparkSession.builder.appName("pyspark-unit-test").getOrCreate()
# JSON-schema fragment for a nullable 2-D point with integer coordinates.
CORD_TYPE = nullable({
    "type": "object",
    "properties": {
        "x": LONG_TYPE,
        "y": LONG_TYPE,
    }
})

# JSON-schema fragment for a nullable polygon: an ingestion timestamp, a
# color name, and an array of CORD_TYPE points.
POLYGON_TYPE = nullable({
    "type": "object",
    "properties": {
        "receivedAt": TIMESTAMP_TYPE,
        "color": STRING_TYPE,
        "cords": array_of(CORD_TYPE)
    }
})
def app_main(spark, parquet_filename="/home/stonezhong/temp/foo.parquet"):
    """Convert sample JSON rows to parquet via pyschema2, then read them back.

    :param spark: active SparkSession
    :param parquet_filename: where to write the parquet output; the previously
        hard-coded path is kept as the default for backward compatibility
    """
    schema = POLYGON_TYPE
    pyschema, _ = py_type_from_json_schema(schema)
    print("**********************")
    print_pyschema(pyschema)
    print("**********************")
    # Sample rows deliberately exercise sparse data: a missing "x" coordinate,
    # and an extra "id" key that is not part of the schema.
    rows = [
        {"receivedAt": "2020-08-19T20:19:58.261Z", "color": "red", "cords": [{"y": 2}]},
        {"receivedAt": "2020-08-19T20:19:58.261Z", "color": "green", "cords": [{"x": 3, "y": 4}]},
        {"receivedAt": "2020-08-19T20:19:58.261Z", "color": "green", "id": 1},
    ]
    jcvt = JSONConverter()
    jcvt.json_rows_to_parquet(spark, rows, pyschema, parquet_filename)
    # Read the file back to verify the round trip.
    print("after loading")
    df = spark.read.parquet(parquet_filename)
    for row in df.collect():
        print(row)
    df.printSchema()
    df.show(truncate=False)
def main():
    """Entry point: run app_main with a session that is always stopped."""
    spark = get_spark()
    try:
        app_main(spark)
    finally:
        # Stop the session even when the job raises.
        spark.stop()
# Script entry point.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.