blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c740b0a5e8ffaa9d7f02ed6dadd4985ec7c52622 | Python | vl4di99/Udemy_Learn-To-Code-in-Python-3-Beg-to-Adv | /C33.py | UTF-8 | 1,621 | 3.5 | 4 | [] | no_license | import requests
import json
import random
import html

# Interactive trivia game: repeatedly fetch one easy multiple-choice question
# from the Open Trivia DB (category 12) and score the user's answers.
quit_app = ""
correct_answers_count = 0
incorrect_answers_count = 0

while quit_app != "quit":
    request_api = requests.get("https://opentdb.com/api.php?amount=1&category=12&difficulty=easy&type=multiple")
    quiz_json = json.loads(request_api.text)
    result = quiz_json['results'][0]

    print("-----=====Welcome to the quizzing game!=====-----")
    # Questions come HTML-escaped from the API; unescape for display.
    print("Your question is: " + html.unescape(result['question']))

    # Mix the correct answer in with the incorrect ones before shuffling.
    answers = result['incorrect_answers']
    cor_ans = result['correct_answer']
    answers.append(cor_ans)
    shuffled_answers = answers[:]  # real copy (previously an alias of `answers`)
    random.shuffle(shuffled_answers)

    for answer_number, answer in enumerate(shuffled_answers, start=1):
        print(str(answer_number) + ". " + html.unescape(answer))

    user_answer = input("Input your answer: ")
    try:
        choice = int(user_answer)
        # Reject 0 and negatives explicitly -- previously e.g. "0" silently
        # selected the last option via Python's negative indexing.
        if not 1 <= choice <= len(shuffled_answers):
            raise IndexError(user_answer)
        if shuffled_answers[choice - 1] == cor_ans:
            print("Your answer is CORRECT!!!")
            correct_answers_count += 1
        else:
            print("Incorrect answer! Try again!")
            print("The CORRECT answer was: " + str(cor_ans))
            incorrect_answers_count += 1
    except (ValueError, IndexError):  # narrowed from a bare except
        print("You entered a wrong number! Try again")
    quit_app = input("\nPress enter to continue or type 'quit' to quit the game").lower()
print("You had: "+str(correct_answers_count)+" correct answers and "+str(incorrect_answers_count)+" incorrect answers") | true |
db3b96e53db147b88bd2f0bcb3a6b1963111446a | Python | AZ015/design_patterns | /Behavioral/command/editor/html_document.py | UTF-8 | 306 | 3.234375 | 3 | [] | no_license | class HtmlDocument:
def __init__(self):
self._content: str = ""
def make_bold(self):
self._content = f'<b> {self._content} </b>'
@property
def content(self):
return self._content
@content.setter
def content(self, content):
self._content = content
| true |
d860fb27eebc107275b96289d593758528bfb2d9 | Python | KaustavKabi/Practical-Homies | /Credits.py | UTF-8 | 1,022 | 2.9375 | 3 | [] | no_license | from tkinter import *
from os import getcwd
from PIL import ImageTk, Image
class Credits():
    """Modal "credits" window: a centered 600x400 Toplevel showing credits.jpg
    with a Close button. Blocks in mainloop() until closed."""
    def __init__(self, master):
        self.root = Toplevel(master)
        app_width = 600
        app_height = 400
        # Center the window on the current screen.
        screen_width = self.root.winfo_screenwidth()
        screen_height = self.root.winfo_screenheight()
        self.root.geometry(f'{app_width}x{app_height}+{int((screen_width/2)-(app_width/2))}+{int((screen_height/2)-(app_height/2))}')
        # Fixed-size white window with a custom icon (Windows-style path -- assumes Windows; TODO confirm).
        self.root.configure(bg='white'); self.root.title("Practical Homies"); self.root.resizable(0, 0); self.root.iconbitmap(getcwd() + '\\AEC_logo.ico')
        # NOTE(review): credits_img is a local; Tk holds only a weak reference to
        # PhotoImage objects. It survives here only because mainloop() blocks
        # inside __init__ and keeps this frame alive.
        credits_img = ImageTk.PhotoImage(Image.open('credits.jpg').resize((540, 300), Image.ANTIALIAS))
        self.credits_label = Label(self.root, image=credits_img,borderwidth=0); self.credits_label.place(x=40, y=40)
        self.close=Button(self.root,text="Close",justify='center',bg='white', command=self.root.destroy)
        self.close.config(font=("Courier", 12, 'bold')); self.close.place(x=260, y=360)
        self.root.mainloop()
#Credits(None)
| true |
28e516d670be4410cc61b5c0a93fe46d35b20641 | Python | zzz136454872/leetcode | /fairCandySwap.py | UTF-8 | 537 | 3.203125 | 3 | [] | no_license | from typing import *
class Solution:
    def fairCandySwap(self, A: List[int], B: List[int]) -> List[int]:
        """Return [a, b] with a from A and b from B such that swapping them
        equalizes the two sums. Sorts A and B in place (as before) and
        returns [0, 0] when no such pair exists."""
        A.sort()
        B.sort()
        # Alice's pick must exceed Bob's by exactly `delta`.
        delta = (sum(A) - sum(B)) // 2
        candidates = set(B)
        for a in A:
            wanted = a - delta
            if wanted in candidates:
                return [a, wanted]
        return [0, 0]  # unreachable for valid inputs
sl=Solution()
A = [1,2,5]
B = [2,4]
print(sl.fairCandySwap(A,B))
| true |
59c31cc23c74aa613254d07354af4bc70bbacc82 | Python | surendhar-code/Python-Programs | /Basic Programs/cube_sum_of_squares.py | UTF-8 | 152 | 4.0625 | 4 | [] | no_license | n=int(input("Enter the n value : "))
# Sum of cubes 1^3 + 2^3 + ... + n^3 (n is read by the preceding input line).
cube_sum = 0
for i in range(1, n + 1):
    cube_sum += i ** 3
print("Cube sum of first {0} natural numbers is : {1}".format(n, cube_sum))
| true |
b665682da685720190309bb8167e9933def4a2d3 | Python | bluicezhen/Marmot | /server/public/func/password_hash_salt.py | UTF-8 | 201 | 2.8125 | 3 | [] | no_license | from datetime import datetime
from hashlib import sha256
def password_hash_salt(password: str, time: datetime) -> str:
    """Return the hex SHA-256 digest of "<password>:<unix timestamp>".

    The timestamp acts as the salt, so the same password hashed at different
    times yields different digests.
    """
    salted = f"{password}:{time.timestamp()}".encode("utf-8")
    return sha256(salted).hexdigest()
| true |
263add45b347120ad6b40ed859febec50e42a4c6 | Python | inkychris/3dprinting | /cura/profiles/curaprofile.py | UTF-8 | 1,506 | 2.796875 | 3 | [
"MIT"
] | permissive | import argparse
import pathlib
import zipfile
script_dir = pathlib.Path(__file__).parent.resolve()
PROFILE_EXT = '.curaprofile'
def directory(path):
    """Argparse-style validator: return `path` as a Path, requiring an
    existing directory; raises ValueError otherwise."""
    candidate = pathlib.Path(path)
    if not candidate.is_dir():
        raise ValueError(f'path is not a directory: {candidate}')
    return candidate
def cura_profile(path):
    """Argparse-style validator: `path` must be an existing file whose
    extension is PROFILE_EXT ('.curaprofile'). Returns it as a Path."""
    candidate = pathlib.Path(path)
    if not candidate.is_file():
        raise ValueError(f'path is not a file: {candidate}')
    if candidate.suffix != PROFILE_EXT:
        raise ValueError(f'expected file extension ".curaprofile", got {candidate.suffix}')
    return candidate
def pack_directory(path):
    """Zip the contents of `path` into '<path>.curaprofile' and print the archive name.

    NOTE(review): entries are stored under their bare file names
    (`archive.write(content, content.name)`), so nested files are flattened
    and same-named files in different sub-directories would collide --
    presumably intentional since unpack extracts into a flat folder; confirm.
    """
    filename = str(path) + PROFILE_EXT
    with zipfile.ZipFile(filename, 'w') as archive:
        for content in path.glob('**/*'):
            archive.write(content, content.name)
    print(filename)
def unpack_profile(path):
    """Extract a .curaprofile archive into a folder named after the file's
    stem, located next to this script, and print the folder path."""
    extract_dir = script_dir / path.stem
    with zipfile.ZipFile(path, 'r') as archive:
        archive.extractall(extract_dir)
    print(extract_dir)
# CLI: `pack <directory>` zips a profile folder into a .curaprofile;
# `unpack <file.curaprofile>` extracts one next to this script.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='commands', dest='command', required=True)
pack_parser = subparsers.add_parser('pack')
pack_parser.add_argument('directory', type=directory, help='profile directory')
unpack_parser = subparsers.add_parser('unpack')
unpack_parser.add_argument('cura_profile', type=cura_profile, help=f'cura config file ({PROFILE_EXT})')
args = parser.parse_args()
if args.command == 'pack':
    pack_directory(args.directory)
else:
    unpack_profile(args.cura_profile)
| true |
130f6ceb27047d96c38d818884a799b202bb08ca | Python | johnwickakash12/python_code | /lambda123.py | UTF-8 | 31 | 2.828125 | 3 | [] | no_license | a=lambda x,y:x-y
print(a(2,3)) | true |
4f0b8759126b1be90e3db9dd2bb4b6756c580540 | Python | kimurakousuke/MeiKaiPython | /chap06/list0612a.py | UTF-8 | 156 | 4.40625 | 4 | [] | no_license | # 反向遍历并输出字符串的所有字符(利用reversed函数)
# Read a string and print its characters in reverse order, then a newline.
s = input('字符串:')
print(''.join(reversed(s)))
| true |
074f35ae16749466ae6796cd363c822321470971 | Python | cattegrin/Faith | /venv/Scripts/update_google_sheet.py | UTF-8 | 924 | 2.75 | 3 | [] | no_license | import sys
import gspread
from oauth2client.service_account import ServiceAccountCredentials
def update_sheet(player_rsn):
    """Add 5 points to `player_rsn`'s row in the clan-ranking Google Sheet.

    FIX: the original immediately overwrote the `player_rsn` argument with
    sys.argv[0] -- the *script path*, never a player name -- so lookups could
    not work when called as a function. The passed-in parameter is now used.
    """
    # use creds to create a client to interact with the Google Drive API
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
    try:  # creates client and opens rank spreadsheet
        g_client = gspread.authorize(creds)
        sheet = g_client.open("RuneScape Clan Ranks by Points").get_worksheet(1)
    except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
        print("Client operation failed.")
        return
    # updates user points: column 6 holds current points, column 7 the new total
    # -- assumed from the reads/writes below; TODO confirm sheet layout.
    print(player_rsn)
    row = sheet.find(player_rsn).row
    print("Row: " + str(row))
    print("Points: " + (sheet.cell(row, 6).value))
    sheet.update_cell(row, 7, int(sheet.cell(row, 6).value) + 5)
    # NOTE(review): exiting the interpreter from a helper is hostile to callers;
    # kept for backward compatibility with script usage -- consider removing.
    sys.exit()
| true |
e6d0c44e0cbe834de02b136fd3e8a86f76ba3723 | Python | Neeraj-kaushik/Geeksforgeeks | /Array/Rotation.py | UTF-8 | 251 | 3.625 | 4 | [] | no_license | def rotation(li):
min = 1000
for i in range(len(li)):
if li[i] < min:
min = li[i]
loc = i
print("The array is rotated ", loc, "Times")
n = int(input())
li = [int(x) for x in input().split()]
rotation(li)
| true |
fb87d3c7564ff4d1937f7f94bbc5847dd8a0937b | Python | pydemos/test | /08面向对象/hm_14_士兵突击_01_枪类.py | UTF-8 | 1,350 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:Keawen
'''
soldier Gun
name model
gun bullet_count
__init__self() __init__(self,model ):
fire(self) shoot(self):
'''
class Gun:
    """A firearm with a model name and a bullet counter."""

    def __init__(self, model):
        # 1. model designation of the gun
        self.model = model
        # 2. number of bullets currently loaded
        self.bullet_count = 0

    def add_bullet(self, count):
        """Load `count` additional bullets into the magazine."""
        self.bullet_count += count

    def shoot(self):
        """Fire one bullet, or report that the magazine is empty."""
        # 1. check the bullet count first
        if self.bullet_count <= 0:
            print('[%s]没有子弹了...' % self.model)
            return
        # 2. fire: one bullet leaves the magazine
        self.bullet_count = self.bullet_count - 1
        # 3. report the shot and the remaining count
        print('[%s] 突突突突..[%d]' % (self.model, self.bullet_count))
class Soldier:
    """A soldier who may carry a Gun; new recruits start unarmed."""

    def __init__(self, name):
        # 1. soldier's name
        self.name = name
        # 2. weapon -- recruits have none yet
        self.gun = None

    def fier(self):
        """Charge: load 50 bullets into the gun and fire it, if one is issued."""
        # 1. an unarmed soldier cannot fire
        if self.gun is None:
            print('[%s ]还没有枪...' % self.name)
            return
        # 2. shout the battle cry
        print('冲阿...[%s]' % self.name)
        # 3. load the gun, then 4. fire it
        self.gun.add_bullet(50)
        self.gun.shoot()
#1 create the gun object
ak47 =Gun('AK47')
#2 create the soldier Xu Sanduo and hand him the gun
xusanduo = Soldier('许三多')
xusanduo.gun = ak47
xusanduo.fier()
print(xusanduo.gun) | true |
6490fa2608055fb98854698793df4d032f37fe31 | Python | SpikeInterface/spikeextractors | /spikeextractors/baseextractor.py | UTF-8 | 24,122 | 2.640625 | 3 | [
"MIT"
] | permissive | import json
from pathlib import Path
import importlib
import numpy as np
import datetime
from copy import deepcopy
import tempfile
import pickle
import shutil
from .exceptions import NotDumpableExtractorError
class BaseExtractor:
# To be specified in concrete sub-classes
# The default filename (extension to be added by corresponding method)
# to be used if no file path is provided
_default_filename = None
def __init__(self):
self._kwargs = {}
self._tmp_folder = None
self._key_properties = {}
self._properties = {}
self._annotations = {}
self._memmap_files = []
self._features = {}
self._epochs = {}
self._times = None
self.is_dumpable = True
self.id = np.random.randint(low=0, high=9223372036854775807, dtype='int64')
def __del__(self):
# close memmap files (for Windows)
for memmap_obj in self._memmap_files:
self.del_memmap_file(memmap_obj)
if self._tmp_folder is not None and len(self._memmap_files) > 0:
try:
shutil.rmtree(self._tmp_folder)
except Exception as e:
print('Impossible to delete temp file:', self._tmp_folder, 'Error', e)
def del_memmap_file(self, memmap_file):
"""
Safely deletes instantiated memmap file.
Parameters
----------
memmap_file: str or Path
The memmap file to delete
"""
if isinstance(memmap_file, np.memmap):
memmap_file = memmap_file.filename
else:
memmap_file = Path(memmap_file)
existing_memmap_files = [Path(memmap.filename) for memmap in self._memmap_files]
if memmap_file in existing_memmap_files:
try:
memmap_idx = existing_memmap_files.index(memmap_file)
memmap_obj = self._memmap_files[memmap_idx]
if not memmap_obj._mmap.closed:
memmap_obj._mmap.close()
del memmap_obj
memmap_file.unlink()
del self._memmap_files[memmap_idx]
except Exception as e:
raise Exception(f"Error in deleting {memmap_file.name}: Error {e}")
def make_serialized_dict(self, relative_to=None):
"""
Makes a nested serialized dictionary out of the extractor. The dictionary be used to re-initialize an
extractor with spikeextractors.load_extractor_from_dict(dump_dict)
Parameters
----------
relative_to: str, Path, or None
If not None, file_paths are serialized relative to this path
Returns
-------
dump_dict: dict
Serialized dictionary
"""
class_name = str(type(self)).replace("<class '", "").replace("'>", '')
module = class_name.split('.')[0]
imported_module = importlib.import_module(module)
try:
version = imported_module.__version__
except AttributeError:
version = 'unknown'
if self.is_dumpable:
dump_dict = {'class': class_name, 'module': module, 'kwargs': self._kwargs,
'key_properties': self._key_properties, 'annotations': self._annotations,
'version': version, 'dumpable': True}
else:
dump_dict = {'class': class_name, 'module': module, 'kwargs': {}, 'key_properties': self._key_properties,
'annotations': self._annotations, 'version': version,
'dumpable': False}
if relative_to is not None:
relative_to = Path(relative_to).absolute()
assert relative_to.is_dir(), "'relative_to' must be an existing directory"
dump_dict = _make_paths_relative(dump_dict, relative_to)
return dump_dict
def dump_to_dict(self, relative_to=None):
"""
Dumps recording to a dictionary.
The dictionary be used to re-initialize an
extractor with spikeextractors.load_extractor_from_dict(dump_dict)
Parameters
----------
relative_to: str, Path, or None
If not None, file_paths are serialized relative to this path
Returns
-------
dump_dict: dict
Serialized dictionary
"""
return self.make_serialized_dict(relative_to)
def _get_file_path(self, file_path, extensions):
"""
Helper to be used by various dump_to_file utilities.
Returns default file_path (if not specified), assures that target
directory exists, adds correct file extension if none, and assures
that provided file extension is one of the allowed.
Parameters
----------
file_path: str or None
extensions: list or tuple
First provided is used as an extension for the default file_path.
All are tested against
Returns
-------
Path
Path object with file path to the file
Raises
------
NotDumpableExtractorError
"""
ext = extensions[0]
if self.check_if_dumpable():
if file_path is None:
file_path = self._default_filename + ext
file_path = Path(file_path)
file_path.parent.mkdir(parents=True, exist_ok=True)
folder_path = file_path.parent
if Path(file_path).suffix == '':
file_path = folder_path / (str(file_path) + ext)
assert file_path.suffix in extensions, \
"'file_path' should have one of the following extensions:" \
" %s" % (', '.join(extensions))
return file_path
else:
raise NotDumpableExtractorError(
f"The extractor is not dumpable to {ext}")
def dump_to_json(self, file_path=None, relative_to=None):
"""
Dumps recording extractor to json file.
The extractor can be re-loaded with spikeextractors.load_extractor_from_json(json_file)
Parameters
----------
file_path: str
Path of the json file
relative_to: str, Path, or None
If not None, file_paths are serialized relative to this path
"""
dump_dict = self.make_serialized_dict(relative_to)
self._get_file_path(file_path, ['.json'])\
.write_text(
json.dumps(_check_json(dump_dict), indent=4),
encoding='utf8'
)
def dump_to_pickle(self, file_path=None, include_properties=True, include_features=True,
relative_to=None):
"""
Dumps recording extractor to a pickle file.
The extractor can be re-loaded with spikeextractors.load_extractor_from_json(json_file)
Parameters
----------
file_path: str
Path of the json file
include_properties: bool
If True, all properties are dumped
include_features: bool
If True, all features are dumped
relative_to: str, Path, or None
If not None, file_paths are serialized relative to this path
"""
file_path = self._get_file_path(file_path, ['.pkl', '.pickle'])
# Dump all
dump_dict = {'serialized_dict': self.make_serialized_dict(relative_to)}
if include_properties:
if len(self._properties.keys()) > 0:
dump_dict['properties'] = self._properties
if include_features:
if len(self._features.keys()) > 0:
dump_dict['features'] = self._features
# include times
dump_dict["times"] = self._times
file_path.write_bytes(pickle.dumps(dump_dict))
def get_tmp_folder(self):
"""
Returns temporary folder associated to the extractor
Returns
-------
temp_folder: Path
The temporary folder
"""
if self._tmp_folder is None:
self._tmp_folder = Path(tempfile.mkdtemp())
return self._tmp_folder
def set_tmp_folder(self, folder):
"""
Sets temporary folder of the extractor
Parameters
----------
folder: str or Path
The temporary folder
"""
self._tmp_folder = Path(folder)
def allocate_array(self, memmap, shape=None, dtype=None, name=None, array=None):
"""
Allocates a memory or memmap array
Parameters
----------
memmap: bool
If True, a memmap array is created in the sorting temporary folder
shape: tuple
Shape of the array. If None array must be given
dtype: dtype
Dtype of the array. If None array must be given
name: str or None
Name (root) of the file (if memmap is True). If None, a random name is generated
array: np.array
If array is given, shape and dtype are initialized based on the array. If memmap is True, the array is then
deleted to clear memory
Returns
-------
arr: np.array or np.memmap
The allocated memory or memmap array
"""
if memmap:
tmp_folder = self.get_tmp_folder()
if array is not None:
shape = array.shape
dtype = array.dtype
else:
assert shape is not None and dtype is not None, "Pass 'shape' and 'dtype' arguments"
if name is None:
tmp_file = tempfile.NamedTemporaryFile(suffix=".raw", dir=tmp_folder).name
else:
if Path(name).suffix == '':
tmp_file = tmp_folder / (name + '.raw')
else:
tmp_file = tmp_folder / name
raw_tmp_file = r'{}'.format(str(tmp_file))
# make sure any open memmap files with same path are deleted
self.del_memmap_file(raw_tmp_file)
arr = np.memmap(raw_tmp_file, mode='w+', shape=shape, dtype=dtype)
if array is not None:
arr[:] = array
del array
else:
arr[:] = 0
self._memmap_files.append(arr)
else:
if array is not None:
arr = array
else:
arr = np.zeros(shape, dtype=dtype)
return arr
def annotate(self, annotation_key, value, overwrite=False):
"""This function adds an entry to the annotations dictionary.
Parameters
----------
annotation_key: str
An annotation stored by the Extractor
value:
The data associated with the given property name. Could be many
formats as specified by the user
overwrite: bool
If True and the annotation already exists, it is overwritten
"""
if annotation_key not in self._annotations.keys():
self._annotations[annotation_key] = value
else:
if overwrite:
self._annotations[annotation_key] = value
else:
print(f"{annotation_key} is already an annotation key. Use 'overwrite=True' to overwrite it")
def get_annotation(self, annotation_name):
"""This function returns the data stored under the annotation name.
Parameters
----------
annotation_name: str
A property stored by the Extractor
Returns
----------
annotation_data
The data associated with the given property name. Could be many
formats as specified by the user
"""
if annotation_name not in self._annotations.keys():
print(f"{annotation_name} is not an annotation")
return None
else:
return deepcopy(self._annotations[annotation_name])
def get_annotation_keys(self):
"""This function returns a list of stored annotation keys
Returns
----------
property_names: list
List of stored annotation keys
"""
return list(self._annotations.keys())
def copy_annotations(self, extractor):
"""Copy object properties from another extractor to the current extractor.
Parameters
----------
extractor: Extractor
The extractor from which the annotations will be copied
"""
self._annotations = deepcopy(extractor._annotations)
def add_epoch(self, epoch_name, start_frame, end_frame):
"""This function adds an epoch to your extractor that tracks
a certain time period. It is stored in an internal
dictionary of start and end frame tuples.
Parameters
----------
epoch_name: str
The name of the epoch to be added
start_frame: int
The start frame of the epoch to be added (inclusive)
end_frame: int
The end frame of the epoch to be added (exclusive). If set to None, it will include the entire
sorting after the start_frame
"""
if isinstance(epoch_name, str):
start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
self._epochs[epoch_name] = {'start_frame': start_frame, 'end_frame': end_frame}
else:
raise TypeError("epoch_name must be a string")
def remove_epoch(self, epoch_name):
"""This function removes an epoch from your extractor.
Parameters
----------
epoch_name: str
The name of the epoch to be removed
"""
if isinstance(epoch_name, str):
if epoch_name in list(self._epochs.keys()):
del self._epochs[epoch_name]
else:
raise ValueError("This epoch has not been added")
else:
raise ValueError("epoch_name must be a string")
def get_epoch_names(self):
"""This function returns a list of all the epoch names in the extractor
Returns
----------
epoch_names: list
List of epoch names in the recording extractor
"""
epoch_names = list(self._epochs.keys())
if not epoch_names:
pass
else:
epoch_start_frames = []
for epoch_name in epoch_names:
epoch_info = self.get_epoch_info(epoch_name)
start_frame = epoch_info['start_frame']
epoch_start_frames.append(start_frame)
epoch_names = [epoch_name for _, epoch_name in sorted(zip(epoch_start_frames, epoch_names))]
return epoch_names
def get_epoch_info(self, epoch_name):
"""This function returns the start frame and end frame of the epoch
in a dict.
Parameters
----------
epoch_name: str
The name of the epoch to be returned
Returns
----------
epoch_info: dict
A dict containing the start frame and end frame of the epoch
"""
# Default (Can add more information into each epoch in subclass)
if isinstance(epoch_name, str):
if epoch_name in list(self._epochs.keys()):
epoch_info = self._epochs[epoch_name]
return epoch_info
else:
raise ValueError("This epoch has not been added")
else:
raise ValueError("epoch_name must be a string")
def copy_epochs(self, extractor):
"""Copy epochs from another extractor.
Parameters
----------
extractor: BaseExtractor
The extractor from which the epochs will be copied
"""
for epoch_name in extractor.get_epoch_names():
epoch_info = extractor.get_epoch_info(epoch_name)
self.add_epoch(epoch_name, epoch_info["start_frame"], epoch_info["end_frame"])
def _cast_start_end_frame(self, start_frame, end_frame):
from .extraction_tools import cast_start_end_frame
return cast_start_end_frame(start_frame, end_frame)
@staticmethod
def load_extractor_from_json(json_file):
"""
Instantiates extractor from json file
Parameters
----------
json_file: str or Path
Path to json file
Returns
-------
extractor: RecordingExtractor or SortingExtractor
The loaded extractor object
"""
json_file = Path(json_file)
with open(str(json_file), 'r') as f:
d = json.load(f)
extractor = _load_extractor_from_dict(d)
return extractor
@staticmethod
def load_extractor_from_pickle(pkl_file):
"""
Instantiates extractor from pickle file.
Parameters
----------
pkl_file: str or Path
Path to pickle file
Returns
-------
extractor: RecordingExtractor or SortingExtractor
The loaded extractor object
"""
pkl_file = Path(pkl_file)
with open(str(pkl_file), 'rb') as f:
d = pickle.load(f)
extractor = _load_extractor_from_dict(d['serialized_dict'])
if 'properties' in d.keys():
extractor._properties = d['properties']
if 'features' in d.keys():
extractor._features = d['features']
if 'times' in d.keys():
extractor._times = d['times']
return extractor
@staticmethod
def load_extractor_from_dict(d):
"""
Instantiates extractor from dictionary
Parameters
----------
d: dictionary
Python dictionary
Returns
-------
extractor: RecordingExtractor or SortingExtractor
The loaded extractor object
"""
extractor = _load_extractor_from_dict(d)
return extractor
def check_if_dumpable(self):
return _check_if_dumpable(self.make_serialized_dict())
def _make_paths_relative(d, relative):
dcopy = deepcopy(d)
if "kwargs" in dcopy.keys():
relative_kwargs = _make_paths_relative(dcopy["kwargs"], relative)
dcopy["kwargs"] = relative_kwargs
return dcopy
else:
for k in d.keys():
# in SI, all input paths have the "path" keyword
if "path" in k:
d[k] = str(Path(d[k]).relative_to(relative))
return d
def _load_extractor_from_dict(dic):
cls = None
class_name = None
probe_file = None
kwargs = deepcopy(dic['kwargs'])
if np.any([isinstance(v, dict) for v in kwargs.values()]):
# nested
for k in kwargs.keys():
if isinstance(kwargs[k], dict):
if 'module' in kwargs[k].keys() and 'class' in kwargs[k].keys() and 'version' in kwargs[k].keys():
extractor = _load_extractor_from_dict(kwargs[k])
class_name = dic['class']
cls = _get_class_from_string(class_name)
kwargs[k] = extractor
break
elif np.any([isinstance(v, list) and isinstance(v[0], dict) for v in kwargs.values()]):
# multi
for k in kwargs.keys():
if isinstance(kwargs[k], list) and isinstance(kwargs[k][0], dict):
extractors = []
for kw in kwargs[k]:
if 'module' in kw.keys() and 'class' in kw.keys() and 'version' in kw.keys():
extr = _load_extractor_from_dict(kw)
extractors.append(extr)
class_name = dic['class']
cls = _get_class_from_string(class_name)
kwargs[k] = extractors
break
else:
class_name = dic['class']
cls = _get_class_from_string(class_name)
assert cls is not None and class_name is not None, "Could not load spikeinterface class"
if not _check_same_version(class_name, dic['version']):
print('Versions are not the same. This might lead to errors. Use ', class_name.split('.')[0],
'version', dic['version'])
if 'probe_file' in kwargs.keys():
probe_file = kwargs.pop('probe_file')
# instantiate extrator object
extractor = cls(**kwargs)
# load probe file
if probe_file is not None:
assert 'Recording' in class_name, "Only recording extractors can have probe files"
extractor = extractor.load_probe_file(probe_file=probe_file)
# load properties and features
if 'key_properties' in dic.keys():
extractor._key_properties = dic['key_properties']
if 'annotations' in dic.keys():
extractor._annotations = dic['annotations']
return extractor
def _get_class_from_string(class_string):
class_name = class_string.split('.')[-1]
module = '.'.join(class_string.split('.')[:-1])
imported_module = importlib.import_module(module)
try:
imported_class = getattr(imported_module, class_name)
except:
imported_class = None
return imported_class
def _check_same_version(class_string, version):
    """Compare the installed version of the extractor's root package to `version`.

    Returns True/False, or the string 'unknown' when the package exposes no
    __version__. NOTE(review): 'unknown' is truthy, so callers doing
    `if not _check_same_version(...)` treat unknown versions as matching.
    """
    module = class_string.split('.')[0]
    imported_module = importlib.import_module(module)
    try:
        return imported_module.__version__ == version
    except AttributeError:
        return 'unknown'
def _check_if_dumpable(d):
kwargs = d['kwargs']
if np.any([isinstance(v, dict) and 'dumpable' in v.keys() for (k, v) in kwargs.items()]):
for k, v in kwargs.items():
if 'dumpable' in v.keys():
return _check_if_dumpable(v)
else:
return d['dumpable']
def _check_json(d):
# quick hack to ensure json writable
for k, v in d.items():
if isinstance(v, dict):
d[k] = _check_json(v)
elif isinstance(v, Path):
d[k] = str(v.absolute())
elif isinstance(v, bool):
d[k] = bool(v)
elif isinstance(v, (int, np.integer)):
d[k] = int(v)
elif isinstance(v, float):
d[k] = float(v)
elif isinstance(v, datetime.datetime):
d[k] = v.isoformat()
elif isinstance(v, (np.ndarray, list)):
if len(v) > 0:
if isinstance(v[0], dict):
# these must be extractors for multi extractors
d[k] = [_check_json(v_el) for v_el in v]
else:
v_arr = np.array(v)
if len(v_arr.shape) == 1:
if 'int' in str(v_arr.dtype):
v_arr = [int(v_el) for v_el in v_arr]
d[k] = v_arr
elif 'float' in str(v_arr.dtype):
v_arr = [float(v_el) for v_el in v_arr]
d[k] = v_arr
elif isinstance(v_arr[0], str):
v_arr = [str(v_el) for v_el in v_arr]
d[k] = v_arr
else:
print(f'Skipping field {k}: only 1D arrays of int, float, or str types can be serialized')
elif len(v_arr.shape) == 2:
if 'int' in str(v_arr.dtype):
v_arr = [[int(v_el) for v_el in v_row] for v_row in v_arr]
d[k] = v_arr
elif 'float' in str(v_arr.dtype):
v_arr = [[float(v_el) for v_el in v_row] for v_row in v_arr]
d[k] = v_arr
elif 'bool' in str(v_arr.dtype):
v_arr = [[bool(v_el) for v_el in v_row] for v_row in v_arr]
d[k] = v_arr
else:
print(f'Skipping field {k}: only 2D arrays of int or float type can be serialized')
else:
print(f"Skipping field {k}: only 1D and 2D arrays can be serialized")
else:
d[k] = list(v)
return d
| true |
8cd564997ea53d214ab828b5ae6f38551ac1d767 | Python | SunRiseGG/EmbSystemsLab2 | /FFTThread2.py | UTF-8 | 1,340 | 2.96875 | 3 | [] | no_license | import threading
import time
import math
class FFTThread2(threading.Thread):
    """Worker thread computing the upper half (indices N/2 .. N-1) of an
    N-point DFT of `x` via one decimation-in-time butterfly stage, writing
    into the shared F_Re / F_Im output lists.

    Presumably a sibling "thread 1" fills the lower half -- TODO confirm.
    N is assumed even; the caller must pre-size F_Re and F_Im to length N.
    """
    def __init__(self, x, F_Re, F_Im):
        threading.Thread.__init__(self)
        self.x = x            # input samples
        self.N = len(x)       # transform length
        self.F_Re = F_Re      # shared output: real parts
        self.F_Im = F_Im      # shared output: imaginary parts
    def run(self):
        print('Starting thread2 for fft')
        res = self.calculate()
        if (res):
            print('Finished thread2 for fft')
        # NOTE(review): Thread.run's return value is discarded by the
        # threading machinery; results are delivered via the shared lists.
        return res
    def calculate(self):
        # Partial DFTs of the odd-indexed (1) and even-indexed (2) samples.
        F_Re1 = []
        F_Im1 = []
        F_Re2 = []
        F_Im2 = []
        for p in range(int(self.N / 2)):
            F_Re1.append(0.0)
            F_Im1.append(0.0)
            F_Re2.append(0.0)
            F_Im2.append(0.0)
            for m in range(int(self.N / 2)):
                F_Re1[p] += self.x[2 * m + 1] * math.cos(4 * math.pi * p * m / self.N)
                F_Re2[p] += self.x[2 * m] * math.cos(4 * math.pi * p * m / self.N)
                F_Im1[p] -= self.x[2 * m + 1] * math.sin(4 * math.pi * p * m / self.N)
                F_Im2[p] -= self.x[2 * m] * math.sin(4 * math.pi * p * m / self.N)
            # Butterfly combine for bin p + N/2; the twiddle's sign flip is
            # folded into cos/sin of 2*pi*(p + N/2)/N. NOTE(review): the
            # imaginary combine uses '+ ... * sin', which differs from the
            # textbook '-' convention -- verify against thread 1.
            self.F_Re[p + int(self.N / 2)] = F_Re2[p] + F_Re1[p] * math.cos(2 * math.pi * (p + self.N / 2) / self.N)
            self.F_Im[p + int(self.N / 2)] = F_Im2[p] + F_Im1[p] * math.sin(2 * math.pi * (p + self.N / 2) / self.N)
        return self.F_Re
| true |
d5fdaf87dc053078fd46a456417cb0a96601b792 | Python | Aasthaengg/IBMdataset | /Python_codes/p02851/s522861811.py | UTF-8 | 354 | 2.78125 | 3 | [] | no_license | from collections import defaultdict
# Competitive-programming script: reads "N K" and then N integers from stdin.
N, K, *A = map(int, open(0).read().split())
# x[i] = sum of the first i elements (prefix sums).
x = [0] * (N + 1)
for i in range(N):
    x[i + 1] = x[i] + A[i]
# y[i] = (prefix_sum(i) - i) mod K. Two indices i < j with y[i] == y[j] and
# j - i < K mark a subarray whose sum is congruent to its length mod K
# -- presumably AtCoder ABC 146 E; TODO confirm.
y = [(x[i] - i) % K for i in range(N + 1)]
ctr = defaultdict(int)
ans = 0
for j in range(N + 1):
    # Count earlier prefix indices (within the sliding window of width K-1)
    # sharing residue y[j], then add j itself to the window.
    ans += ctr[y[j]]
    ctr[y[j]] += 1
    if j - K + 1 >= 0:
        # Index j-K+1 falls out of the window before the next iteration.
        ctr[y[j - K + 1]] -= 1
print(ans)
| true |
5df122b85efad4d66bc5fbbdb55d9f4afe98ed01 | Python | mathivanansoft/algorithms_and_data_structures | /algorithms/dynamic_programming/longest_subsequence.py | UTF-8 | 1,101 | 3.9375 | 4 | [] | no_license | # Find the longest subsequence in array in which the elements in subsequence
# are consecutive
def longest_subsequence(arr):
    """Find the longest run of consecutive integers contained in `arr`.

    Prints the run length and its members (same output as before) and now
    also returns the length, so callers can use the result programmatically
    (previously the function returned None).
    """
    present = set(arr)
    maximum = 0   # length of the best run found so far
    data = 0      # last (largest) value of the best run
    for value in present:
        # Only start counting from the low end of a run; interior members
        # are covered when their run's low end is processed.
        if value - 1 in present:
            continue
        end = value
        while end in present:
            end += 1
        length = end - value
        if length > maximum:
            maximum = length
            data = end - 1
    print("Maximum consecutive", maximum)
    for index in range(data - maximum + 1, data + 1):
        print(index, end=" ")
    return maximum
if __name__ == "__main__":
arr = [10, 4, 3, 11, 13, 5, 6, 12, 7]
longest_subsequence(arr) | true |
8d97a6f20cbeaaa42109baf346e38b04dbaf71ea | Python | koushalkh/Image-Organizer | /app/create_table.py | UTF-8 | 359 | 2.59375 | 3 | [] | no_license | import sqlite3 as sql
def create_table():
    """(Re)create the USER table in USERINFO.db and seed the default account.

    FIX: the unused cursor is removed and the connection is now closed in a
    finally block (it previously leaked).
    SECURITY NOTE(review): this stores a hard-coded plaintext admin/admin
    credential -- acceptable only for a demo; real code must hash passwords.
    """
    con = sql.connect('USERINFO.db')
    try:
        con.execute("DROP TABLE IF EXISTS USER")
        con.execute("CREATE TABLE USER(username TEXT PRIMARY KEY NOT NULL,password TEXT NOT NULL)")
        con.execute("INSERT INTO USER(username,password) VALUES('admin','admin')")
        con.commit()
    finally:
        con.close()
create_table()
| true |
75b87ffba71ae89ac6bb1d6de6d1ca438e672c3e | Python | Guilherme-Galli77/Curso-Python-Mundo-3 | /Exercicios/Ex082 - Dividindo valores em várias listas.py | UTF-8 | 812 | 4.75 | 5 | [] | no_license | #Exercício Python 082: Crie um programa que vai ler vários números e colocar em uma lista.
# Depois disso, crie duas listas extras que vão conter apenas os valores pares
# e os valores ímpares digitados, respectivamente. Ao final, mostre o conteúdo das três listas geradas.
# Read integers until the user quits; keep the full list plus separate
# even (par) and odd (impar) lists, then print all three.
lista = list()
par = list()
impar = list()
while True:
    valor = int(input("Digite um valor: "))
    lista.append(valor)
    # Classify the value as even or odd.
    if valor % 2 == 0:
        par.append(valor)
    else:
        impar.append(valor)
    # Re-prompt until a valid S/N answer is given.
    # NOTE(review): `in` on a string tests substrings -- an empty reply (or
    # "sn") also passes this check.
    r = str(input("Deseja continuar? [S/N] ")).strip().upper()
    while r not in "SsNn":
        r = str(input("Deseja continuar? [S/N] ")).strip().upper()
    if r == "N":
        break
print("="*40)
print(f"A lista completa é: {lista}")
print(f"A lista de pares é: {par}")
print(f"A lista de impares é: {impar}")
| true |
c9216ef83e76bda92822735a6cb23958422bc1dd | Python | lemonferret/positron_loops | /ecut.py | UTF-8 | 1,160 | 2.546875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import copy as copy
name = "k15_a3.106_ecut"
data=pd.read_csv(name, delim_whitespace=True, skipinitialspace=True, engine="python", skiprows = 0, skipfooter =1, header = None)
Ecut = np.arange(200, 550, 10)
F = copy.deepcopy(data[3][1:])
E = copy.deepcopy(data[5][1:])
dE = copy.deepcopy(data[8][1:])
Ecorr = copy.deepcopy(data[9][1:])
N = 2
Ecut = np.array(Ecut)
F = np.array(F)
E = np.array(E)
dE = np.array(dE)
Ecorr = np.array(Ecorr)
de = []
for i in dE:
i = i[1:]
de.extend([i])
dE = np.array(de)
Ecut = Ecut.astype(np.float)
F = F.astype(np.float)
E = E.astype(np.float)
dE = dE.astype(np.float)
Ecorr = Ecorr.astype(np.float)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(Ecut, F, marker='x', color='blue', linestyle='dotted', label='(k15) F')
ax1.plot(Ecut, F+N*Ecorr, marker='x', color='lightblue', linestyle='dotted', label='(k15) F+N*Ecorr')
ax1.legend()
ax1.plot([400, 400], [-8.15, -25.4], color='black', linestyle='dashed')
ax1.set_ylabel('F')
ax1.set_ylabel('F')
ax1.set_xlabel('Ecut')
ax1.set_title('Ecut, a=3.106, dE<0.01')
plt.show()
| true |
f133a2e6c122d342f24595e22ddcb98504e0927a | Python | michaelerne/adventofcode-2019 | /day_06.py | UTF-8 | 2,949 | 2.71875 | 3 | [] | no_license | from functools import partial
from os.path import basename, splitext
from typing import List, Tuple, Dict, Set
import networkx as nx # type: ignore
from lib import solve
DAY: int = int(splitext(basename(__file__))[0].split('_')[1])  # "day_06.py" -> 6
SOLVE = partial(solve, DAY)  # lib.solve pre-bound with this puzzle's day number
def part_a(graph: nx.Graph) -> int:
    """Total number of direct plus indirect orbits.

    Each body's orbit count equals its distance to COM. A single BFS from
    COM (single_source_shortest_path_length) replaces the original
    per-node shortest-path query, dropping O(V*(V+E)) to O(V+E) while
    producing the same sum.
    """
    return sum(nx.single_source_shortest_path_length(graph, "COM").values())
def part_b(graph: nx.Graph) -> int:
    """Minimum orbital transfers between YOU and SAN.

    The shortest path includes the YOU and SAN edges themselves, so
    subtracting 2 yields transfers between the bodies they orbit.
    """
    return nx.shortest_path_length(graph, "YOU", "SAN") - 2
def parse(data: str) -> nx.Graph:
    """Build an undirected graph from newline-separated "PARENT)CHILD" lines."""
    return nx.Graph([x.split(')') for x in data.split('\n')])
def get_indirect_orbits(direct_orbits, planet):
    """Return the chain of bodies orbited (directly then indirectly) by
    *planet*, following parent links up to and including 'COM'."""
    chain = []
    current = planet
    # Iterative walk up the orbit tree (the original recursed).
    while current != 'COM':
        current = direct_orbits[current]
        chain.append(current)
    return chain
def part_a_no_nx(data: List[Tuple[str, str]]) -> int:
    """Count all direct and indirect orbits without networkx."""
    # Every body appearing on either side of an orbit pair.
    bodies: Set[str] = {name for pair in data for name in pair}
    # child -> parent lookup table.
    parent_of: Dict[str, str] = {child: parent for parent, child in data}
    # Each body contributes one orbit per ancestor on its chain to COM.
    return sum(len(get_indirect_orbits(parent_of, body)) for body in bodies)
def path_to_com(direct_orbits, origin):
    """Return the list [origin, ..., 'COM'] following parent links."""
    path = [origin]
    # Iterative ascent (the original recursed per hop).
    while path[-1] != 'COM':
        path.append(direct_orbits[path[-1]])
    return path
def part_b_no_nx(data: List[Tuple[str, str]]) -> int:
    """Minimum orbital transfers between YOU and SAN without networkx.

    Walks both bodies' paths to COM, finds their first common ancestor,
    stitches the two partial paths together and counts the hops, excluding
    the YOU/SAN endpoints themselves.
    """
    direct_orbits: Dict[str, str] = {orbit_to: orbit_from for orbit_from, orbit_to in data}
    you_path: List[str] = path_to_com(direct_orbits, 'YOU')
    santa_path = path_to_com(direct_orbits, 'SAN')
    # First body on YOU's path that also lies on SAN's path = lowest common ancestor.
    intersection = set(you_path) & set(santa_path)
    first_intersection = [x for x in you_path if x in intersection][0]
    you_path_to_intersection = you_path[0:you_path.index(first_intersection)]
    santa_path_to_intersection = santa_path[0:santa_path.index(first_intersection)]
    # Full route: YOU-side segment, the meeting body, then SAN-side segment.
    path_to_santa = you_path_to_intersection + [first_intersection] + santa_path_to_intersection
    # 'YOU' and 'SANTA'
    path_to_santa.remove('YOU')
    path_to_santa.remove('SAN')
    # we count the transfers, so skip the last element
    return len(path_to_santa[0:-1])
def parse_no_nx(data: str) -> List[Tuple[str, str]]:
    """Parse newline-separated "PARENT)CHILD" lines into (parent, child) tuples."""
    pairs = []
    for line in data.split('\n'):
        parts = line.split(')')
        pairs.append((parts[0], parts[1]))
    return pairs
if __name__ == "__main__":
SOLVE(part_a, parse, False, [
('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L''', 42),
('''COM)A
A)B''', 3)
])
SOLVE(part_b, parse, False, [
('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN''', 4)
])
SOLVE(part_a_no_nx, parse_no_nx, False, [
('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L''', 42),
('''COM)A
A)B''', 3)
])
SOLVE(part_b_no_nx, parse_no_nx, False, [
('''COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN''', 4)
])
| true |
3b472f67d7d60cd0b8cc4d4554178873a243e155 | Python | Aaatresh/LetNet | /letnet_fixed_multilayer.py | UTF-8 | 11,867 | 3.125 | 3 | [
"MIT"
] | permissive | """
Script to apply LET approach to image reconstruction on diffusercam lensless cameras.
The parameters C and tau, (the linear coeffecients and standard deviation respectively) are fixed across all layers. These parameters
can be made a parameter for each layer by extending this program.
This is based on the paper: "Bayesian Deep Deconvolutional Neural Networks"
"""
########################################################################
import torch as tor
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.utils.data
import cv2
import os
from scipy.fftpack import dct
from sklearn.preprocessing import StandardScaler
import torch.nn.functional as F
########################################################################
def preprocess(img):
    """
    Preprocess the image by zero-centering the mean and normalizing by the
    standard deviation (StandardScaler standardizes per column/feature).
    Arguments:
        img: 2-D array-like image to be preprocessed
    Returns:
        image after preprocessing (numpy array)
    """
    scaler=StandardScaler() ## scaler object to perform preprocessing
    img=scaler.fit_transform(img) ## zero-center and normalize, column-wise
    return img
def DoG(u,k,tau):
    """
    Evaluate the kth Derivative of Gaussian (DoG) at u:
        u * exp(-(k - 1) * u^2 / (2 * tau^2))

    Arguments:
        u: input element (tensor)
        k: which derivative of the Gaussian (1-based; k=1 reduces to u)
        tau: standard deviation
    Returns:
        The kth Derivative of Gaussian evaluated at u
    """
    exponent = -((k - 1) * tor.pow(u, 2)) / (2 * tau ** 2)
    return u * tor.exp(exponent)
def rho(u,param):
    """
    Linear combination of K Derivatives of Gaussian evaluated at u.

    Arguments:
        u: input element (tensor)
        param: [c, tau] where c is a (K, 1) tensor of linear coefficients
               and tau is the standard deviation
    Returns:
        sum_{k=1..K} c[k-1, 0] * DoG(u, k, tau)
    """
    coeffs, tau = param
    total = 0
    # enumerate rows of the (K, 1) coefficient tensor, k starting at 1
    for k, coeff in enumerate(coeffs, start=1):
        total = total + coeff[0] * DoG(u, k, tau)
    return total
def gen_dct(size):
    """
    Build an orthonormal DCT basis of the given size (used to enforce
    sparsity), shaped (1, 1, size, size) as a float32 torch tensor.
    """
    basis = dct(np.eye(size), norm="ortho", axis=0)
    basis = tor.from_numpy(basis).type(tor.FloatTensor)
    return basis.view(1, 1, size, -1)
def get_psf(psf_path,width,height):
    """
    Read a point-spread-function image and return it resized and converted.
    Arguments:
        psf_path: Path of the psf image file
        width: The width of the sensor
        height: The height of the sensor
    Returns:
        The psf as a (1, 1, height, width) float32 tensor
    """
    psf=cv2.resize(cv2.imread(psf_path,0),(width,height)) ## Read grayscale (flag 0) and resize; cv2.resize takes (width, height)
    psf=tor.from_numpy(psf).type(tor.FloatTensor) ## Convert into a float dtype tensor
    psf=psf.view(1,1,psf.size(0),psf.size(1)) ## reshape to (batch, channel, H, W)
    return psf
def conv(h,x):
    """
    Perform 2D linear convolution in the spatial domain, cropped back to
    x's spatial size.
    Arguments:
        h: kernel tensor, shape (1, 1, Hh, Wh)
        x: image tensor, shape (1, 1, Hx, Wx)
    Returns:
        The 2D convolution result, same spatial size as x
    """
    # NOTE(review): the full convolution size is hard-coded to 512x512; this
    # only works while the padded input fits that size -- confirm for sensor
    # sizes other than 256x256.
    final_conv_dim=(512,512) ## dimension of the convolution result before cropping
    x_dim=(x.size(2),x.size(3)) ## dimension of x
    h_dim=(h.size(2),h.size(3)) ## dimension of h
    crop_dim=x_dim ## image obtained after cropping is the same dimension as the image x
    padding=(final_conv_dim[0]-(x_dim[0]-h_dim[0]+1),final_conv_dim[1]-(x_dim[1]-h_dim[1]+1)) ## calculate the amount of padding required given final_conv_dim, x_dim and h_dim
    # NOTE(review): F.pad pads the *last* dimension first (left, right, top,
    # bottom); padding[0] here derives from the height, so for non-square
    # inputs the height/width roles look swapped -- verify.
    x_pad=F.pad(x,(padding[0]//2,padding[0]//2+1,padding[1]//2,padding[1]//2+1)) ## pad x (asymmetric by one to absorb odd totals)
    # Kernel is flipped so F.conv2d (a cross-correlation) computes a true convolution.
    y=F.conv2d(x_pad,h.flip(2,3),padding=0) ## convolve x_pad with h
    ## starting and ending values along the column and the rows for cropping
    starti=(final_conv_dim[0]-crop_dim[0])//2
    endi=crop_dim[0]+starti
    startj=(final_conv_dim[1]-crop_dim[1])//2
    endj=crop_dim[1]+startj
    ## Cropping
    y=y[:,:,starti:endi,startj:endj]
    return y
class t_layer(nn.Module):
    """
    Class that defines the tth-layer of the network.

    Each layer performs one unrolled iteration: a gradient step on the
    data-fidelity term followed by LET shrinkage (rho) in the DCT domain.
    """
    def __init__(self,learning_rate):
        super().__init__()
        # Step size of the gradient update performed in forward().
        self.learning_rate = learning_rate
    def forward(self,y,xt,D,h,param):
        """
        Compute the forward prop in the tth-layer
        xt1 = D.T ( rho ( D ( xt - (learning_rate)h_flip*( h*xt - y ) ) D.T ) ) D
        here '*' denotes linear convolution

        Arguments:
            y: observed (blurred) image batch
            xt: current reconstruction estimate
            D: DCT basis, shape (1, 1, S, S)
            h: point spread function
            param: [c, tau] LET parameters forwarded to rho
        """
        h_flip=h.flip(2,3)  # flipped kernel acts as the adjoint of conv(h, .)
        a=conv(h,xt)-y  # residual h*xt - y
        b=xt-self.learning_rate*conv(h_flip,a)  # gradient step on the data term
        u=tor.matmul(D,tor.matmul(b,D.transpose(2,3)))  # transform to DCT domain
        u_=rho(u,param)  # LET shrinkage
        xt1=tor.matmul(D.transpose(2,3),tor.matmul(u_,D))  # back to image domain
        return xt1
class LetNet(nn.Module):
    """
    Class that defines the whole network called, "LetNet". This network applies the concept of LET for image reconstruction on lensless
    cameras. The learnable parameters (c, tau) are shared across all layers.
    """
    def __init__(self,c,tau,psf,num_layers,learning_rate):
        super().__init__()
        self.c=nn.Parameter(c,requires_grad=True) ## The linear coefficients; learnable parameters of the network
        self.tau=nn.Parameter(tau,requires_grad=True) ## The standard deviation; learnable parameter of the network
        self.param=[self.c,self.tau]
        self.D=gen_dct(psf.size(2))  # DCT basis sized to the psf height
        self.h=psf
        self.num_layers=num_layers ## number of layers required
        ## uncomment to test a single layer
        ## self.layer1=t_layer()
        ## comment out this snippet to test just a single layer
        # NOTE(review): a plain Python list works here only because t_layer
        # has no learnable parameters of its own; if it ever gains any, this
        # must become nn.ModuleList so they are registered with the optimizer.
        self.layers=[]
        for l in range(num_layers): ## instantiate 'num_layers' number of layers and store each layer as an element of the list
            self.layers.append(t_layer(learning_rate))
        ## comment
    def forward(self,y):
        """
        Forward prop of the network: chain all layers, feeding each layer's
        output into the next, starting from a random initial estimate.
        """
        # NOTE(review): the trailing comment below says "half the psf pixel
        # values", but the code draws a standard-normal tensor shaped like y
        # -- confirm which initialization is intended.
        x0=tor.randn(y.shape) ## Initialize the reconstructed image to half the psf pixel values (This method has been followed in the iterative appoach as well.
        ## uncomment to test a single layer
        ## out = self.layer1(y,x0,self.D,self.h,self.param)
        ## comment this snippet to test a single layer
        out=self.layers[0](y,x0,self.D,self.h,self.param) ## Compute the output of the first layer
        for l in range(1,self.num_layers):
            out=self.layers[l](y,out,self.D,self.h,self.param) ## Compute output of other 'num_layers-1' layers
        ## comment
        return out
class dataset(torch.utils.data.Dataset):
    """
    Dataset pairing ground-truth images (gt_dir) with blurred sensor
    readings (sensor_dir) by sorted filename order.
    """
    def __init__(self,gt_dir,sensor_dir,datasize=0,transform=None):
        """
        Arguments:
            gt_dir: directory of ground-truth images
            sensor_dir: directory of blurred sensor readings
            datasize: number of pairs to expose (0 = all)
            transform: optional callable applied to both images
        """
        super().__init__()
        # BUG FIX: os.listdir returns files in arbitrary order, so the two
        # unsorted lists could pair a ground truth with the wrong sensor
        # reading. Sorting both makes index i refer to the same sample.
        gts = sorted(os.listdir(gt_dir))
        gts = [os.path.join(gt_dir, x) for x in gts]
        sensor = sorted(os.listdir(sensor_dir))
        sensor = [os.path.join(sensor_dir, x) for x in sensor]
        if datasize < 0 or datasize > len(gts):
            # BUG FIX: the original used `assert("message")`, which is a
            # truthy string and never fails; raise a real error instead.
            raise ValueError("datasize should be >=0 or <= max data size")
        elif datasize == 0:
            datasize = len(gts)
        self.gts = gts
        self.sensor = sensor
        self.datasize = datasize
        self.transform = transform

    def __len__(self):
        return self.datasize

    def __getitem__(self,idx):
        gt_addr = self.gts[idx]
        sensor_addr = self.sensor[idx]
        ## Read as grayscale and convert the images into float dtype
        X = np.float32(cv2.imread(gt_addr, 0))
        Y = np.float32(cv2.imread(sensor_addr, 0))
        ## Preprocess the images
        if self.transform is not None:
            X = self.transform(X)
            Y = self.transform(Y)
        ## Convert the images to tensors and add a channel dimension
        X = tor.from_numpy(X).type(tor.FloatTensor).unsqueeze(0)
        Y = tor.from_numpy(Y).type(tor.FloatTensor).unsqueeze(0)
        return Y, X
if __name__=="__main__":
gts_dir="./gts/" ## The directory where the ground truth images are stored
sensor_dir="./sensor_readings/" ## The directory where the blurred images(sensor readings) are stored
psf_path="./psf/psf_sample.tif" ## The directory where the psf is stored
width=256 ## sensor width
height=256 ## sensor height
psf=get_psf(psf_path,width,height) ## read the psf
trainset=dataset(gts_dir,sensor_dir,transform=preprocess) ## instantiate a training set object
trainloader=torch.utils.data.DataLoader(trainset,batch_size=2,shuffle=True) ## instantiate a dataloader
"""
testset=dataset(gts_dir,sensor_dir,transform=preprocess) ## instantiate a test set object
testloader=torch.utils.data.DataLoader(testset,batch_size=2,shuffle=True) ## instantiate a dataloader
"""
## K DoGs
K=10
## randomly initialize the parameters
c=tor.randn(K,1)*0.001
tau=tor.randn(1)*0.001
## Number of layers
num_layers=1
## learning rate for each layer
learning_rate = 1e-5
## The network is instantiated
clf=LetNet(c,tau,psf,num_layers,learning_rate)
## The mean-square error loss function
criterion=nn.MSELoss()
## Adam optimizer is used to optimize this problem
optimizer=tor.optim.Adam(clf.parameters(),lr=1e-3)
losses=[]
epochs=10
for epoch in range(epochs):
l=[]
for batch_idx,(Y,X) in enumerate(trainloader):
X_=clf(Y) ## Forward Prop
loss=criterion(X_,X) ## Compute cost
loss.backward() ## Back prop
optimizer.step() ## Update network parameters
l.append(loss.item()) ## append the loss after processing each batch
print("Batch Index: %d \t Loss in batch: %0.4f" % (batch_idx,l[-1]))
losses.append(np.mean(l)) ## append the mean loss on that epoch
print("Epoch: ",epoch,"\tCost: ",losses[-1])
| true |
05bbce3792d19294a23760f1ce5bdf418048d044 | Python | sxu11/Algorithm_Design | /Array/2dSearch/P200_NumberofIslands.py | UTF-8 | 1,581 | 3.75 | 4 | [] | no_license |
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
'''
class Solution(object):
    def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int

        Count connected components of '1' cells (4-neighbourhood) with an
        iterative flood fill. The original recursive DFS could exceed the
        recursion limit on large grids and also recursed into water cells.
        """
        if not grid or not grid[0]:
            return 0
        rows, cols = len(grid), len(grid[0])
        visited = [[False] * cols for _ in range(rows)]
        islands = 0
        for si in range(rows):
            for sj in range(cols):
                if visited[si][sj] or grid[si][sj] == '0':
                    visited[si][sj] = True
                    continue
                # Unvisited land: flood-fill the whole island with a stack.
                islands += 1
                visited[si][sj] = True
                stack = [(si, sj)]
                while stack:
                    i, j = stack.pop()
                    for ni, nj in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
                        if 0 <= ni < rows and 0 <= nj < cols \
                                and not visited[ni][nj] and grid[ni][nj] == '1':
                            visited[ni][nj] = True
                            stack.append((ni, nj))
        return islands
| true |
1a8b69f28877c4acaaad6fa607f90d5c63e35a6e | Python | Wang-Zekun/ichw | /pyassign1/planets.py | UTF-8 | 1,684 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 6 17:51:30 2018
@author: wangzekun
"""
import turtle
import math
def CreatePlanet(name,size,posx,color):
    """Create and return a turtle configured as a planet disc.

    NOTE(review): the incoming `name` argument is immediately rebound to a
    fresh Turtle, so the caller's turtle is discarded -- presumably only the
    return value matters; confirm before relying on the argument.
    """
    name = turtle.Turtle()
    name.speed(0)  # fastest animation speed
    name.color(color)
    name.shape("circle")
    name.shapesize(size,size,1)
    name.penup()  # jump to the orbit start without drawing a line
    name.goto(posx,0)
    name.pendown()
    return name
def Movement(name,a,b,deg,speed,delta):
    """Move a planet turtle one step along its elliptical orbit.

    Arguments:
        name: the turtle to move
        a, b: semi-axes of the ellipse
        deg: current step counter
        speed: orbital period scale (larger = slower)
        delta: x-offset of the ellipse centre
    """
    # Use math.pi instead of the hand-typed approximation 3.14159265359.
    angle = 10 * math.pi * deg / speed / 360
    name.goto(a * math.cos(angle) + delta, b * math.sin(angle))
def main():
    """Build the sun and eight planets, then animate their orbits."""
    Sun = turtle.Turtle()
    Mer = turtle.Turtle()
    Ven = turtle.Turtle()
    Ear = turtle.Turtle()
    Mar = turtle.Turtle()
    Jup = turtle.Turtle()
    Sat = turtle.Turtle()
    Ura = turtle.Turtle()
    Nep = turtle.Turtle()
    # Configure each turtle as a coloured disc at its starting x position.
    Sun = CreatePlanet(Sun,1,0,"red")
    Mer = CreatePlanet(Mer,0.2,27,"grey")
    Ven = CreatePlanet(Ven,0.6,36,"gold")
    Ear = CreatePlanet(Ear,0.64,53,"green")
    Mar = CreatePlanet(Mar,0.36,76.4,"orange")
    Jup = CreatePlanet(Jup,1.78,114.1,"brown")
    Sat = CreatePlanet(Sat,1.50,210,"khaki")
    Ura = CreatePlanet(Ura,1.00,281,"azure")
    Nep = CreatePlanet(Nep,0.95,324,"blue")
    # Step every planet along its ellipse; each call passes that planet's
    # semi-axes, orbital period scale and ellipse-centre offset.
    for times in range(1000):
        for deg in range(3600):
            Movement(Mer,19.5,18,deg,0.24,7.5)
            Movement(Ven,36,36,deg,0.62,0)
            Movement(Ear,50,50,deg,1,3)
            Movement(Mar,65,64,deg,1.88,11.4)
            Movement(Jup,100,99,deg,2.98,14.1)
            Movement(Sat,183,181,deg,7.35,27)
            Movement(Ura,243,240,deg,10.5,38)
            Movement(Nep,300,299,deg,20.0,24)
if __name__=="__main__":
main()
| true |
61860c780a74fd568062a83ce9ae60aaab3c6b27 | Python | AnMik/practice | /practice_1/year.py | UTF-8 | 394 | 3.640625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
def main():
    """Prompt for years and report leap/common until the user types "exit".

    Python 2 script (uses raw_input).
    """
    while True:  # loop instead of the original self-recursion: no stack growth
        year = raw_input("Enter year:")
        if year == "exit":
            return
        try:
            year = int(year)
        except ValueError:  # narrower than the original bare `except Exception`
            print('Something wrong with your number')
            continue
        # Leap year: divisible by 4 but not by 100, or divisible by 400.
        if (not year % 4 and year % 100) or not year % 400:
            print('Leap year')
        else:
            print('Common year')
main()
| true |
4b5becb609b280c31981cea9a6b4defb37173cba | Python | BarryZM/dig-text-similarity-search | /py_scripts/preprocessing/filter_trusted_sources.py | UTF-8 | 1,535 | 2.640625 | 3 | [
"MIT"
] | permissive | # <editor-fold desc="Basic Imports">
import os.path as p
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Options">
arp = ArgumentParser(description='Append articles written by trusted sources '
                                 'from input_file.jl into output_file.jl')
arp.add_argument('input_file', help='Path to rawLexisNexis.jl to be filtered.')
arp.add_argument('output_file', help='Path to trustedLexisNexis.jl '
                                     '(appends new docs if file exists).')
arp.add_argument('-w', '--white_list_path',
                 default='py_scripts/configs/SourceWhiteList.txt',
                 help='Substitute your own news source white list '
                      '(default: py_scripts/configs/SourceWhiteList.txt)')
opts = arp.parse_args()
# </editor-fold>
from dt_sim.data_reader.source_filter_funcs import source_filter
# Load the newline-separated source white list into a tuple.
wl_file = p.abspath(opts.white_list_path)
try:
    news_white_list = list()
    with open(wl_file, 'r') as wl:
        for ln in wl:
            news_white_list.append(str(ln).replace('\n', ''))
    news_white_list = tuple(news_white_list)
except FileNotFoundError:
    news_white_list = None
    print(f'File not found: {wl_file}')
# NOTE(review): an *empty* white-list file yields an empty tuple, which is
# falsy, so filtering is silently skipped exactly like a missing file --
# confirm that is intended.
if __name__ == '__main__' and news_white_list:
    source_filter(
        input_file=opts.input_file,
        output_file=opts.output_file,
        white_list=news_white_list
    )
| true |
643055a4a4bd5318d5985ae0f080a6b875f0025a | Python | priyankarnd/Python-Training | /classes.py | UTF-8 | 1,829 | 4.4375 | 4 | [] | no_license | #Defining a class
class MyClass:
    # Class-level attributes, shared by every instance.
    a = 10
    b = 20
x = MyClass()
print(x.a)  # 10
print(x.b)  # 20
class Person:
    """A person identified by first name, last name and age."""
    def __init__(self, firstName, lastName, age):
        # Plain public attributes -- no getters/setters needed in Python.
        self.firstName = firstName
        self.lastName = lastName
        self.age = age
    def fullName(self):
        """Return the full name as "<first> <last>"."""
        return ' '.join((self.firstName, self.lastName))
#3. Inheritance
class Customer(Person):
    """A customer; the id is private by convention (single underscore)."""
    gender = "male"  # class-level default shared by all customers
    def __init__(self, id, firstName, lastName, age):
        super().__init__(firstName, lastName, age)
        self._id = id  # conventionally-private customer id
    def getBalance(self):
        """Placeholder balance lookup."""
        return 10
    def getCustomerId(self):
        """Return the conventionally-private id as a string."""
        return str(self._id)
class Employee(Person):
    """An employee; `__id` is name-mangled to `_Employee__id`."""
    def __init__(self, id, firstName, lastName, age):
        super().__init__(firstName, lastName, age)
        self.__id = id  # name-mangled "private" attribute
    def getDepartment(self):
        """Placeholder department lookup (would hit a database)."""
        return "Sales"
    def getYearsofService(self):
        """Placeholder tenure in years."""
        return 5
    def getEmployeeID(self):
        """Return the private id as a string."""
        return str(self.__id)
#Creating a Person Object
p = Person("Mike", "Smith", 35)
print(p.firstName)
print(p.age)
print(p.fullName())
# Passing new value (attributes are public and freely rebindable)
p.firstName = "Jack"
print(p.firstName)
# Deleting object
del p
#Creating a customer object
c = Customer (34, "Mary", "Smith", 40)
print(c.gender)
#Inherited Method
print(c.fullName())
print(c.getBalance())
#accessing private variables
c = Customer (34, "Mary", "Smith", 40)
print(c._id) #No protection; still accessible (single underscore is convention only)
print(c.getCustomerId())
e = Employee(50, "Harold", "Thompson", 55)
#print(e.__id) # protected, will not work (AttributeError due to name mangling)
print(e.getEmployeeID())
print(e._Employee__id) #Behind the scenes, how Python accesses the mangled name
print(e.getYearsofService())
print(e.getDepartment())
| true |
c413717766630170f7c1ed0c04a06445de5172d1 | Python | alanveloso/ufpa-graphs-2017 | /boruvka_algorithm.py | UTF-8 | 3,261 | 3.265625 | 3 | [
"MIT"
] | permissive | class graph:
vertex = ['A','B','C','D','E','F','G','H']
edge={ 'A':{'C': 6, 'D': 12},
'B':{'D': 7, 'E':14},
'C':{'A': 6, 'D': 9, 'F': 16, 'G': 3 },
'D':{'A': 12, 'B': 7, 'C': 9, 'E': 11},
'E':{'B': 14, 'D': 11, 'F': 5 , 'H': 18 },
'F':{'C': 16, 'E': 5, 'G': 8, 'H': 13},
'G':{'C':3 ,'F': 8, 'H': 20},
'H':{'E': 18, 'F': 13, 'G': 20}}
# output = [[6, 'A', 'C'], [7, 'B', 'D'], [3, 'C', 'G'], [5, 'E', 'F'], [13, 'H', 'F'], [8, 'F', 'G'], [9, 'C', 'D']]
class graph2:
    # Undirected weighted graph: edge[u][v] == weight of edge (u, v).
    vertex = ['A','B','C','D','E','F','G']
    edge={ 'A':{'B': 7, 'D': 4},
        'B':{'A': 7, 'C': 11, 'D':9, 'E':10},
        'C':{'B': 11, 'E': 5},
        'D':{'A': 4, 'B': 9, 'F': 6},
        'E':{'B': 10, 'C': 5},
        'F':{'D': 6, 'E': 12, 'G':13},
        'G':{'F': 13, 'E': 8}}
## output = [[4, 'A', 'D'], [7, 'B', 'A'], [5, 'C', 'E'], [6, 'F', 'D'], [8, 'G', 'E'], [10, 'B', 'E']]
#função que retorna o peso entre dois vertices
def weigth(grafo, o, d):
    """Return the weight of edge (o, d), or [] when the edge is absent.

    Keeps the original `[]` "missing" sentinel because callers compare the
    result against it (e.g. `weigth(...) != []`). Uses direct dict lookups
    instead of the original O(V) scan over all adjacency keys.
    """
    return grafo.edge.get(o, {}).get(d, [])
def weigth_min(grafo, vertex):
    """Return [weight, vertex, neighbour] triples for `vertex`, sorted by weight."""
    triples = [[weigth(grafo, vertex, close), vertex, close]
               for close in grafo.edge[vertex]]
    return sorted(triples)
#testa se uma ORIGEM e DESTINO já está contido em determinada LISTA
def test_exp_edge(exp_edge, start, close):
    """True if an (undirected) edge between `start` and `close` is already listed."""
    for _, j1, j2 in exp_edge:
        if (j1, j2) == (start, close) or (j2, j1) == (start, close):
            return True
    return False
def weigth_min1(grafo, vertex, list):
    """[w, vertex, close] triples for neighbours of `vertex` restricted to `list`, sorted."""
    triples = []
    for close in list:
        w = weigth(grafo, vertex, close)
        if w != []:  # [] marks a missing edge
            triples.append([w, vertex, close])
    return sorted(triples)
def weigth_min2(grafo, vertex, output):
    """Cheapest edge from `vertex` into each subtree *not* containing it, sorted."""
    candidates = []
    for subtree in output:
        if vertex in subtree:
            continue  # only consider edges leaving vertex's own component
        best = weigth_min1(grafo, vertex, subtree)
        if best != []:
            candidates.append(best[0])
    return sorted(candidates)
## verifica se pelo menos um elemento de uma lista está contida em outra
def test_list(a, b):
    """True if any element of `a` also occurs in `b`."""
    return any(element in b for element in a)
#junta duas lista e a retorna ordenada sem repetição
def sortx(list, b):
    """Extend `list` with `b` (in place), then return its elements sorted and
    de-duplicated (elements here are hashable vertex names)."""
    list.extend(b)
    return sorted(set(list))
## junta duas sublistas, se algum elemento for repetido
def Flatten(list):
    """Merge any two sublists that share an element, until all are disjoint.

    Mutates `list` in place. After every merge it pops the absorbed sublist
    and restarts via recursion, because the indices shift when an element is
    removed mid-iteration.
    """
    for i in range(len(list)):
        for j in range(i+1,len(list)):
            if (test_list(list[i],list[j])):
                list[i] = sortx(list[i],list[j])
                list.pop(j)
                return Flatten(list)  # restart: indices are now stale
    return list
#IMPLEMENTAÇÃO DO ALGORITMO DE BORUVKA
def boruvka(grafo):
    """Boruvka's minimum-spanning-tree algorithm.

    Returns the MST edges as [weight, u, v] triples. `grafo` must expose
    `vertex` (list of names) and `edge` (adjacency dict of weights).
    """
    output = []           # forest: list of subtrees (lists of vertex names)
    aux = []
    outWithWeigth = []    # accepted MST edges, with their weights
    # Phase 1: attach every vertex to its cheapest incident edge.
    for origin in grafo.vertex:
        aux = weigth_min(grafo,origin)
        # NOTE(review): the local name `weigth` below shadows the helper
        # function of the same name for the rest of this function body.
        weigth, start, close = aux[0]
        if not(test_exp_edge(outWithWeigth,start,close)):
            output.append([start,close])
            outWithWeigth.append([weigth,start,close])
    # Phase 2: repeatedly merge components along their cheapest outgoing
    # edge until a single tree remains.
    while(len(output)>1):
        aux = []
        output = Flatten(output)
        for subtree in output:
            for vertex in subtree:
                aux.extend(weigth_min2(grafo, vertex, output))
        aux.sort()
        weigth, start, close = aux[0]  # globally cheapest inter-component edge
        output.append(([start,close]))
        outWithWeigth.append(([weigth,start,close]))
        output = Flatten(output)
    return(outWithWeigth)
print("Grafo 1: ", boruvka(graph))
print("\nGrafo 1: ", boruvka(graph2))
| true |
d3376f2f9e4757933237322ebc195ef1a85bab52 | Python | sjanav/learnpython | /summer_2021/TuplePractice.py | UTF-8 | 101 | 3.296875 | 3 | [] | no_license | import pprint
a = (1, 2, 3, 4)
b = (1, 3, 3, 4,)  # the trailing comma is legal and changes nothing

pprint.pprint(a)
pprint.pprint(b)
# Tuple comparison is element-wise; `==` already yields a bool, so the
# original bool(...) wrapper was redundant.
print(a == b)
c3c57cd83926b8d8905182a1dd792cf51afbcf64 | Python | migueLib/fundus2sex | /src/e2e_utils/compare_labels.py | UTF-8 | 127 | 2.90625 | 3 | [] | no_license | def compare_with_true(data, true):
""""Uses a series for the true"""
return data.apply(lambda x: x == true, axis=0)
| true |
070402bb9900871ccf9a11bb3bb4facbcc1c8361 | Python | FredC94/MOOC-Python3 | /UpyLab/UpyLaB 5.02 - Manip tuples - ADN.py | UTF-8 | 1,459 | 4.3125 | 4 | [
"MIT"
] | permissive | """ Auteur = Frédéric Castel
Date : Avril 2020
Projet : MOOC Python 3 - France Université Numérique
Objectif:
On représente un brin d’ADN par une chaîne de caractères dont les caractères sont parmi les quatre suivants :
'A' (Adénine), 'C' (Cytosine), 'G' (Guanine) et 'T' (Thymine).
Écrire une fonction est_adn qui reçoit une chaîne de caractères en paramètre et qui retourne True si cette chaîne de caractères
n'est pas vide et peut représenter un brin d’ADN, False sinon.
Exemples:
est_adn("ATGGT") doit retourner: True
est_adn("ISA") doit retourner: False
est_adn("CTaG") doit retourner: False
Consignes:
Dans cet exercice, il vous est demandé d’écrire seulement la fonction est_adn. Le code que vous soumettez à UpyLaB doit donc comporter
uniquement la définition de cette fonction, et ne fait en particulier aucun appel à input ou à print.
Vous pouvez supposer que l’argument passé à la fonction sera toujours une chaîne de caractères ;
notez que l'appel de la fonction sur une chaîne vide doit retourner False .
"""
def est_adn(adn):
    """Return True when `adn` is a non-empty string made only of the
    nucleotide characters 'A', 'C', 'G' and 'T'; False otherwise."""
    # all() is True on an empty sequence, so emptiness must be checked first.
    return bool(adn) and all(base in "ACGT" for base in adn)
est_adn('')  # leftover manual check from the exercise; harmless no-op
| true |
a4274e4f4846a442be34eac9b5a0eaa3b4319a37 | Python | jsmolina/z88dk-tutorial-sp1 | /build/scenarioparse.py | UTF-8 | 540 | 2.578125 | 3 | [] | no_license |
print("uint8_t map[25][32] = {")
with open('pacmansce.csv', 'r') as f:
for linenum, row in enumerate(f):
if(linenum == 0):
continue
if(linenum == 25):
break
resultcols = []
cols = row.split(',')
for i in range(1, len(cols)):
x = int(cols[i])
resultcols.append(str(x))
print("{" + (','.join(resultcols)) + "},")
print("};")
# compress x = (number1 << 4) | number2
# decompress number1 = (x & 0b11110000) >> 4, number2 = (x & 0b00001111) | true |
804f8754ad28de14a255a6a02b3b97a1f804e1f3 | Python | Rafabaring/SpringBoard | /Guided_capstone/sqlManager.py | UTF-8 | 2,156 | 2.671875 | 3 | [] | no_license | import psycopg2
import pandas as pd
import config as cfg
class Database:
    """Thin wrapper around a PostgreSQL connection configured via config.py."""

    def __init__(self):
        self.hostname = cfg.HOSTNAME
        self.username = cfg.USERNAME
        self.password = cfg.PASSWORD
        self.database = cfg.DATABASE

    def connect(self):
        """Open and return a new psycopg2 connection (caller must close it)."""
        return psycopg2.connect(host=self.hostname,
                                user=self.username,
                                password=self.password,
                                dbname=self.database)

    def createTable(self, conn, table_name):
        """Create the trades/quotes table named `table_name`.

        The 'events' table stores arrival_tm as TIMESTAMP; all other columns
        are VARCHAR. NOTE: `table_name` is interpolated into the DDL
        (identifiers cannot be query parameters), so it must come from
        trusted code, never from user input.
        """
        cur = conn.cursor()
        create_table_query = "CREATE TABLE " + table_name + ''' (
                            trade_dt VARCHAR(150),
                            rec_type VARCHAR(150),
                            symbol VARCHAR(150),
                            exchange VARCHAR(150),
                            event_tm VARCHAR(150),
                            event_seq_nb VARCHAR(150),
                            arrival_tm VARCHAR(150),
                            trade_pr VARCHAR(150),
                            bid_pr VARCHAR(150),
                            bid_size VARCHAR(150),
                            ask_pr VARCHAR(150),
                            ask_size VARCHAR(150),
                            execution_id VARCHAR(150),
                            trade_size VARCHAR(150)
                            )'''
        if table_name == 'events':
            create_table_query = create_table_query.replace('arrival_tm VARCHAR(150)', 'arrival_tm TIMESTAMP')
        cur.execute(create_table_query)
        conn.commit()  # save changes in database
        print("Table created")

    def insertRddIntoTable(self, conn, rdd_to_input, table):
        """Insert every row of `rdd_to_input` into `table`, committing once.

        BUG FIX: the original built "INSERT ... VALUES " + str(row), which
        broke on values containing quotes and was open to SQL injection;
        values are now passed as query parameters (%s placeholders).
        """
        cur = conn.cursor()
        for row in rdd_to_input:
            placeholders = ", ".join(["%s"] * len(row))
            cur.execute("INSERT INTO " + table + " VALUES (" + placeholders + ")",
                        tuple(row))
        conn.commit()  # save changes in database
| true |
e0739e932dd9b5bc96d94dd85c15155a2ca36be6 | Python | rvanvenetie/stbem | /src/quadrature.py | UTF-8 | 8,869 | 2.828125 | 3 | [
"MIT"
] | permissive | import numpy as np
from .quadrature_rules import (gauss_log_quadrature_rule,
gauss_sqrtinv_quadrature_rule,
gauss_x_quadrature_rule,
log_log_quadrature_rule, log_quadrature_rule,
sqrt_quadrature_rule, sqrtinv_quadrature_rule)
def gauss_quadrature_scheme(N_poly):
    """Gauss-Legendre rule on [0, 1], exact for polynomials of
    degree <= N_poly (N_poly must be odd)."""
    assert N_poly % 2 != 0
    num_nodes = (N_poly + 1) // 2
    nodes, weights = np.polynomial.legendre.leggauss(num_nodes)
    # Affine map of the standard [-1, 1] rule onto [0, 1].
    return QuadScheme1D((nodes + 1.0) / 2, weights / 2)
def gauss_sqrtinv_quadrature_scheme(N_poly):
    """Rule on [0, 1] with weight 1/sqrt(x), exact for p(x) with
    deg(p) <= N_poly (N_poly must be odd)."""
    assert N_poly % 2 != 0
    nodes, weights = gauss_sqrtinv_quadrature_rule((N_poly + 1) // 2)
    return QuadScheme1D(nodes, weights)
def gauss_x_quadrature_scheme(N_poly):
    """Rule on [0, 1] with weight x, exact for p(x) with deg(p) <= N_poly."""
    nodes, weights = gauss_x_quadrature_rule((N_poly + 1) // 2)
    return QuadScheme1D(nodes, weights)
def gauss_log_quadrature_scheme(N_poly):
    """Gauss-type rule on [0, 1] built from gauss_log_quadrature_rule.

    NOTE(review): the original docstring said "with weight x", which looks
    copy-pasted from gauss_x_quadrature_scheme; presumably the weight is
    logarithmic -- confirm in quadrature_rules.
    """
    nodes, weights = gauss_log_quadrature_rule((N_poly + 1) // 2)
    return QuadScheme1D(nodes, weights)
def log_quadrature_scheme(N_poly, N_poly_log):
    """Rule on [0, 1] exact for p(x) + q(x)*log(x),
    with deg(p) <= N_poly and deg(q) <= N_poly_log."""
    return QuadScheme1D(*log_quadrature_rule(N_poly, N_poly_log))
def log_log_quadrature_scheme(N_poly, N_poly_log):
    """Rule on [0, 1] exact for p(x) + q(x)*log(x) + k(x)*log(1-x),
    with deg(p) <= N_poly and deg(k) <= deg(q) <= N_poly_log."""
    return QuadScheme1D(*log_log_quadrature_rule(N_poly, N_poly_log))
def sqrt_quadrature_scheme(N_poly, N_poly_log):
    """Rule on [0, 1] exact for p(x) + q(x)*sqrt(x),
    with deg(p) <= N_poly and deg(q) <= N_poly_log."""
    return QuadScheme1D(*sqrt_quadrature_rule(N_poly, N_poly_log))
def sqrtinv_quadrature_scheme(N_poly, N_poly_log):
    """Rule on [0, 1] built from sqrtinv_quadrature_rule.

    NOTE(review): the original docstring said "q(x)sqrt(x)", identical to
    sqrt_quadrature_scheme's; given the name, the weight is presumably
    1/sqrt(x) -- confirm in quadrature_rules.
    """
    return QuadScheme1D(*sqrtinv_quadrature_rule(N_poly, N_poly_log))
class QuadScheme1D:
    """Quadrature rule (nodes + weights) on the unit interval [0, 1]."""
    def __init__(self, points, weights):
        self.points = np.array(points)
        self.weights = np.array(weights)
        self._mirror = None  # lazily-built reflected rule
    def mirror(self):
        """Rule with nodes reflected via x -> 1 - x (cached)."""
        if self._mirror is None:
            self._mirror = QuadScheme1D(1 - self.points, self.weights)
        return self._mirror
    def integrate(self, f, a: float, b: float) -> float:
        """Approximate the integral of f over [a, b]; f is applied to an array."""
        if a == b:
            return 0
        assert b - a > 1e-5
        mapped = a + (b - a) * self.points
        values = np.asarray(f(mapped))
        return (b - a) * np.dot(values, self.weights)
class QuadScheme2D:
    """Quadrature rule on the unit square [0, 1]^2."""
    def __init__(self, points, weights):
        self.points = np.array(points)
        self.weights = np.array(weights)
        self._mirror_x = None  # lazily-built reflected rules
        self._mirror_y = None
    def mirror_x(self):
        """Rule reflected in the first coordinate (cached)."""
        if self._mirror_x is None:
            self._mirror_x = QuadScheme2D(
                [1 - self.points[0], self.points[1]], self.weights)
        return self._mirror_x
    def mirror_y(self):
        """Rule reflected in the second coordinate (cached)."""
        if self._mirror_y is None:
            self._mirror_y = QuadScheme2D(
                [self.points[0], 1 - self.points[1]], self.weights)
        return self._mirror_y
    def integrate(self, f, a: float, b: float, c: float, d: float) -> float:
        """Approximate the integral of f over [a, b] x [c, d]."""
        assert b - a > 1e-7 and d - c > 1e-7
        mapped = np.array([a + (b - a) * self.points[0],
                           c + (d - c) * self.points[1]])
        values = np.asarray(f(mapped))
        return (d - c) * (b - a) * np.dot(values, self.weights)
class ProductScheme2D(QuadScheme2D):
    """Tensor product of two 1-D rules on the unit square (y defaults to x)."""
    def __init__(self, scheme_x, scheme_y=None):
        scheme_y = scheme_y if scheme_y is not None else scheme_x
        assert isinstance(scheme_x, QuadScheme1D) and isinstance(
            scheme_y, QuadScheme1D)
        n_x = len(scheme_x.points)
        n_y = len(scheme_y.points)
        # Every x node paired with every y node, in x-major order.
        points = np.array([np.repeat(scheme_x.points, n_y),
                           np.tile(scheme_y.points, n_x)])
        super().__init__(points=points,
                         weights=np.kron(scheme_x.weights, scheme_y.weights))
class QuadpyScheme2D(QuadScheme2D):
    """Adapter wrapping a quadpy scheme (nodes on [-1, 1]^2) as a unit-square rule."""
    def __init__(self, quad_scheme):
        # Affine map of quadpy's [-1, 1] nodes onto [0, 1].
        super().__init__(points=(quad_scheme.points + 1) / 2,
                         weights=quad_scheme.weights)
class DuffyScheme2D(QuadScheme2D):
    """Duffy-transformed version of a unit-square rule.

    Nodes are mapped via (x, y) -> (x, x*(1-y)); the Jacobian factor x is
    visible in the weight scaling. With ``symmetric=True`` only one of the
    two node sets is kept and its weights are doubled, which assumes the
    integrand is symmetric under swapping the two coordinates.
    """
    def __init__(self, scheme2d, symmetric):
        assert isinstance(scheme2d, QuadScheme2D)
        x = scheme2d.points[0]
        y = 1 - scheme2d.points[1]  # reflect before the product map
        xy = x * y
        weights = scheme2d.weights * x  # Jacobian of the Duffy map
        if symmetric:
            points = [x, xy]
            weights = weights * 2
        else:
            # Both node sets: (x, xy) and the coordinate-swapped (xy, x).
            points = [np.hstack([x, xy]), np.hstack([xy, x])]
            weights = np.hstack([weights, weights])
        super().__init__(points=points, weights=weights)
class QuadScheme3D:
    """Quadrature rule on the unit cube [0, 1]^3."""
    def __init__(self, points, weights):
        self.points = np.array(points)
        self.weights = np.array(weights)
        self._mirror_x = None  # lazily-built reflected rules
        self._mirror_y = None
        self._mirror_z = None
    def integrate(self, f, a, b, c, d, k, l):
        """Approximate the integral of f over [a, b] x [c, d] x [k, l]."""
        mapped = np.array([a + (b - a) * self.points[0],
                           c + (d - c) * self.points[1],
                           k + (l - k) * self.points[2]])
        values = np.asarray(f(mapped))
        return (d - c) * (b - a) * (l - k) * np.dot(values, self.weights)
    def _mirrored(self, axis):
        # Build a copy with the given coordinate reflected via x -> 1 - x.
        coords = [self.points[0], self.points[1], self.points[2]]
        coords[axis] = 1 - coords[axis]
        return QuadScheme3D(coords, self.weights)
    def mirror_x(self):
        """Rule reflected in the first coordinate (cached)."""
        if self._mirror_x is None:
            self._mirror_x = self._mirrored(0)
        return self._mirror_x
    def mirror_y(self):
        """Rule reflected in the second coordinate (cached)."""
        if self._mirror_y is None:
            self._mirror_y = self._mirrored(1)
        return self._mirror_y
    def mirror_z(self):
        """Rule reflected in the third coordinate (cached)."""
        if self._mirror_z is None:
            self._mirror_z = self._mirrored(2)
        return self._mirror_z
class ProductScheme3D(QuadScheme3D):
    """Tensor-product rule on the unit cube built from a single 1D scheme."""

    def __init__(self, scheme_x):
        # The same 1D rule is used along all three axes.
        scheme_y = scheme_z = scheme_x
        assert isinstance(scheme_x, QuadScheme1D) and isinstance(
            scheme_y, QuadScheme1D)
        ny = len(scheme_y.points)
        nz = len(scheme_z.points)
        # Lay out the (x, y) grid first, then pair every grid column with
        # every z-node; the ordering matches a triple nested loop x, y, z.
        xy = np.array([
            np.repeat(scheme_x.points, ny),
            np.tile(scheme_y.points, len(scheme_x.points)),
        ])
        pts = np.vstack([
            np.repeat(xy, nz, axis=1),
            np.tile(scheme_z.points, xy.shape[1]),
        ])
        wts = np.kron(np.kron(scheme_x.weights, scheme_y.weights),
                      scheme_z.weights)
        super().__init__(points=pts, weights=wts)
class DuffySchemeIdentical3D(QuadScheme3D):
    """ Duffy scheme for unit cube having singularies of the form
    log[(x − y)^2 + z^2]. """

    def __init__(self, scheme3d, symmetric_xy):
        assert isinstance(scheme3d, QuadScheme3D)
        x, y, z = scheme3d.points[0], scheme3d.points[1], scheme3d.points[2]
        # Six tetrahedral pieces of the Duffy decomposition; the last three
        # are the first three with the roles of x and y exchanged.
        pieces = [
            [x, x * (1 - y), x * y * z],
            [x * (1 - y + y * z), x * y * z, x],
            [x, x * (1 - y * z), x * y],
            [x * (1 - y), x, x * y * z],
            [x * y * z, x * (1 - y + y * z), x],
            [x * (1 - y * z), x, x * y],
        ]
        # Jacobian factor of the transformation.
        jac = scheme3d.weights * x**2 * y
        if symmetric_xy:
            # Symmetric integrand: the swapped pieces contribute equally,
            # so keep only the first three with doubled weights.
            points = np.hstack(pieces[:3])
            weights = 2 * np.tile(jac, 3)
        else:
            points = np.hstack(pieces)
            weights = np.tile(jac, 6)
        super().__init__(points=points, weights=weights)
class DuffySchemeTouch3D(QuadScheme3D):
    """ Duffy scheme for unit cube having singularies of the form
    log[(x + y)^2 + z^2] or log[(x^2 + (y+z)^2. """

    def __init__(self, scheme3d):
        assert isinstance(scheme3d, QuadScheme3D)
        x, y, z = scheme3d.points[0], scheme3d.points[1], scheme3d.points[2]
        # Three pieces, each scaling a different pair of coordinates by y.
        pieces = [
            [x * y, y, z * y],
            [y, x * y, z * y],
            [x * y, z * y, y],
        ]
        # Jacobian factor y^2, replicated once per piece.
        super().__init__(points=np.hstack(pieces),
                         weights=np.tile(scheme3d.weights * y**2, 3))
| true |
00e542545974f3d446c6c5f2d23fce4926c7218b | Python | b-oppon-work/Python-Training | /wk4sem1/worksheet4-Q2.py | UTF-8 | 484 | 4.4375 | 4 | [] | no_license | # 2.Write a function that prompts students for how many credits they have.
# Print whether or not they have enough credits for graduation (At UoW 360 credits are needed for graduation).
def students_credit_checker(score):
    """Print whether ``score`` credits are enough to graduate (360 needed)."""
    message = ("Congrats, you made it"
               if score >= 360
               else "Sorry you couldn't make the score to graduate ")
    print(message)
def main():
    """Prompt for a credit total and report graduation eligibility."""
    credits = float(input("Please enter credit: "))
    students_credit_checker(credits)
main()
| true |
b5eedb33669209ad106e6023147f7f6a8e6d3630 | Python | daniel-reich/ubiquitous-fiesta | /mwGt38m3Q3KcsSaPY_23.py | UTF-8 | 56 | 3.015625 | 3 | [] | no_license |
def increment_items(lst):
    """Return a new list with every element of ``lst`` increased by one."""
    incremented = []
    for item in lst:
        incremented.append(item + 1)
    return incremented
| true |
8bce8f58d2959e3a619053447e76a8eef85b2357 | Python | shivkumarsah/python-code-test | /Solution_ShivKumarSah_5years.py | UTF-8 | 1,766 | 3.46875 | 3 | [] | no_license | #! /usr/bin/python
# -*- encoding: ASCII -*-
# Author - Shiv Kumar Sah
# Date - 16 June 2014
# Company max share price calculation from CSV file
# Program takes CSV file as STDIN for data
# Output
# <company>:<year>:<march>
import sys # Sys for stdinp , stdout
import csv # CSV parser
def read_csv_convert_tuples(input_csv=False):
    """Read CSV data and transpose it into a list of column tuples.

    input_csv: optional raw CSV text.  When falsy, rows are read from
    stdin (the script's normal invocation mode).

    Returns a list of tuples, one per input column, e.g.
    [('year', '1990', '1991'), ('month', 'jan', 'feb'), ('compA', '10', '20')].
    """
    if not input_csv:
        # No text supplied: stream the rows from stdin.
        rows = csv.reader(sys.stdin, delimiter=',', quoting=csv.QUOTE_NONE)
    else:
        rows = [line.split(',') for line in input_csv.strip().split('\n')]
    # Strip whitespace from every field, then transpose rows -> columns.
    # list() keeps the result indexable on Python 3 (zip is lazy there);
    # on Python 2 zip already returned a list, so behaviour is unchanged.
    return list(zip(*[[field.strip() for field in row] for row in rows]))
def get_company_max_shares_time(flat_tuples):
    """For every company column, report when its share price peaked.

    flat_tuples: transposed CSV columns; index 0 is the year column,
    index 1 the month column, indices 2+ the company columns.

    Returns one "<company>:<year>:<month>" line per company, joined by
    newlines.
    """
    years, months = flat_tuples[0], flat_tuples[1]
    lines = []
    for company in flat_tuples[2:]:
        prices = [int(value) for value in company[1:]]
        # Offset by 1 because position 0 of each column is its header.
        peak = prices.index(max(prices)) + 1
        lines.append(":".join([company[0], years[peak], months[peak]]))
    return '\n'.join(lines)
def main(input_csv=False):
    """Parse the CSV (from ``input_csv`` text or stdin) and return the report.

    On any parsing/conversion failure a diagnostic string is returned
    instead of raising, so the script never crashes on malformed input.
    """
    try:
        flat_tuples = read_csv_convert_tuples(input_csv)
        return get_company_max_shares_time(flat_tuples)
    except Exception:
        # A bare ``except`` also swallowed SystemExit/KeyboardInterrupt;
        # catching Exception keeps the best-effort behaviour without that.
        return "[Critical]: Something wrong in CSV format. Please verify."
if __name__ == "__main__":
print main() # Drive the startup
| true |
fb7239b42b3c6e95e97636f86df6f256b358bdea | Python | lyj-cooyun/CodingGame | /ColorFightAI/colorfight.py | UTF-8 | 10,164 | 2.5625 | 3 | [
"MIT",
"GPL-3.0-only"
] | permissive | import requests
import json
import os
import random
import threading
hostUrl = 'https://g.fallin.dev/'
def CheckToken(token):
headers = {'content-type': 'application/json'}
r = requests.post(hostUrl + 'checktoken', data=json.dumps({'token':token}), headers = headers)
if r.status_code == 200:
return r.json()
return None
class Cell:
def __init__(self, cellData):
self.owner = cellData['o']
self.attacker = cellData['a']
self.isTaking = cellData['c'] == 1
self.x = cellData['x']
self.y = cellData['y']
self.occupyTime = cellData['ot']
self.attackTime = cellData['at']
self.takeTime = cellData['t']
self.finishTime = cellData['f']
self.cellType = cellData['ct']
self.buildType = cellData['b']
self.isBase = cellData['b'] == "base"
self.isBuilding = cellData['bf'] == False
self.buildTime = cellData['bt']
def __repr__(self):
s = ""
s += "({x}, {y}), owner is {owner}\n".format(x = self.x, y = self.y, owner = self.owner)
if self.isTaking:
s += "Cell is being attacked\n"
s += "Attacker is {attacker}\n".format(attacker = self.attacker)
s += "Attack time is {atkTime}\n".format(atkTime = self.attackTime)
s += "Finish time is {finishTime}\n".format(finishTime = self.finishTime)
else:
s += "Cell is not being attacked\n"
s += "Cell is occupied at {occupyTime}\n".format(occupyTime = self.occupyTime)
s += "Take time is {takeTime}\n".format(takeTime = self.takeTime)
return s
class User:
def __init__(self, userData):
self.id = userData['id']
self.name = userData['name']
self.cdTime = userData['cd_time']
self.buildCdTime = userData['build_cd_time']
self.cellNum = userData['cell_num']
self.baseNum = userData['base_num']
self.goldCellNum = userData['gold_cell_num']
self.energyCellNum = userData['energy_cell_num']
if 'energy' in userData:
self.energy = userData['energy']
if 'gold' in userData:
self.gold = userData['gold']
def __repr__(self):
return "uid: {}\nname: {}\ncd time: {}\ncell number: {}\n".format(self.id, self.name, self.cdTime, self.cellNum)
class Game:
def __init__(self):
self.data = None
self.token = ''
self.name = ''
self.uid = -1
self.endTime = 0
self.joinEndTime = 0
self.gameId = 0
self.users = []
self.cellNum = 0
self.baseNum = 0
self.goldCellNum = 0
self.energyCellNum = 0
self.cdTime = 0
self.buildCdTime = 0
self.energy = 0
self.gold = 0
self.gameVersion = ''
self.Refresh()
def JoinGame(self, name, password = None, force = False, host = None):
if type(name) != str:
print("Your name has to be a string!")
return False
if host != None:
global hostUrl
hostUrl = host
if force == False and os.path.isfile('token'):
with open('token') as f:
self.token = f.readline().strip()
data = CheckToken(self.token)
if data != None:
if name == data['name']:
self.name = data['name']
self.uid = data['uid']
return True
headers = {'content-type': 'application/json'}
data = {'name':name}
if password != None:
data['password'] = password
r = requests.post(hostUrl + 'joingame', data=json.dumps(data), headers = headers)
if r.status_code == 200:
data = r.json()
with open('token', 'w') as f:
f.write(data['token'] + '\n')
self.token = data['token']
self.uid = data['uid']
self.data = None
self.Refresh()
else:
return False
return True
def AttackCell(self, x, y, boost = False):
if self.token != '':
headers = {'content-type': 'application/json'}
r = requests.post(hostUrl + 'attack', data=json.dumps({'cellx':x, 'celly':y, 'boost': boost, 'token':self.token}), headers = headers)
if r.status_code == 200:
data = r.json()
if data['err_code'] == 0:
return True, None, None
else:
return False, data['err_code'], data['err_msg']
else:
return False, None, "Server did not return correctly"
else:
return False, None, "You need to join the game first!"
def BuildBase(self, x, y):
if self.token != '':
headers = {'content-type': 'application/json'}
r = requests.post(hostUrl + 'buildbase', data=json.dumps({'cellx':x, 'celly':y, 'token':self.token}), headers = headers)
if r.status_code == 200:
data = r.json()
if data['err_code'] == 0:
return True, None, None
else:
return False, data['err_code'], data['err_msg']
else:
return False, None, "Server did not return correctly, status_code ", r.status_code
else:
return False, None, "You need to join the game first!"
def Blast(self, x, y, direction):
if self.token != '':
if direction not in ["square", "vertical", "horizontal"]:
return False, None, "Wrong direction!"
headers = {'content-type': 'application/json'}
r = requests.post(hostUrl + 'blast', data=json.dumps({'cellx':x, 'celly':y, 'token':self.token, 'direction':direction}), headers = headers)
if r.status_code == 200:
data = r.json()
if data['err_code'] == 0:
return True, None, None
else:
return False, data['err_code'], data['err_msg']
else:
return False, None, "Server did not return correctly, status_code ", r.status_code
else:
return False, None, "You need to join the game first!"
def MultiAttack(self, x, y):
if self.token != '':
headers = {'content-type': 'application/json'}
r = requests.post(hostUrl + 'multiattack', data=json.dumps({'cellx':x, 'celly':y, 'token':self.token}), headers = headers)
if r.status_code == 200:
data = r.json()
if data['err_code'] == 0:
return True, None, None
else:
return False, data['err_code'], data['err_msg']
else:
return False, None, "Server did not return correctly, status_code ", r.status_code
else:
return False, None, "You need to join the game first!"
def GetCell(self,x,y):
if 0 <= x < self.width and 0 <= y < self.height:
c = Cell(self.data['cells'][x+y*self.width])
return c
return None
def GetTakeTimeEq(self, timeDiff):
if timeDiff <= 0:
return 33
return 30*(2**(-timeDiff/30.0))+3
def RefreshUsers(self, usersData):
self.users = []
for userData in usersData:
u = User(userData)
self.users.append(u)
if u.id == self.uid:
self.gold = u.gold
self.energy = u.energy
self.cdTime = u.cdTime
self.buildCdTime = u.buildCdTime
self.cellNum = u.cellNum
self.baseNum = u.baseNum
self.goldCellNum = u.goldCellNum
self.energyCellNum = u.energyCellNum
self.users.sort(key = lambda x: x.cellNum, reverse = True)
def Refresh(self):
headers = {'content-type': 'application/json'}
if self.data == None:
r = requests.post(hostUrl + 'getgameinfo', data=json.dumps({"protocol":2}), headers = headers)
if r.status_code == 200:
self.data = r.json()
self.width = self.data['info']['width']
self.height = self.data['info']['height']
self.currTime = self.data['info']['time']
self.endTime = self.data['info']['end_time']
self.joinEndTime = self.data['info']['join_end_time']
self.gameId = self.data['info']['game_id']
self.lastUpdate = self.currTime
self.RefreshUsers(self.data['users'])
else:
return False
else:
r = requests.post(hostUrl + 'getgameinfo', data=json.dumps({"protocol":1, "timeAfter":self.lastUpdate}), headers = headers)
if r.status_code == 200:
d = r.json()
self.data['info'] = d['info']
self.data['users'] = d['users']
self.width = d['info']['width']
self.height = d['info']['height']
self.currTime = d['info']['time']
self.endTime = self.data['info']['end_time']
self.joinEndTime = self.data['info']['join_end_time']
self.gameId = self.data['info']['game_id']
self.lastUpdate = self.currTime
self.RefreshUsers(self.data['users'])
for c in d['cells']:
cid = c['x'] + c['y']*self.width
self.data['cells'][cid] = c
for cell in self.data['cells']:
if cell['c'] == 1:
cell['t'] = -1
else:
if cell['o'] == 0:
cell['t'] = 2;
else:
cell['t'] = self.GetTakeTimeEq(self.currTime - cell['ot'])
else:
return False
return True
| true |
7e396a30ab4ad79c5416960adb1d551f49d7d184 | Python | 23b00t/chatbot | /chatbot.py | UTF-8 | 1,137 | 3.359375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import random
def chatbot():
    """Run a small German console chatbot until the user types 'bye'."""
    # Canned fallback replies, picked at random when no keyword matches.
    fallback_replies = ["Oh, wirklich", "Interessant ...", "Das kann man so sehen", "Ich verstehe ..."]
    # Keyword -> scripted reply; every matching word triggers its reply.
    keyword_replies = {"hallo": "Hallo du!",
                       "geht": "Was verstehst du darunter?",
                       "essen": "Ich habe leider keinen Geschmackssinn :(",
                       "spaß": "Klingt gut!"
                       }
    print("Willkommen beim Chatbot")
    print("Worüber würdest du gerne heute sprechen?")
    print("Zum Beenden einfach 'bye' eintippen")
    print("")
    user_text = ''
    while user_text != 'bye':
        # Re-prompt until the user types something non-empty.
        user_text = ''
        while user_text == '':
            user_text = input('Deine Frage/Antwort: ')
        user_text = user_text.lower()
        matched = False
        for word in user_text.split():
            if word in keyword_replies:
                print(keyword_replies[word])
                matched = True
        # Fall back to a random reply, except when the user is leaving.
        if not matched and user_text != 'bye':
            print(random.choice(fallback_replies))
        print('')
    print("Einen schönen Tag wünsche ich Dir. Bis zum nächsten Mal!")
if __name__ == "__chatbot__":
chatbot()
| true |
300bf60f010cee662e297dac5d93b8f62ab4cef5 | Python | ni/nixnet-python | /nixnet/system/_collection.py | UTF-8 | 1,449 | 2.734375 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
from collections.abc import Iterable, Sized # python 3.3+
except ImportError:
from collections import Iterable, Sized # python 2.7
import typing # NOQA: F401
from nixnet import _cprops
class SystemCollection(Iterable, Sized):
    """Collection of System related objects.

    Wraps a session handle / property-id pair and materialises items on
    demand through ``factory``.
    """

    def __init__(self, handle, prop_id, factory):
        # type: (int, int, typing.Any) -> None
        self._handle = handle
        self._prop_id = prop_id
        self._factory = factory

    def __repr__(self):
        return '{}(handle={})'.format(type(self).__name__, self._handle)

    def __eq__(self, other):
        # Two collections are equal when they refer to the same session
        # handle and property; the factory plays no role in identity.
        if not isinstance(other, self.__class__):
            return NotImplemented
        sys_other = typing.cast(SystemCollection, other)
        return (self._handle == sys_other._handle
                and self._prop_id == sys_other._prop_id)

    def __ne__(self, other):
        eq = self.__eq__(other)
        return eq if eq is NotImplemented else not eq

    def __hash__(self):
        return hash(self._handle)

    def __len__(self):
        return _cprops.get_session_ref_array_len(self._handle, self._prop_id)

    def __iter__(self):
        for ref in _cprops.get_session_ref_array(self._handle, self._prop_id):
            yield self._factory(ref)
| true |
f6c4ffbaf2260be3c0bb7d32f4de28c6bab2d8e6 | Python | gitGUAP/Sem5 | /MathematicalPackages/МППИван/lab1IvanPy.py | UTF-8 | 753 | 3.421875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import csv
with open("tsv.tsv") as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter = "\t")
for line in tsvreader:
tsv = line
a = float(tsv[0])
b = float(tsv[1])
c = float(tsv[2])
print(a,b,c)
delta = 0.01
x1 = np.arange(-10.0, -1.0, delta)
y1 = 1.0/(a * np.log(np.absolute(b*x1 +c)))
x2 = np.arange(-1.0 + delta, 1.0, delta)
y2 = 1.0/(a * np.log(np.absolute(b*x2 +c)))
x3 = np.arange(1.0 + delta, 10.0, delta)
y3 = 1.0/(a * np.log(np.absolute(b*x3 +c)))
f = plt.figure()
plt.plot(x1, y1, 'g', x2, y2, 'g', x3, y3, 'g')
plt.title('Lab 1')
plt.ylabel('y')
plt.xlabel('x')
#plt.text(3, 50, r'$y = 1/(a*\ln(abs(b*x+c)))$', fontsize = 13, color = 'red')
plt.grid(True)
plt.show() | true |
cb1a310f54829870180bee77485ec6b78173be2f | Python | daniel2012600/indicator | /indicator/library/download_gcs_file.py | UTF-8 | 604 | 2.625 | 3 | [] | no_license | from google.cloud import storage
import os
class gcs_download:
_storage_client = None
def __init__(self, jsonkey):
self._storage_client = storage.Client.from_service_account_json(
jsonkey)
def download_file(self, bucket_name, fpath, destination_name):
if len(fpath) == 0:
return ''
if os.path.isfile(destination_name):
return destination_name
bucket = self._storage_client.get_bucket(bucket_name)
blob = bucket.blob(fpath)
blob.download_to_filename(destination_name)
return destination_name
| true |
87e87abc6bcedda29a349fb945fd45541e8a681a | Python | AirborneRON/Python- | /chatbot/chatBot.py | UTF-8 | 1,980 | 3.625 | 4 | [] | no_license | file = open("stop_words")
stopList = file.read().split("\n")
file.close()
# how to open up my plain text file, then create a variable to stuff the read file into
#seperating each element of the list by the return key
#then close
# all responses should start with a space before typing
print(" Hello ")
response = raw_input(" what is your name ?")
words = response.split(" ")
for nextWord in words:
if nextWord not in stopList:
response = response.replace(nextWord, "")
print("Well hello" +" " +nextWord)
#because of how my stopList was formatted ive had to use the split function which has conflicted
#with the String
#print ("line 21" + nextWord)
response = raw_input ("how lovely to meet you")
if (response == "my names aaron"):
print("how is that pronounced if you dont mind me asking ? ")
response = raw_input( " Interesting name btw, my names Mac")
if (response == " nice to meet you"):
print("likewise")
response = raw_input (" where are you from originally ? ")
if (response == "im from cornwall originally"):
print("oh I hear its beautiful down those parts")
#if (response == "")
response = raw_input("is there anywhere you'd want to go for a coffee there ?")
if (response == " yes"):
print("Great I look forward to it")
elif(response == " no"):
print("sod you then" + " i'll go by myself")
response = raw_input("anyways, so how old are you ?")
if (response == " 18"):
print(" not as old as me then ")
elif (response == " 23"):
print("same age as me then")
response = raw_input(" whats your favourite colour ?")
if (response == "blue"):
print("thats mine too")
elif(response == "red"):
print("red is sick" + " but unfortunetly we must end this conversation" )
elif(response == "yellow"):
print ("yellows pretty cool too " + " anyways i really must be off TTFN")
else: print("im not a fan of that colour" + "and on that note good day to you sir")
| true |
67450b9347ee8fdd27f8bc76607f35e9bb441c8c | Python | CodeMaxx/CS386-AI-Lab | /practice/lab9/rollno_lab5/kmeans.py | UTF-8 | 13,943 | 3.28125 | 3 | [] | no_license | from math import *
import random
from copy import deepcopy
import numpy as np
def argmin(values):
    """Return the index of the smallest element of ``values``
    (the first one in case of ties)."""
    best_index = 0
    for index, value in enumerate(values):
        if value < values[best_index]:
            best_index = index
    return best_index
def avg(values):
    """Return the arithmetic mean of ``values`` as a float."""
    total = sum(values)
    return total / float(len(values))
def readfile(filename):
    '''
    File format: Each line contains a comma separated list of real numbers, representing a single point.
    Returns a list of N points, where each point is a d-tuple.
    '''
    with open(filename, 'r') as f:
        return [tuple(float(field) for field in line.split(','))
                for line in f]
def writefile(filename, means):
    '''
    means: list of tuples
    Writes the means, one per line, into the file.
    '''
    # A missing filename means "don't save anything".
    if filename is None:
        return
    lines = [','.join(str(coord) for coord in mean) + '\n' for mean in means]
    with open(filename, 'w') as f:
        f.writelines(lines)
    print('Written means to file ' + filename)
def distance_euclidean(p1, p2):
    '''
    p1: tuple: 1st point
    p2: tuple: 2nd point
    Returns the Euclidean distance b/w the two points.
    '''
    # Works for points of any (matching) dimensionality.
    squared_gaps = [(u - v) ** 2 for u, v in zip(p1, p2)]
    return sqrt(sum(squared_gaps))
def distance_manhattan(p1, p2):
    '''
    p1: tuple: 1st point
    p2: tuple: 2nd point
    Returns the Manhattan distance b/w the two points.
    '''
    # k-means uses the Euclidean distance.
    # Changing the distance metric leads to variants which can be more/less
    # robust to outliers and have different cluster densities; it can also
    # sometimes lead to divergence!
    total = 0
    for u, v in zip(p1, p2):
        total += abs(u - v)
    return total
def initialization_forgy(data, k):
    '''
    data: list of tuples: the list of data points
    k: int: the number of cluster means to return
    Returns a list of tuples, representing the cluster means
    '''
    # Forgy initialization: pick k distinct observations uniformly at
    # random and use them as the initial cluster means.
    # Bug fix: np.random.choice cannot sample from a list of tuples (it
    # requires a 1-D array) and would sample *with* replacement, producing
    # duplicate means; random.sample fixes both problems.
    means = random.sample(data, k)
    assert len(means) == k
    return means
def initialization_randompartition(data, distance, k):
    '''
    data: list of tuples: the list of data points
    distance: callable: a function implementing the distance metric to use
    k: int: the number of cluster means to return
    Returns a list of tuples, representing the cluster means
    '''
    # Random-partition initialization: assign every point to a cluster
    # uniformly at random, then use each cluster's centroid as its mean.
    # (The distance metric is not needed by this strategy; the parameter is
    # kept for interface compatibility with the other initializers.)
    # Bug fix: the previous code did ``means[lab] += tup`` on tuples, which
    # *concatenates* instead of adding coordinates, and divided by zero for
    # empty clusters.
    dimension = len(data[0])
    labels = np.random.randint(k, size=len(data))
    sums = [[0.0] * dimension for _ in range(k)]
    counts = [0] * k
    for point, label in zip(data, labels):
        counts[label] += 1
        for axis in range(dimension):
            sums[label][axis] += point[axis]
    means = []
    for cluster in range(k):
        if counts[cluster]:
            means.append(tuple(s / counts[cluster] for s in sums[cluster]))
        else:
            # An empty cluster has no centroid; fall back to a random data
            # point so exactly k means are still returned.
            means.append(random.choice(data))
    assert len(means) == k
    return means
def initialization_kmeansplusplus(data, distance, k):
    '''
    data: list of tuples: the list of data points
    distance: callable: a function implementing the distance metric to use
    k: int: the number of cluster means to return
    Returns a list of tuples, representing the cluster means
    '''
    # k-means++ initialization (Arthur & Vassilvitskii, 2007):
    # 1. Pick the first mean uniformly at random.
    # 2. Pick each subsequent mean with probability proportional to the
    #    squared distance from each point to its nearest chosen mean.
    # (Previously unimplemented: the function returned [] and failed its
    # own length assertion for any k > 0.)
    means = [random.choice(data)]
    while len(means) < k:
        # Squared distance of every point to its closest current mean.
        sq_dists = [min(distance(point, m) for m in means) ** 2
                    for point in data]
        total = float(sum(sq_dists))
        if total == 0:
            # All points coincide with existing means; any choice is fine.
            means.append(random.choice(data))
            continue
        # Roulette-wheel sampling with probability sq_dist / total.
        threshold = random.random() * total
        cumulative = 0.0
        for point, sq_dist in zip(data, sq_dists):
            cumulative += sq_dist
            if cumulative >= threshold:
                means.append(point)
                break
        else:
            # Guard against floating-point round-off in the cumulative sum.
            means.append(data[-1])
    assert len(means) == k
    return means
def iteration_one(data, means, distance):
    '''
    data: list of tuples: the list of data points
    means: list of tuples: the current cluster centers
    distance: callable: function implementing the distance metric to use
    Returns a list of tuples, representing the new cluster means after 1 iteration of k-means clustering algorithm.
    '''
    k = len(means)
    dimension = len(data[0])
    sums = [[0.0] * dimension for _ in range(k)]
    counts = [0] * k
    # Assignment step: attribute every point to its nearest current mean
    # (ties broken towards the lowest index, as before).
    for point in data:
        closest = min(range(k), key=lambda i: distance(point, means[i]))
        counts[closest] += 1
        for axis in range(dimension):
            sums[closest][axis] += point[axis]
    # Update step: each mean becomes the centroid of its assigned points.
    new_means = []
    for cluster in range(k):
        if counts[cluster]:
            new_means.append(tuple(s / counts[cluster] for s in sums[cluster]))
        else:
            # Bug fix: the previous code collapsed a cluster that received
            # no points to the origin; keep its old mean instead so empty
            # clusters do not teleport across the data space.
            new_means.append(tuple(means[cluster]))
    return new_means
def hasconverged(old_means, new_means, epsilon=1e-1):
    '''
    old_means: list of tuples: The cluster means found by the previous iteration
    new_means: list of tuples: The cluster means found by the current iteration
    Returns true iff no cluster center moved more than epsilon distance.
    '''
    # Bug fix: the previous implementation compared the *signed*
    # per-coordinate difference (old - new) against epsilon, so any move in
    # the negative direction was missed entirely.  Convergence is now
    # judged by the Euclidean displacement of each centroid, as documented.
    for old, new in zip(old_means, new_means):
        displacement = sqrt(sum((u - v) ** 2 for u, v in zip(old, new)))
        if displacement > epsilon:
            return False
    return True
def iteration_many(data, means, distance, maxiter, epsilon=1e-1):
    '''
    maxiter: int: Number of iterations to perform
    Uses the iteration_one function.
    Performs maxiter iterations of the k-means clustering algorithm, and saves the cluster means of all iterations.
    Stops if convergence is reached earlier.
    Returns:
        all_means: list of (list of tuples): Each element of all_means is a list of the cluster means found by that iteration.
    '''
    # history[0] is the initialization; one entry is appended per iteration.
    history = [means]
    current = deepcopy(means)
    for _ in range(maxiter):
        updated = iteration_one(data, current, distance)
        history.append(updated)
        # Stop early once the centroids have essentially stopped moving.
        if hasconverged(current, updated, epsilon):
            break
        current = updated
    return history
def performance_SSE(data, means, distance):
    '''
    data: list of tuples: the list of data points
    means: list of tuples: representing the cluster means
    Returns: The Sum Squared Error of the clustering represented by means, on the data.
    '''
    # SSE = sum over all points of the squared distance to the nearest
    # cluster mean.  (Previously unimplemented: the function returned None,
    # which broke the performance plot and the experiment summary.)
    sse = 0.0
    for point in data:
        nearest = min(distance(point, mean) for mean in means)
        sse += nearest ** 2
    return sse
########################################################################
## DO NOT EDIT THE FOLLWOING ##
########################################################################
import sys
import argparse
import matplotlib.pyplot as plt
from itertools import cycle
from pprint import pprint as pprint
def parse():
    """Parse command-line arguments for the clustering experiment.

    Returns a dict of all argparse values, where args['init'] and
    args['dist'] have been replaced by the corresponding functions.
    Exits with status 1 on a missing input file or an unknown option.
    (Python 2 script: note the print *statements* below.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', dest='input', type=str, help='Required. Dataset filename')
    parser.add_argument('-o', '--output', dest='output', type=str, help='Output filename')
    parser.add_argument('-iter', '--iter', '--maxiter', dest='maxiter', type=int, default=10000, help='Maximum number of iterations of the k-means algorithm to perform. (may stop earlier if convergence is achieved)')
    parser.add_argument('-e', '--eps', '--epsilon', dest='epsilon', type=float, default=1e-1, help='Minimum distance the cluster centroids move b/w two consecutive iterations for the algorithm to continue.')
    parser.add_argument('-init', '--init', '--initialization', dest='init', type=str, default='forgy', help='The initialization algorithm to be used. {forgy, randompartition, kmeans++}')
    parser.add_argument('-dist', '--dist', '--distance', dest='dist', type=str, default='euclidean', help='The distance metric to be used. {euclidean, manhattan}')
    parser.add_argument('-k', '--k', dest='k', type=int, default=5, help='The number of clusters to use.')
    parser.add_argument('-verbose', '--verbose', dest='verbose', type=bool, default=False, help='Turn on/off verbose.')
    parser.add_argument('-seed', '--seed', dest='seed', type=int, default=0, help='The RNG seed.')
    parser.add_argument('-numexperiments', '--numexperiments', dest='numexperiments', type=int, default=1, help='The number of experiments to run.')
    _a = parser.parse_args()
    # The input file is the only mandatory option.
    if _a.input is None:
        print('Input filename required.\n')
        parser.print_help()
        sys.exit(1)
    # Copy every parsed attribute into a plain dict.
    args = {}
    for a in vars(_a):
        args[a] = getattr(_a, a)
    # Map the textual option (including common misspellings) to the
    # corresponding initialization function.
    if _a.init.lower() in ['random', 'randompartition']:
        args['init'] = initialization_randompartition
    elif _a.init.lower() in ['k++', 'kplusplus', 'kmeans++', 'kmeans', 'kmeansplusplus']:
        args['init'] = initialization_kmeansplusplus
    elif _a.init.lower() in ['forgy', 'frogy']:
        args['init'] = initialization_forgy
    else:
        print 'Unavailable initialization function.\n'
        parser.print_help()
        sys.exit(1)
    # Same for the distance metric.
    if _a.dist.lower() in ['manhattan', 'l1', 'median']:
        args['dist'] = distance_manhattan
    elif _a.dist.lower() in ['euclidean', 'euclid', 'l2']:
        args['dist'] = distance_euclidean
    else:
        print 'Unavailable distance metric.\n'
        parser.print_help()
        sys.exit(1)
    print '-'*40 + '\n'
    print 'Arguments:'
    pprint(args)
    print '-'*40 + '\n'
    return args
def visualize_data(data, all_means, args):
    """Scatter-plot the clustered points and the trajectory of every
    centroid across iterations (intended for 2-d data only)."""
    print 'Visualizing...'
    means = all_means[-1]
    k = args['k']
    distance = args['dist']
    # Assign each point to its nearest *final* mean to colour the clusters.
    clusters = [[] for _ in range(k)]
    for point in data:
        dlist = [distance(point, center) for center in means]
        clusters[argmin(dlist)].append(point)
    # plot each point of each cluster
    colors = cycle('rgbwkcmy')
    for c, points in zip(colors, clusters):
        x = [p[0] for p in points]
        y = [p[1] for p in points]
        plt.scatter(x,y, c = c)
    # plot each cluster centroid
    colors = cycle('krrkgkgr')  # NOTE(review): immediately overwritten below
    colors = cycle('rgbkkcmy')
    for c, clusterindex in zip(colors, range(k)):
        # Trace centroid `clusterindex` through every saved iteration.
        x = [iteration[clusterindex][0] for iteration in all_means]
        y = [iteration[clusterindex][1] for iteration in all_means]
        plt.plot(x,y, '-x', c = c, linewidth='1', mew=15, ms=2)
    plt.axis('equal')
    plt.show()
def visualize_performance(data, all_means, distance):
    """Plot the Sum Squared Error against iteration number for one run."""
    # One SSE value per saved iteration (index 0 is the initialization).
    errors = [performance_SSE(data, means, distance) for means in all_means]
    plt.plot(range(len(all_means)), errors)
    plt.title('Performance plot')
    plt.xlabel('Iteration')
    plt.ylabel('Sum Squared Error')
    plt.show()
if __name__ == '__main__':
    args = parse()
    # Read data
    data = readfile(args['input'])
    print 'Number of points in input data: {}\n'.format(len(data))
    verbose = args['verbose']
    # Aggregates over all experiments for the summary at the end.
    totalSSE = 0
    totaliter = 0
    for experiment in range(args['numexperiments']):
        print 'Experiment: {}'.format(experiment+1)
        # Each experiment gets its own deterministic seed.
        random.seed(args['seed'] + experiment)
        print 'Seed: {}'.format(args['seed'] + experiment)
        # Initialize means
        means = []
        if args['init'] == initialization_forgy:
            means = args['init'](data, args['k'])	# Forgy doesn't need distance metric
        else:
            means = args['init'](data, args['dist'], args['k'])
        if verbose:
            print 'Means initialized to:'
            print means
            print ''
        # Run k-means clustering
        all_means = iteration_many(data, means, args['dist'], args['maxiter'], args['epsilon'])
        SSE = performance_SSE(data, all_means[-1], args['dist'])
        totalSSE += SSE
        # all_means[0] is the initialization, so iterations = len - 1.
        totaliter += len(all_means)-1
        print 'Sum Squared Error: {}'.format(SSE)
        print 'Number of iterations till termination: {}'.format(len(all_means)-1)
        print 'Convergence achieved: {}'.format(hasconverged(all_means[-1], all_means[-2]))
        if verbose:
            print '\nFinal means:'
            print all_means[-1]
            print ''
    print '\n\nAverage SSE: {}'.format(float(totalSSE)/args['numexperiments'])
    print 'Average number of iterations: {}'.format(float(totaliter)/args['numexperiments'])
    # Saving/plotting only makes sense for a single run.
    if args['numexperiments'] == 1:
        # save the result
        writefile(args['output'], all_means[-1])
        # If the data is 2-d and small, visualize it.
        if len(data) < 5000 and len(data[0]) == 2:
            visualize_data(data, all_means, args)
            visualize_performance(data, all_means, args['dist'])
| true |
f393e291b964045173d745cff81ee2fd6e2824f7 | Python | marnixvds/ucaccmet2j_python | /precipitation_calculations_MvdS.py | UTF-8 | 3,213 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import json

# Load the precipitation measurements: a list of dicts, each with at least
# 'station', 'date' (e.g. "2010-01-05") and 'value' keys.
with open('precipitation.json') as infile:
    precipitation_data = json.load(infile)

# Build {location: {'State': ..., 'Station': ...}} from the stations CSV
# (header row is skipped; columns are Location,State,Station).
stations = {}
with open('stations.csv') as infile:
    infile.readline()  # skip the header line
    for line in infile:
        location, state, station = line.strip().split(',')
        stations[location] = {'State': state, 'Station': station}

result_dictionary = {}  # final results, keyed by location name

# --- Pass 1: group the 2010 measurements per station code and month. -------
# monthly[station_code][month-1] holds the summed precipitation for that month.
monthly = {info['Station']: [0] * 12 for info in stations.values()}
total_country_precipitation = 0  # yearly precipitation over all known stations
for record in precipitation_data:
    code = record['station']
    if code not in monthly:
        continue  # measurement from a station we have no metadata for
    for month in range(1, 13):
        # Same membership test as before: a record counts for the month whose
        # '2010-MM' tag appears somewhere in its date string.
        if '2010-' + str(month).zfill(2) in record['date']:
            monthly[code][month - 1] += record['value']
            total_country_precipitation += record['value']

# --- Pass 2: derive per-station totals and percentages. --------------------
# BUGFIX: the original computed relativeYearlyPrecipitation against a running
# country total inside the station loop, so every station except the last was
# compared against an incomplete sum.  The final total is used here instead.
for chosen_station, info in stations.items():
    station_code = info['Station']
    precipitation_per_month = monthly[station_code]
    precipitation_whole_year = sum(precipitation_per_month)
    if precipitation_whole_year:
        # Percentage of the station's own year that fell in each month.
        relative_monthly_precipitation = [
            (value / precipitation_whole_year) * 100
            for value in precipitation_per_month
        ]
        relative_state_precipitation = (
            precipitation_whole_year / total_country_precipitation) * 100
    else:
        # A station with no recorded rain would otherwise divide by zero.
        relative_monthly_precipitation = [0.0] * 12
        relative_state_precipitation = 0.0
    result_dictionary[chosen_station] = {
        'station': station_code,
        'state': info['State'],
        'totalMonthlyPrecipitation': precipitation_per_month,
        'relativeMonthlyPrecipitation': relative_monthly_precipitation,
        'totalYearlyPrecipitation': precipitation_whole_year,
        'relativeYearlyPrecipitation': relative_state_precipitation
    }
# Export the final data to a JSON file.
with open('result.json', 'w') as out_handle:
    json.dump(result_dictionary, out_handle, indent=4)
921e93f3d03ab704f6db0927c0533dda0cce1ff6 | Python | trinhvanson1997/rasa | /train_nlu.py | UTF-8 | 630 | 2.546875 | 3 | [] | no_license | from rasa_nlu import config
from rasa_nlu.model import Interpreter
from rasa_nlu.model import Trainer
from rasa_nlu.training_data import load_data
def train(data, config_file, model_dir):
    """Train a Rasa NLU model from `data` with `config_file` and persist it
    under `model_dir` using the fixed model name 'chat'."""
    nlu_config = config.load(config_file)
    examples = load_data(data)
    trainer = Trainer(nlu_config)
    trainer.train(examples, num_threads=3)
    trainer.persist(model_dir, fixed_model_name='chat')
train('data/nlu.md', 'nlu_config.yml', 'models/nlu')
interpreter = Interpreter.load('./models/nlu/default/chat')
# define function to ask question
def ask_question(text):
    """Parse `text` with the loaded NLU interpreter and print the result."""
    parsed = interpreter.parse(text)
    print(parsed)
ask_question('hello em')
ask_question('hi') | true |
ce4b685cf551097cfe75546f71775acb8b1e693e | Python | rmhsilva/CS110-Assignments-Python | /week02/rainbow.py | UTF-8 | 1,149 | 3.25 | 3 | [
"MIT"
] | permissive | import turtle
from turtle import *
wn = turtle.Screen()
def HSB2RGB(hues):
    """Convert a hue on a 0-100 scale into an [r, g, b] list.

    Saturation and brightness are implicitly 1; the input is first mapped
    onto 0..359 degrees of the colour wheel.
    """
    degrees = hues * 3.59          # map the 0..100 scale onto 0..359 degrees
    sector = int(degrees / 60) % 6
    f = degrees / 60 - sector
    # One (r, g, b) template per 60-degree sector of the colour wheel.
    templates = [
        (1, f, 0),
        (1 - f, 1, 0),
        (0, 1, f),
        (0, 1 - f, 1),
        (f, 0, 1),
        (1, 0, 1 - f),
    ]
    return list(templates[sector])
def rainbow():
    """Draw 100 slightly rotated, colour-shifted circles forming a rainbow band."""
    hues = 0.0
    color(1,0,0) # start with pure red
    hideturtle() # hide the turtle cursor while drawing
    speed(5)
    pensize(3)
    penup()
    goto(-650,-150) # lower-left anchor so the arcs span the window
    pendown()
    right(110)
    for i in range (100):
        circle(600) # one full circle of radius 600
        right(0.23) # small rotation so successive circles fan out into a band
        hues = hues + 1
        rgb = HSB2RGB(hues) # step the hue (0..100 scale) and fetch its RGB
        color(rgb[0],rgb[1],rgb[2])
    penup()
def main():
    """Set up the window, draw the rainbow, write the caption, run the event loop."""
    setup(1200, 800, 0, 0)
    bgcolor((64/255, 64/255, 1)) # light-blue sky background
    tracer(False) # disable animation while drawing
    rainbow() # draw the rainbow band
    tracer(False)
    goto(0,0)
    pendown()
    color('yellow')
    # "彩虹" means "rainbow"; the literal is rendered on screen, so it stays as-is.
    write("彩虹",align="center", font=("Script MT Bold", 80, "bold"))
    tracer(True)
    mainloop()
if __name__ == "__main__":
main()
| true |
233f18c1bf333e12ba3520ee88fb382145047e57 | Python | ju-sh/abbrv.jabref.org | /combineJournalLists.py | UTF-8 | 874 | 3.21875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] | permissive | #!/usr/bin/python
# Python script for combining several journal abbreviation lists
# and producing an alphabetically sorted list. If the same journal
# names are repeated, only the version found last is retained.
#
# Usage: combineJournalLists.py outfile infile1 infile2 ...
import sys
import fnmatch
import os

outFile = sys.argv[1]
dictionary = dict()
for inFile in sys.argv[2:]:
    count = 0
    # 'with' guarantees the input file is closed even if reading fails.
    with open(inFile, "r") as f:
        for line in f:
            # Only non-comment lines of the form "<name> = <abbrev>" count.
            if "=" in line and line[0] != "#":
                count = count + 1
                key = line.partition("=")[0].strip()
                dictionary[key] = line.strip()
    # print() as a function so the script runs on Python 2 and 3 alike
    # (the original used Python-2-only print statements and iterkeys()).
    print(inFile + ": " + str(count))
print("Combined key count: " + str(len(dictionary)))
with open(outFile, "w") as f:
    for key in sorted(dictionary):
        f.write(dictionary[key] + "\n")
| true |
99f3abddd4f5c00e310663adb772fe52203b0198 | Python | Namrata96/automatic-essay-and-grammar-scoring | /lstm_codefiles/lstm_classify_batch_test.py | UTF-8 | 5,755 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 13:57:51 2018
@author: nam
"""
from keras import backend as K
from keras import losses
from keras.optimizers import RMSprop, SGD, Adam
from keras.models import Model
from keras.layers import Input, Embedding, Dense, Bidirectional, LSTM,Dropout
import pickle, csv
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
from sklearn.metrics import confusion_matrix
VOCAB_SIZE = 44
DROPOUT_RATE = 0.5
EMBEDDING_SIZE = 45
LEARNING_RATE = 1e-2
NUMBER_EPOCHS = 3000
BATCH_SIZE = 10
ESSAY_SIZE = 300
filep = open('/home/nam/Desktop/AspiringMinds/dataset/num_to_word.txt','r')
num_to_word = pickle.load(filep)
filep.close()
def convert_to_words(m):
    """Translate the first row of a batch of index sequences back into words.

    Index 0 is the padding token and maps to the literal string 'pad'; every
    other index is looked up in the module-level num_to_word table.
    """
    row = m[0]
    return ['pad' if idx == 0 else num_to_word[idx] for idx in row]
# This function determines the number of unique tokens in the dataset.
def determine_vocab_size(corpus):
    """Count the unique whitespace-separated tokens across all essays.

    Stores the count in the module-level VOCAB_SIZE (as before) and now also
    returns it, so callers no longer have to read the global (the original
    returned None).
    """
    global VOCAB_SIZE
    vocab = {word for essay in corpus for word in essay.split()}
    VOCAB_SIZE = len(vocab)
    return VOCAB_SIZE
# One-hot embedding table: row i is the one-hot vector for token i, with the
# padding index 0 mapped to the all-zero row.  np.eye replaces the original
# O(V^2) pure-Python nested loop; dtype=int matches what np.asarray produced
# from the old lists of Python ints.
embedding_weights = np.eye(VOCAB_SIZE + 1, dtype=int)
embedding_weights[0, 0] = 0
# Creating the model
def lstm_model():
    """Build the essay classifier: frozen one-hot embedding, two stacked
    bidirectional LSTMs with dropout, and a 7-way softmax output
    (rounded scores 0..6, matching num_classes=7 used elsewhere)."""
    # Input is a fixed-length sequence of vocabulary indices.
    essay = Input(shape=(ESSAY_SIZE,), dtype='float32', name='essay')
    # Frozen one-hot lookup; weights come from the module-level identity table.
    embedding_layer = Embedding(output_dim=EMBEDDING_SIZE, input_dim=VOCAB_SIZE+1, input_length=ESSAY_SIZE, weights=[embedding_weights], name='embedding_layer', trainable=False)
    essay_embedded = embedding_layer(essay)
    first_lstm_layer = Bidirectional(LSTM(10,return_sequences=True, name='first_lstm'), merge_mode='concat')
    temp_out_1 = first_lstm_layer(essay_embedded)
    dropout_layer = Dropout(DROPOUT_RATE,name='first_dropout_layer')
    first_lstm_out = dropout_layer(temp_out_1)
    # NOTE(review): this inner LSTM is *also* named 'first_lstm' — looks like a
    # copy-paste slip; Keras may reject or uniquify duplicate names — confirm.
    second_lstm_layer = Bidirectional(LSTM(10, name='first_lstm'), merge_mode='concat')
    temp_out_2 = second_lstm_layer(first_lstm_out)
    dropout_layer = Dropout(DROPOUT_RATE,name='second_dropout_layer')
    second_lstm_out = dropout_layer(temp_out_2)
    dense_layer = Dense(7, name='output_layer',activation='softmax')
    out = dense_layer(second_lstm_out)
    model = Model(inputs=essay, outputs=out)
    return model
# This function takes mean of all essay lengths
def calculate_mean(X):
    """Return the average sequence length over X.

    Raises ZeroDivisionError for an empty X, exactly like the original.
    """
    combined_length = sum(len(essay) for essay in X)
    return combined_length / len(X)
# Loading training data and scores - padding and truncating shorter and larger essays
def pad_and_truncate():
    """Load pickled test essays and scores and pad/truncate the essays.

    Returns (padded_essays, scores) as float64 arrays; scores are rounded on
    load.  Also sets the module-level ESSAY_SIZE to the mean essay length as
    a side effect.
    """
    # Each pickle file holds many consecutive dumps; read until EOF.
    fp = open('/home/nam/Desktop/AspiringMinds/lstm_datafiles/lstm_test_data.txt','rb')
    X = []
    while True:
        try:
            X.append(pickle.load(fp))
        except EOFError:
            break
    wp = open('/home/nam/Desktop/AspiringMinds/lstm_datafiles/lstm_test_scores.txt', 'rb')
    y = []
    while True:
        try:
            y.append(np.round(pickle.load(wp)))
        except EOFError:
            break
    fp.close()
    wp.close()
    mean = calculate_mean(X) # use this to calculate the essay size
    global ESSAY_SIZE
    ESSAY_SIZE = mean
    # NOTE(review): although ESSAY_SIZE is computed above, padding uses a
    # hard-coded maxlen of 292 — confirm whether that is intentional.
    X_pt = []
    X_pt = pad_sequences(X,maxlen=292,truncating='post',dtype='float64')
    return X_pt, np.asarray(y,dtype='float64')
X_p, y = pad_and_truncate()
model = lstm_model()
rmsprop = RMSprop(lr=LEARNING_RATE, rho=0.9, epsilon=1e-7, decay=0.0)
model.load_weights("/home/nam/Desktop/AspiringMinds/lstm_datafiles/duplicate_data/duplicate_class_batch_train_weights_36.hdf5")
model.compile(optimizer = rmsprop, loss='mse', metrics=['accuracy'])
wp = open('/home/nam/Desktop/AspiringMinds/lstm_datafiles/duplicate_data/test_essays_results_classify_batch_more_dup.csv', 'w')
csvwriter = csv.writer(wp, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['essay'])
fp = open('/home/nam/Desktop/AspiringMinds/lstm_datafiles/duplicate_data/test_metrics_results_classify_batch_more_dup.csv', 'w')
csvwriter2 = csv.writer(fp, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvwriter2.writerow(['actual_score','predicted_score','loss'])
total_sum_mse = 0
total_sum = 0
total_sum_abs = 0
# Evaluating on entire test set
y_pred = model.predict(X_p)
metrics = model.evaluate(X_p,to_categorical(y,num_classes=7))
print model.metrics_names
print metrics
cm = confusion_matrix(y , y_pred.argmax(axis=1))
print cm
# For seeing the distribution of essays in each class
#import pandas as pd
#df=pd.Series(y)
#print df.value_counts()
# Evaluating each sample and storing the results
for x, score in zip(X_p,y):
print x
print score
if score<0.0:
continue
x = np.array(x)
x = x.reshape(1, x.shape[0])
score = np.array(score)
score = score.reshape(1)
metrics = model.evaluate(x,to_categorical(score, num_classes=7))
y_pred = model.predict(x)
y_pred = np.argmax(y_pred,axis=1)
m = convert_to_words(x)
total_sum_mse = total_sum_mse + metrics[0]
total_sum = total_sum + (score-y_pred)
total_sum_abs = total_sum_abs + abs(score-y_pred)
csvwriter.writerow([m])
csvwriter2.writerow([score,y_pred,metrics[0]])
csvwriter2.writerow(["MSE loss:", total_sum_mse])
csvwriter2.writerow(["Difference", total_sum])
csvwriter2.writerow(["AbsoluteDifference", total_sum_abs])
wp.close()
fp.close()
| true |
976cf5b5e9936162f3fa1a744f59d80cdf0517d5 | Python | kkikori/0723_analysis | /create_dummy_discussion/test.py | UTF-8 | 1,623 | 2.65625 | 3 | [] | no_license | import fetch_api
import simplejson as json
import csv
from pathlib import Path
def _csv_convert(thread_data):
posts_list = []
header = ["post_id", "sentence_id", "parent_id", "user_id", "title", "body"]
posts_list.append(header)
for post in thread_data["posts"]:
user_id = post["user"]
user_id = user_id["id"]
if post["in_reply_to"]:
rp = post["in_reply_to_id"]
rp = rp["Int64"]
title = None
else:
rp = None
title = thread_data["title"]
for s in post["sentences"]:
posts_list.append([post["id"], s["id"], rp, user_id, title, s["body"]])
return posts_list
def jp_dadta_create():
    """Download every thread from the API and save each as '<thread-id>.csv'.

    (The typo in the function name is kept — it is this module's public API.)
    """
    fn = Path("/Users/ida/Dropbox/AAAI/data_jp")  # output directory
    token = fetch_api.get_access_token("goat", "test")
    # fetch_api.create_thread(token, {"title": "好きな動物", "body": "私はゴーストです.こんばんは."})
    thi_list = fetch_api.get_thi_list(token)
    for thi in thi_list:
        thread_data = fetch_api.load_thread(token, thi)
        csv_data = _csv_convert(thread_data)
        fname = str(thi) + ".csv"
        f_save = fn /fname
        with f_save.open("w") as f:
            writer = csv.writer(f,lineterminator = "\n")
            writer.writerows(csv_data)
            f.close()  # redundant: the 'with' block already closes the file
def main():
    """One-off helper: create a single test thread via the API.

    The Japanese title/body literals are sent to the server verbatim.
    """
    fn = Path("/Users/ida/Dropbox/AAAI/data_jp")  # unused in this function
    token = fetch_api.get_access_token("goat", "test")
    fetch_api.create_thread(token, {"title": "好きな動物", "body": "私はゴーストです.こんばんは."})
if __name__ == "__main__":
jp_dadta_create()
| true |
b51fdcaec7d34ba2da5fa62dd1fe7061160edd4e | Python | AnvarM/python_algorithms | /sort_algorithms/big_array.py | UTF-8 | 343 | 3.5625 | 4 | [] | no_license | import random
def get_big_array():
    """Return 499999 pseudo-random ints, each drawn as int(random.random()*499999)."""
    return [int(random.random() * 499999) for _ in range(499999)]
def get_big_array_with_range_of_values():
    """Return 499999 pseudo-random ints drawn uniformly from [0..9]."""
    return [random.randint(0, 9) for _ in range(499999)]  # integers will be [0..9]
| true |
ae76abfb456dbd45746a861a93905f61e0484171 | Python | aguscerdo/183DB-phantombots | /maps/make_new_map.py | UTF-8 | 296 | 3.1875 | 3 | [] | no_license | import csv
import os
def make_map(n):
    """Write an n*n grid of "row,col" lines to '<n>x<n>.csv' and return its name."""
    file_name = '{0}x{0}.csv'.format(n)
    with open(file_name, 'w') as out:
        for row in range(n):
            for col in range(n):
                out.write("{},{}\n".format(row, col))
    return file_name
if __name__ == '__main__':
n = 6
make_map(n) | true |
bddf968a265c35095b721ae4fe340f2bceeb1ba6 | Python | v-datnvt2/Destroyer-v1.0 | /main.py | UTF-8 | 1,741 | 3.015625 | 3 | [] | no_license | import turtle
import time
import math
import random
import os
import time
from threading import Thread
import threading
from heros import Heros
from border import Border
from monster import Monsters
from score import Score
def isCollision(t1, t2):
    """True when the Euclidean distance between two sprites is under 20 pixels."""
    dx = t1.xcor() - t2.xcor()
    dy = t1.ycor() - t2.ycor()
    return math.sqrt(dx * dx + dy * dy) < 20
if __name__ == "__main__":
path = os.getcwd()
screen = turtle.Screen()
screen.setup(700,700)
screen.bgpic(path + "/image/background.png")
screen.title('Destroyer v1.0')
# make Heros
player = Heros(screen)
turtle.listen()
turtle.onkey(player.turnleft, 'Left')
turtle.onkey(player.turnright,'Right')
turtle.onkey(player.accelerate,'Up')
turtle.onkey(player.deccelerate,'Down')
# make Border
border = Border()
border.draw_border()
# Compute Score
score = Score()
#Init Monsters
monsterbox = []
for i in range(random.randint(6, 12)):
monsterbox.append(Monsters(screen))
#Process
check = False
while True:
# t = threading.Thread(target = player.move, args= ())
player.move()
# t.start()
# t.join()
for qv in monsterbox:
qv.move()
# t1 = threading.Thread(target = qv.move, args= ())
# t1.start()
# t1.join()
if isCollision(player, qv):
monsterbox.remove(qv)
qv.hideturtle()
score.changeScore()
if len(monsterbox) == 0:
check = True
if check:
player.hideturtle()
score.endgame()
time.sleep(5)
break
| true |
2c992440756b5d2beda0ef1a0b4ce52b71c3bccd | Python | chuubastis/CSC121 | /Lessons/mongCSC121midterm2.py | UTF-8 | 609 | 3.765625 | 4 | [] | no_license | scores = []
# Collect seven test scores from the user.
scoreLen = len(scores)  # kept from the original; not used afterwards
for _ in range(7):
    score = int(input("Give a students test score:"))
    scores.append(score)
print(scores)
scores.sort()
print("The highest score is:", scores[-1])

# Gather the passing scores (70 and above).
passScores = [mark for mark in scores if mark >= 70]
passnum = len(passScores)
print("The list of passing scores is:", passScores, "and the total number of those who passed is", passnum)

# Everyone receives a flat 5-point curve.
for i in range(7):
    scores[i] = scores[i] + 5
print("You get 5 points, and you get 5 points, EVERYONE GETS 5 POIIIIIIINTS!!!!!")
print("The new dummy-curved list is:", scores)
| true |
8e5570207454a5aa4f5b25bf5e08333497962c12 | Python | anupam2505/Code-Practice | /PlayerOrder.py | UTF-8 | 721 | 3.15625 | 3 | [] | no_license |
n = int (raw_input())
mat = []
for a0 in xrange(n):
S = list(str(raw_input()))
mat.append([])
mat[a0].append(a0+1)
for i in S:
mat[a0].append(i)
list = []
def matrix(mat, n, list):
    """Rank players by repeatedly extracting the row with the most 'W' marks.

    Each row is [player_id, result, result, ...]; only columns 1..n-1 are
    scanned.  The chosen row's id is appended to `list`, the row is removed
    from `mat` (which is mutated in place), and the function recurses with
    n-1.  Ties keep the earliest row, matching the original strict '<' update.
    """
    if len(mat) == 0:
        return list
    if len(mat) == 1:
        list.append(mat[0][0])
        return list

    def win_count(row):
        # Number of 'W' results in the scanned columns of this row.
        return sum(1 for col in range(1, n) if row[col] == 'W')

    best = max(range(len(mat)), key=lambda r: win_count(mat[r]))
    list.append(mat[best][0])
    del mat[best]
    return matrix(mat, n - 1, list)
a = matrix (mat,n, list)
print a | true |
0778ccfd4830cb34d032acaf51f4dfd764f07cb3 | Python | marlonrenzo/A01054879_1510_assignments | /A3/character.py | UTF-8 | 1,300 | 4.375 | 4 | [] | no_license | def get_character_name():
"""
Inquire the user to provide a name.
:return: a string
"""
name = input("What is your name?").capitalize()
print(f"Nice to meet you {name}\n")
return name
def create_character() -> dict:
    """Build and return a new player character.

    Prints the intro lore, prompts the player for a name (via
    get_character_name) and returns a dict with the keys 'Name', 'Alias',
    'HP', 'Inventory' and 'position'.
    :postcondition: creates a character as a dictionary of attributes
    :return: the information of a character as a dictionary
    """
    print("\nWelcome to the Dungeon of Kather. 'Ere are where young lads, like yourself, learn to become a warrior.\n"
          "It is tradition at Castor's Thrine that all younglings are placed within the walls of Kather to begin training.\n"
          "The Dungeon has 25 rooms for you to traverse through and eventually escape. Go forth and slash your way through \n"
          "young blood-seeker. There are monsters in this realm. Escape the dungeon with your life and you will be unstoppable.\n\n"
          "Let's begin by getting to know you.........")
    # HP looks like [current, max] and position like the starting cell — confirm with the game code.
    character = {'Name': get_character_name(), 'Alias': "You", 'HP': [10, 10], 'Inventory': [], 'position': {"x": 2, "y": 2}}
    return character
# Test dictionary: {'Name': 'Marlon', 'Alias': 'You', 'HP': [10, 10], 'Inventory': [], 'position': {"x": 2, "y": 2}}
| true |
75f5d0e5d150542eaa40044f61dca5172463c1e3 | Python | ms-kim520/Coding_Study | /swea_5122_linkedlist.py | UTF-8 | 639 | 3 | 3 | [] | no_license | T=int(input())
# SWEA 5122: for each test case, apply I(nsert)/D(elete)/C(hange) commands to
# a sequence, then report the element at index L (or -1 if out of range).
for test_case in range(1, T + 1):
    # N: sequence length, M: number of commands, L: index to report
    N, M, L = map(int, input().split())
    lst = list(map(int, input().split()))
    for _ in range(M):
        tokens = input().split()
        command = tokens.pop(0)
        args = [int(tok) for tok in tokens]
        idx = args[0]
        if command == "I":
            lst.insert(idx, args[1])  # insert args[1] in front of position idx
        elif command == "D":
            lst.pop(idx)              # delete the element at idx
        elif command == "C":
            lst[idx] = args[1]        # overwrite the element at idx
    if L < len(lst):
        print(f'#{test_case} {lst[L]}')
    else:
        print(f'#{test_case} -1')
| true |
503f113d246ca8fc41439aaf8c569e41010b8cdf | Python | e-dang/Web-Games | /tests/functional_tests/pages/home_page.py | UTF-8 | 567 | 2.859375 | 3 | [
"MIT"
] | permissive | from .base_page import BasePage
class HomePage(BasePage):
    """Page object for the site's landing page."""

    def has_correct_title(self):
        # The home page has no page-specific title fragment.
        return super().has_correct_title(None)

    def has_correct_header(self):
        return super().has_correct_header(None)

    def select_game_using_cards(self, game):
        """Click the card that launches `game` ('snake', 'sudoku' or 'tic-tac-toe')."""
        card_ids = {
            'snake': 'snakeCard',
            'sudoku': 'sudokuCard',
            'tic-tac-toe': 'ticTacToeCard',
        }
        card = self.driver.find_element_by_id(card_ids[game])
        card.click()

    def click_home_page(self):
        """Navigate home by clicking the nav-bar header."""
        self.driver.find_element_by_id('navBarHeader').click()
| true |
c940d0480aa2ab3a562acc7d72015970fbbf7d89 | Python | cassandrami/Server-Proxy | /proxy/proxy.py | UTF-8 | 5,994 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import sys
import itertools
import socket
import threading
from threading import Thread
from socket import socket as Socket
'''
class thread(threading.Thread):
def __init__(self, data):
threading.Thread.__init__(self)
self.data = data
def run(self):
'''
def main():
    """Accept proxy clients forever, handing each connection to a worker thread."""
    sys.stdout.flush()
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', default=2080, type=int,
                        help='Port to use')
    args = parser.parse_args()
    print(args.port)
    with Socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # does not work in python3 with -p
        server_socket.bind(('', args.port))
        server_socket.listen(0)  # backlog 0: rely on the OS minimum queue
        print("server ready")
        while True:
            connection_socket, address = server_socket.accept()
            # NOTE(review): this busy-waits at 100% CPU until a thread slot
            # frees up; a semaphore or thread pool would be kinder.
            while (threading.activeCount() >= 20):
                continue
            Thread(target = http_handle, args = (connection_socket, address) ).start()
    return 0  # unreachable: the accept loop above never exits normally
def http_handle(connection, address):
    """Handle one client request on `connection` (runs in its own thread).

    Parses the request line and headers, rejects anything that is not a GET
    (501 for the other known HTTP methods, 400 otherwise), and for GET
    requests fetches /index.html from the target host on port 80 and relays
    the reply back to the client.
    """

    def send_error(status, reason):
        # Build and send a minimal HTML error page, then close the client
        # socket.  This replaces three copy-pasted blocks in the original;
        # header lines now end in CRLF as HTTP requires (was bare '\n').
        body = "<html><body><h1>%d: %s </h1></body></html>" % (status, reason)
        response = "HTTP/1.0 %d %s\r\n" % (status, reason)
        response += "Content-Type: text/html; encoding=ascii\r\n"
        response += "Content-Length: %d\r\n" % len(body)
        response += "Connection: close\r\n"
        response += "\r\n"
        response += body
        connection.send(response.encode("ascii"))
        connection.close()

    request_string = connection.recv(1024).decode('ascii')
    print(request_string)
    print(address)

    # Parse the request line and headers.  Any malformed input (missing URL
    # token, header line without ': ') raises and is answered with 400.
    request_data = {}
    try:
        request_split = request_string.split("\r\n")
        request_data["request_type"] = request_split[0]
        for header_line in request_split[1:]:
            if header_line == "":
                continue
            parts = header_line.split(": ")
            request_data[parts[0]] = parts[1]  # IndexError here -> 400 below
        host = request_data["request_type"].split(" ")[1]
    except Exception:
        send_error(400, "Bad Request")
        return

    method_line = request_data["request_type"]
    if "GET" not in method_line:
        # Recognised-but-unsupported methods get 501, anything else 400.
        known_methods = ("HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT")
        if any(m in method_line for m in known_methods):
            send_error(501, "Not Implemented")
        else:
            send_error(400, "Bad Request")
        return

    try:
        # NOTE(review): like the original, this always fetches /index.html on
        # port 80 instead of the path the client actually requested.
        print(host)
        target = host.replace("http://", "").replace("https://", "")
        upstream = Socket(socket.AF_INET, socket.SOCK_STREAM)
        upstream.connect((target, 80))
        upstream.sendall(str.encode("GET /index.html HTTP/1.0\r\nHost: %s\r\n\r\n" % target))
        # A single recv keeps the original behaviour (at most 1024 bytes relayed).
        relayed = upstream.recv(1024).decode("ascii")
        upstream.close()
        connection.send(relayed.encode("ascii"))
        connection.close()
        return
    except Exception:
        send_error(400, "Bad Request")
        return
if __name__ == "__main__":
sys.exit(main())
| true |
6731b3c59aadff0bc3178867e795d8dc1d55f44d | Python | WojciechBogobowicz/UWr-Math-Students-Finder-with-SQL | /main.py | UTF-8 | 1,934 | 3.15625 | 3 | [] | no_license | import PySimpleGUI as sg
from logic import Logic
from windows import Windows
"DarkPurple"
'Topanga'
sg.theme('DarkTeal2')
l = Logic()
w = Windows()
layout = [
[sg.Button(' Aktualizuj baze '), sg.Button('Sprawdz aktualizacje ')],
[sg.Button('Znajdź wspólne grupy '), sg.Button('Zapisani na przedmiot')],
[sg.Button(' Dodaj znajomego '), sg.Button(' Exit ')],
]
window = sg.Window('Baza studentów', layout)
while True:
event, values = window.read()
print(event, values)
if event in (None, ' Exit '):
break
if event == ' Aktualizuj baze ':
login = sg.popup_get_text('Wprowadż login do USOSa:', 'Logowanie')
password = sg.popup_get_text('Wprowadż hasło do USOSa:', 'Logowanie', password_char='*')
sg.popup_ok("Baza jest aktualizowana, powiadomi, jak skonczy.", auto_close_duration=3, auto_close=True)
#sg.popup_auto_close
l.update_base(login, password)
sg.popup_ok("Aktualizowanie bazy zakończone!")
if event == 'Sprawdz aktualizacje ':
sg.popup_ok(l.update_message())
if event == 'Znajdź wspólne grupy ':
student_names = sg.popup_get_text("Podaj imię studenta", default_text="Jan Kowalski")
name, last_name = student_names.split(' ')
headers, table = l.find_student_table(name, last_name)
w.shared_groups(table, headers, name, last_name)
if event == 'Zapisani na przedmiot':
course_name = sg.popup_get_text("Wprowadź nazwę przedmiotu")
headers, table = l.find_course_table(course_name)
w.signed_for_course(table,headers,course_name)
if event == ' Dodaj znajomego ':
student_names = sg.popup_get_text("Podaj imię studenta", default_text="Jan Kowalski")
name, last_name = student_names.split(' ')
l.add_friend(name, last_name)
window.close()
l.close()
| true |
4a3c7d30e7ebe3ffdf6ff2ed69d300234bf05642 | Python | houchenAlan/Deep-Learning | /attention/test1.py | UTF-8 | 1,281 | 2.734375 | 3 | [] | no_license | from keras.datasets import imdb
from keras.preprocessing import sequence
from keras.layers import Dense,Embedding,SimpleRNN
from keras import Sequential
import matplotlib.pyplot as plt
# Hyper-parameters.
max_feature=10000  # vocabulary size: keep only the 10k most frequent words
max_len=500        # every review is padded/truncated to this many tokens
batch_size=32      # NOTE(review): defined but unused — model.fit uses 128 below
print('Loading data...')
(input_train,y_train),(input_test,y_test)=imdb.load_data(num_words=max_feature)
print(len(input_train),"train sequence")
print(len(input_test),"test sequence")
print("Pad sequences (samples x time)")
input_train=sequence.pad_sequences(input_train,maxlen=max_len)
input_test=sequence.pad_sequences(input_test,maxlen=max_len)
print("input_train shape:",input_train.shape)
print("input_test shape:",input_test.shape)
# Simple RNN sentiment classifier: embedding -> SimpleRNN -> sigmoid.
model=Sequential()
model.add(Embedding(max_feature,32))
model.add(SimpleRNN(32))
model.add(Dense(1,activation="sigmoid"))
model.compile(optimizer="rmsprop",loss="binary_crossentropy",metrics=['accuracy'])
history=model.fit(input_train,y_train,epochs=10,batch_size=128,validation_split=0.2)
# Training curves.  NOTE(review): 'acc'/'val_acc' are the old Keras history
# key names; newer versions use 'accuracy'/'val_accuracy' — confirm.
acc=history.history["acc"]
val_acc=history.history["val_acc"]
loss=history.history["loss"]
val_loss=history.history["val_loss"]
epochs=range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label="Training acc")
plt.plot(epochs,val_acc,"b",label="Validation acc")
plt.title("Training and validation accuracy")
plt.legend()
plt.show() | true |
fcb88de2a179f1ceceb03e9a414ccbb479b88942 | Python | raunakbhupal/FingerCounting-OpenCV | /Finger_counting.py | UTF-8 | 3,160 | 2.796875 | 3 | [] | no_license | import cv2
import numpy as np
from sklearn.metrics import pairwise
background =None
acc_weight = 0.5
roi_top=20
roi_bottom=300
roi_right =300
roi_left=600
def cal_acc_weight(frame, acc_weight):
    """Update the running background model with `frame`.

    The very first frame becomes the background; every later frame is blended
    into it with cv2.accumulateWeighted using the given weight.
    """
    global background
    if background is not None:
        # Blend the new frame into the existing background estimate.
        cv2.accumulateWeighted(frame, background, acc_weight)
    else:
        # First frame ever seen: it seeds the background model.
        background = frame.copy().astype('float')
def segment(frame,threshold_min=25):
    """Separate the hand from the learned background in a grayscale ROI frame.

    Returns (binary_threshold_image, largest_contour), or None when no
    contour is found.
    """
    # Absolute difference between the accumulated background and this frame.
    diff=cv2.absdiff(background.astype('uint8'),frame)
    ret,threshold=cv2.threshold(diff,threshold_min,255,cv2.THRESH_BINARY)
    # NOTE(review): cv2.findContours returns 3 values on OpenCV 3.x and 2 on
    # 4.x; this unpacking assumes the 2-value variant — confirm the version.
    contours,hierarchy=cv2.findContours(threshold.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours)==0:
        return None
    else:
        # Assume the biggest external contour is the hand.
        hand_segment=max(contours,key=cv2.contourArea)
        return (threshold,hand_segment)
def count_fingers(thershold, hand_segment):
    """Count extended fingers in a thresholded hand image.

    Finds the extreme points of the hand's convex hull, draws a circle around
    the estimated palm centre, and counts the contours that intersect the
    circle while lying above the wrist region.
    """
    conv_hull = cv2.convexHull(hand_segment)

    # Extreme points of the hull.  BUGFIX: bottom/right previously reused
    # argmin and therefore always duplicated top/left; they need argmax.
    top = tuple(conv_hull[conv_hull[:, :, 1].argmin()][0])
    bottom = tuple(conv_hull[conv_hull[:, :, 1].argmax()][0])
    left = tuple(conv_hull[conv_hull[:, :, 0].argmin()][0])
    right = tuple(conv_hull[conv_hull[:, :, 0].argmax()][0])

    # Palm centre estimate and the largest centre-to-extreme distance.
    cX = (left[0] + right[0]) // 2
    cY = (top[1] + bottom[1]) // 2
    distance = pairwise.euclidean_distances([(cX, cY)], Y=[left, right, top, bottom])[0]
    max_distance = distance.max()

    # Ring at 90% of that distance around the palm centre.
    radius = int(0.9 * max_distance)
    circumference = (2 * np.pi * radius)

    # BUGFIX: use the `thershold` parameter (sic) instead of the module-level
    # `threshold` that the original accidentally captured from the main loop.
    circular_roi = np.zeros(thershold.shape[:2], dtype="uint8")
    cv2.circle(circular_roi, (cX, cY), radius, 255, 10)
    circular_roi = cv2.bitwise_and(thershold, thershold, mask=circular_roi)

    # NOTE(review): cv2.findContours returns 3 values on OpenCV 3.x and 2 on
    # 4.x; this unpacking assumes the 2-value variant.
    contours, hierarchy = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    count = 0
    for cnt in contours:
        (x, y, w, h) = cv2.boundingRect(cnt)
        # Skip the wrist (contours reaching too far below the centre) and
        # anything covering too much of the circle to be a single finger.
        out_of_wrist = ((cY + (cY * 0.25)) > (y + h))
        limit_points = ((circumference * 0.25) > cnt.shape[0])
        if out_of_wrist and limit_points:
            count += 1
    return count
cam=cv2.VideoCapture(0)
num_frames=0
while True:
ret,frame=cam.read()
frame = cv2.flip(frame, 1)
frame_copy=frame.copy()
roi=frame[roi_top:roi_bottom,roi_right:roi_left]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
if num_frames < 60:
cal_acc_weight(gray, acc_weight)
if num_frames <= 59:
cv2.putText(frame_copy, "WAIT! GETTING BACKGROUND AVG.", (200,300), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
cv2.imshow("Finger Count",frame_copy)
else:
hand=segment(gray)
if hand is not None:
threshold, hand_segment = hand
cv2.drawContours(frame_copy, [hand_segment + (roi_right, roi_top)], -1, (255, 0, 0),1)
fingers = count_fingers(threshold,hand_segment)
cv2.putText(frame_copy, str(fingers), (70, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
cv2.imshow("Thesholded", threshold)
cv2.rectangle(frame_copy, (roi_left, roi_top), (roi_right, roi_bottom), (0,0,255), 5)
num_frames += 1
cv2.imshow("Finger Count", frame_copy)
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
cam.release()
cv2.destroyAllWindows() | true |
13f65f6e3e82c7095b5b1e793b816b28b987de48 | Python | jthunt13/Cambridge-Analytic-Facebook-Data-Mining | /src/dataClean/decodeTxt.py | UTF-8 | 833 | 2.546875 | 3 | [
"MIT"
] | permissive | import pandas as pd
import os
os.getcwd()
def reEncodeDirectory(f):
    """Write an ASCII-only copy of every text file named in `f`.

    Each input file is unicode-unescaped, stripped of non-ASCII characters
    and of leftover b'...' repr artefacts, and written to '<name>ascii.txt'
    in the current directory.
    """
    for i in range(len(f)):
        # strip .txt off of file names
        fname = f[i].replace(".txt", "")
        # BUGFIX: the original opened f[0] on every iteration, so every output
        # file received the *first* input file's contents.  Both handles are
        # now managed by 'with', so they are closed even on error.
        with open(f[i], encoding="utf-8") as src, \
                open(fname + "ascii.txt", "w") as dst:
            for line in src:
                line = str.encode(line).decode("unicode_escape").encode("ascii", errors="ignore")
                line = line.decode("unicode_escape").replace('b"b', "").replace("b'", "")
                dst.write(line)
# reencode facebook
os.chdir()
f = os.listdir()
reEncodeDirectory(f)
# reencode cambridgeAnalytic
os.chdir()
f = os.listdir()
f
reEncodeDirectory(f)
| true |
3c53406ee712adb6b8c1b6b07c79d195bd3b58ba | Python | basakrajarshi/Anaheim-Road-NAM | /anaheim_test_2.py | UTF-8 | 4,462 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 2 23:59:37 2018
@author: rajar
"""
import matplotlib.pyplot as plt
import networkx as nx
import operator as op
import numpy as np
import time
start_time = time.time()
# Read and store the graph for the weighted data
# Anaheim-flow-3.txt
graph1 = nx.read_edgelist("Transportation-Networks-Data/Anaheim/Anaheim-flow-3.txt",
data = False)
# This contains the edges with the volumes and the costs
graph2 = nx.read_edgelist("Transportation-Networks-Data/Anaheim/Anaheim-flow-3.txt",
create_using = nx.MultiGraph(),
nodetype = int,
data = [('volume',float),('cost',float)])
#print(len(graph2.nodes()))
#print(len(graph2.edges()))
# Compute the betweenness centrality for unweighted graph
bc_flow_uw = nx.edge_current_flow_betweenness_centrality(graph1,
normalized=True,
weight=None,
solver='full')
# Re-key the unweighted centralities with integer node ids: read_edgelist
# loaded graph1 with string node labels, so the raw keys are string pairs.
bc_flow_uw_correct = {(int(u), int(v)): value
                      for (u, v), value in bc_flow_uw.items()}
sorted_bc_flow_uw = sorted(bc_flow_uw_correct.items(),
key = op.itemgetter(1),
reverse=True)
# Compute the betweenness centrality for weighted graph
bc_flow_w = nx.edge_current_flow_betweenness_centrality(graph2,
normalized=True,
weight='cost',
solver='full')
sorted_bc_flow_w = sorted(bc_flow_w.items(),
key = op.itemgetter(1),
reverse=True)
#for e in graph1.edges():
# print(e)
#graph1.remove_edge('396', '410')
# Every ordered pair of distinct nodes (both (u, v) and (v, u) are kept).
all_pairs_1 = [(i, j)
               for i in graph1.nodes()
               for j in graph1.nodes()
               if i != j]
# Compute and store the sum of all dijkstra shortest paths
# divided by the number of node pairs
cost_uw_1 = 0
for k in all_pairs_1:
spl = nx.dijkstra_path_length(graph1, k[0], k[1])
cost_uw_1 += spl
cost_uw_bef = cost_uw_1/len(all_pairs_1)
# Compute the change in cost as a function of centraity of
# removed edge
edge_number = 0
change_in_path = {}
x = []
y = []
# Go through each edge in the graph
for edge_data in sorted_bc_flow_uw:
edge = edge_data [0]
#print('The edge is :', edge)
# Remove the edge under inspection from graph
#print(len(graph1.edges()))
graph1.remove_edge(str(edge[0]), str(edge[1]))
#print(len(graph1.edges()))
# For all pairs of nodes in the graph:
cost_uw_2 = 0
cost_uw_aft = 0
delta_c = 0
for k in all_pairs_1:
# Compute and store the dijkstra shortest paths
spl = nx.dijkstra_path_length(graph1, k[0], k[1])
# Find sum of all the shortest paths
cost_uw_2 += spl
# Divide by the number of node pairs in graph to normalize
cost_uw_aft = cost_uw_2/len(all_pairs_1)
#print('Cost after' , cost_uw_aft)
# Find change delta_c from sum of all paths before edge removal
delta_c = cost_uw_aft - cost_uw_bef
#print('Change :' , delta_c)
# Fetch edge centrality from bc_flow_uw dictionary
ec = bc_flow_uw_correct[edge]
#print('Edge centrality :' , ec)
# Store centrality (key) and delta_c (value) in a dictionary
change_in_path[(edge, ec)] = delta_c
x.append(ec)
y.append(delta_c)
# Add edge (without weight) back to the graph
graph1.add_edge(str(edge[0]), str(edge[1]))
#print(len(graph1.edges()))
# Count edges completed
edge_number += 1
print('Edge number :', edge_number)
#print()
# Plotting the figure
plt.scatter(x, y, alpha=0.5, color = 'b')
plt.xlabel('Betweenness Centrality of removed edge')
plt.ylabel('Change in shortest path length')
plt.title('Change in net shortest paths vs. Edge betweenness')
plt.savefig('Change_vs_Betweenness_Anaheim_Unweighted', dpi = 300)
plt.show()
elapsed_time = time.time() - start_time
print((elapsed_time/3600), 'hours')
| true |
f6f392ee74d49df96ade6eb7ccd3028929a4baca | Python | xw310/CS536 | /SVM Problems/svm1.py | UTF-8 | 1,021 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python3
#-*-coding:utf-8-*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Four points at the corners of a square with XOR-style labels
# (opposite corners share a class): not linearly separable
X = np.c_[(1, 1), (-1, -1), (-1, 1), (1, -1)].T
y = [-1, -1, 1, 1]

# Fit two kernel SVMs: degree-2 polynomial and Gaussian (RBF)
poly_svc = svm.SVC(kernel='poly', degree=2, coef0=1).fit(X, y)
Gaussian_svc = svm.SVC(kernel='rbf').fit(X, y)

# Build a dense grid (1-unit margin around the data) on which to
# evaluate each classifier for plotting the decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                     np.arange(y_min, y_max, 0.02))

titles = ['SVC with polynomial (degree 2) kernel','SVC with Gaussian kernel']
for i, clf in enumerate([poly_svc,Gaussian_svc]):
    # Predict every grid point and draw the filled decision regions
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Overlay the training points, colored by class
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
    plt.show()
| true |
084592a03e76ce95f6405af62aefcb2da4745abe | Python | yr0901/algo_yeeun | /SWEA/문제풀이/sw_1961.py | UTF-8 | 705 | 2.9375 | 3 | [] | no_license | #숫자 배열 회전
import sys
sys.stdin=open('input.txt','r')
tcase=int(input())
for tc in range(tcase):
    N=int(input())
    # N x N matrix read row by row
    arr=[list(input().split()) for _ in range(N)]
    new=[[0 for _ in range(N)] for _ in range(N)]
    anwlist=[]
    # Rotate three times, collecting each rotation's rows as strings
    for n in range(3):
        for x in range(N):  # rotate 90 degrees clockwise
            for y in range(N):
                new[x][N-1-y]=arr[y][x]
        for temp in new:
            anwlist.append(''.join(temp))
        arr=new
        new = [[0 for _ in range(N)] for _ in range(N)]
    print('#{}'.format(tc + 1))
    # Print row k of the 90/180/270-degree rotations side by side
    for k in range(N):
        for i in range(len(anwlist)):
            if i%N==k:
                print(anwlist[i], end=' ')
        print()
| true |
7f29c94df267a6b67dab8220a8a891aabcdc8281 | Python | Ww2Zero/show-you-my-codes | /016/py016.py | UTF-8 | 1,103 | 3.265625 | 3 | [] | no_license | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# author: Ww2Zero
# Date: 2017/02/26
# Time: 12:57
# Blog: Ww2zero.github.io
# Function description
#**第 0016 题:** 纯文本文件 numbers.txt, 里面的内容(包括方括号)如下所示:
# [
# [1, 82, 65535],
# [20, 90, 13],
# [26, 809, 1024]
# ]
import json
import os

import xlwt
class txtToXls(object):
    """Load a JSON-formatted text file and save its 2D contents to an Excel sheet.

    The loaded data is expected to be a list of rows (each row a list of
    values), e.g. the contents of numbers.txt.
    """

    def __init__(self):
        # parsed JSON data (list of rows) once loadTxt has been called
        self.jsondata = None
        # base name of the loaded file, later used as the worksheet name
        self.filename = None

    def loadTxt(self, jsonfilename):
        """Read and parse the JSON text file at `jsonfilename`.

        Stores the parsed data on self.jsondata and the file's base name
        (without extension) on self.filename.
        """
        # use the base name without extension so paths containing
        # directories or extra dots still yield a valid sheet name
        self.filename = os.path.splitext(os.path.basename(jsonfilename))[0]
        # json.load() no longer accepts an ``encoding`` argument in
        # Python 3 (it raises TypeError on 3.9+); set the encoding when
        # opening the file instead
        with open(jsonfilename, 'r', encoding='utf-8') as f:
            self.jsondata = json.load(f)
        print(self.jsondata)

    def saveToExcel(self, file_name):
        """Write the loaded rows to `file_name` as an .xls workbook."""
        excel = xlwt.Workbook(encoding='utf8')
        table = excel.add_sheet(self.filename)
        for i, row in enumerate(self.jsondata):
            for j, value in enumerate(row):
                table.write(i, j, value)
        excel.save(file_name)
if __name__ == '__main__':
    # Demo: read numbers.txt from the working directory and write numbers.xls
    tx = txtToXls()
    tx.loadTxt('numbers.txt')
    tx.saveToExcel('numbers.xls')
| true |
55f9f43f0862cbf25747e33298ec7fc1f76ee6b2 | Python | RadioAstronomySoftwareGroup/pyuvdata | /pyuvdata/uvflag/uvflag.py | UTF-8 | 168,093 | 2.625 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Primary container for radio interferometer flag manipulation."""
import copy
import os
import pathlib
import threading
import warnings
import h5py
import numpy as np
from .. import parameter as uvp
from .. import telescopes as uvtel
from .. import utils as uvutils
from ..uvbase import UVBase
from ..uvcal import UVCal
from ..uvdata import UVData
__all__ = ["UVFlag", "flags2waterfall", "and_rows_cols"]
_future_array_shapes_warning = (
"The shapes of several attributes will be changing in the future to remove the "
"deprecated spectral window axis. You can call the `use_future_array_shapes` "
"method to convert to the future array shapes now or set the parameter of the same "
"name on this method to both convert to the future array shapes and silence this "
"warning."
)
def and_rows_cols(waterfall):
    """Perform logical and over rows and cols of a waterfall.

    For a 2D flag waterfall, flag pixels only if fully flagged along
    time and/or frequency

    Parameters
    ----------
    waterfall : 2D boolean array of shape (Ntimes, Nfreqs)

    Returns
    -------
    wf : 2D array
        A 2D array (size same as input) where only times/integrations
        that were fully flagged are flagged.

    """
    n_times, n_freqs = waterfall.shape
    collapsed = np.zeros_like(waterfall, dtype=np.bool_)
    # fraction of flagged entries along each axis
    freq_flag_frac = np.sum(waterfall, axis=0) / n_times
    time_flag_frac = np.sum(waterfall, axis=1) / n_freqs
    # keep only channels / integrations that are flagged everywhere
    collapsed[:, freq_flag_frac == 1] = True
    collapsed[time_flag_frac == 1, :] = True
    return collapsed
def flags2waterfall(uv, flag_array=None, keep_pol=False):
    """Convert a flag array to a 2D waterfall of dimensions (Ntimes, Nfreqs).

    Averages over baselines and polarizations (in the case of visibility data),
    or antennas and jones parameters (in case of calibrationd data).

    Parameters
    ----------
    uv : A UVData or UVCal object
        Object defines the times and frequencies, and supplies the
        flag_array to convert (if flag_array not specified)
    flag_array : Optional,
        flag array to convert instead of uv.flag_array.
        Must have same dimensions as uv.flag_array.
    keep_pol : bool
        Option to keep the polarization axis intact.

    Returns
    -------
    waterfall : 2D array or 3D array
        Waterfall of averaged flags, for example fraction of baselines
        which are flagged for every time and frequency (in case of UVData input)
        Size is (Ntimes, Nfreqs) or (Ntimes, Nfreqs, Npols).

    """
    if not isinstance(uv, (UVData, UVCal)):
        raise ValueError(
            "flags2waterfall() requires a UVData or UVCal object as "
            "the first argument."
        )
    if flag_array is None:
        flag_array = uv.flag_array
    if uv.flag_array.shape != flag_array.shape:
        raise ValueError("Flag array must align with UVData or UVCal object.")

    is_cal = isinstance(uv, UVCal)

    # Build the tuple of axes to average over: always the first axis
    # (antennas or baseline-times), the spw axis for current shapes, and
    # the polarization/jones axis unless keep_pol is set.
    collapse_axes = [0]
    if not uv.future_array_shapes:
        collapse_axes.append(1)
    if not keep_pol:
        if is_cal:
            collapse_axes.append(3 if uv.future_array_shapes else 4)
        else:
            collapse_axes.append(2 if uv.future_array_shapes else 3)
    collapse_axes = tuple(collapse_axes)

    if is_cal:
        # Calibration arrays already separate time and frequency axes; just
        # average and reorder to put time first.
        averaged = np.mean(flag_array, axis=collapse_axes)
        if keep_pol:
            waterfall = np.swapaxes(averaged, 0, 1)
        else:
            waterfall = averaged.T
    else:
        # Visibility arrays mix baseline and time along the first axis, so
        # average each unique time's rows separately.
        out_shape = (uv.Ntimes, uv.Nfreqs, uv.Npols) if keep_pol else (uv.Ntimes, uv.Nfreqs)
        waterfall = np.zeros(out_shape)
        for t_ind, t_val in enumerate(np.unique(uv.time_array)):
            waterfall[t_ind, :] = np.mean(
                flag_array[uv.time_array == t_val], axis=collapse_axes
            )

    return waterfall
class UVFlag(UVBase):
"""Object to handle flag arrays and waterfalls for interferometric datasets.
Supports reading/writing, and stores all relevant information to combine
flags and apply to data.
Initialization of the UVFlag object requires some parameters. Metadata is
copied from indata object. If indata is subclass of UVData or UVCal,
the weights_array will be set to all ones.
Lists or tuples are iterated through, treating each entry with an
individual UVFlag init.
Parameters
----------
indata : UVData, UVCal, str, pathlib.Path, list of compatible combination
Input to initialize UVFlag object. If str, assumed to be path to previously
saved UVFlag object. UVData and UVCal objects cannot be directly combined,
unless waterfall is True.
mode : {"metric", "flag"}, optional
The mode determines whether the object has a floating point metric_array
or a boolean flag_array.
copy_flags : bool, optional
Whether to copy flags from indata to new UVFlag object
waterfall : bool, optional
Whether to immediately initialize as a waterfall object, with flag/metric
axes: time, frequency, polarization.
history : str, optional
History string to attach to object.
extra_keywords : dict, optional
A dictionary of metadata values not explicitly specified by another
parameter.
label: str, optional
String used for labeling the object (e.g. 'FM').
use_future_array_shapes : bool
Option to convert to the future planned array shapes before the changes go
into effect by removing the spectral window axis.
run_check : bool
Option to check for the existence and proper shapes of parameters
after creating UVFlag object.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
creating UVFlag object.
Attributes
----------
UVParameter objects :
For full list see the UVFlag Parameters Documentation.
(https://pyuvdata.readthedocs.io/en/latest/uvflag_parameters.html)
Some are always required, some are required for certain phase_types
and others are always optional.
"""
def __init__(
    self,
    indata=None,
    mode="metric",
    copy_flags=False,
    waterfall=False,
    history="",
    label="",
    use_future_array_shapes=False,
    run_check=True,
    check_extra=True,
    run_check_acceptability=True,
):
    """Initialize the object (see the class docstring for parameter details)."""
    # --- core bookkeeping parameters ---
    desc = (
        "The mode determines whether the object has a "
        "floating point metric_array or a boolean flag_array. "
        'Options: {"metric", "flag"}. Default is "metric".'
    )
    self._mode = uvp.UVParameter(
        "mode",
        description=desc,
        form="str",
        expected_type=str,
        acceptable_vals=["metric", "flag"],
    )
    desc = (
        "String used for labeling the object (e.g. 'FM'). "
        "Default is empty string."
    )
    self._label = uvp.UVParameter(
        "label", description=desc, form="str", expected_type=str
    )
    desc = (
        "The type of object defines the form of some arrays "
        "and also how metrics/flags are combined. "
        "Accepted types:'waterfall', 'baseline', 'antenna'"
    )
    self._type = uvp.UVParameter(
        "type",
        description=desc,
        form="str",
        expected_type=str,
        acceptable_vals=["antenna", "baseline", "waterfall"],
    )
    # --- axis-length parameters ---
    self._Ntimes = uvp.UVParameter(
        "Ntimes", description="Number of times", expected_type=int
    )
    desc = "Number of baselines. Only Required for 'baseline' type objects."
    self._Nbls = uvp.UVParameter(
        "Nbls", description=desc, expected_type=int, required=False
    )
    self._Nblts = uvp.UVParameter(
        "Nblts",
        description="Number of baseline-times "
        "(i.e. number of spectra). Not necessarily "
        "equal to Nbls * Ntimes",
        expected_type=int,
    )
    self._Nspws = uvp.UVParameter(
        "Nspws",
        description="Number of spectral windows "
        "(ie non-contiguous spectral chunks).",
        expected_type=int,
    )
    self._Nfreqs = uvp.UVParameter(
        "Nfreqs", description="Number of frequency channels", expected_type=int
    )
    self._Npols = uvp.UVParameter(
        "Npols", description="Number of polarizations", expected_type=int
    )
    # --- data-like parameters; which are required depends on `mode` ---
    desc = (
        "Floating point metric information, only available in metric mode. "
        "The shape depends on the `type` parameter and on the "
        "`future_array_shapes` parameter. For 'baseline' type objects, "
        "the shape is (Nblts, 1, Nfreq, Npols) or (Nblts, Nfreqs, Npols) if "
        "future_array_shapes=True. For 'antenna' type objects, the shape is "
        "(Nants_data, 1, Nfreqs, Ntimes, Npols) or "
        "(Nants_data, Nfreqs, Ntimes, Npols) if future_array_shapes=True. "
        "For 'waterfall' type objects, the shape is (Ntimes, Nfreq, Npols)."
    )
    self._metric_array = uvp.UVParameter(
        "metric_array",
        description=desc,
        form=("Nblts", 1, "Nfreqs", "Npols"),
        expected_type=float,
        required=False,
    )
    desc = (
        "Boolean flag, True is flagged, only available in flag mode. "
        "The shape depends on the `type` parameter and on the "
        "`future_array_shapes` parameter. For 'baseline' type objects, "
        "the shape is (Nblts, 1, Nfreq, Npols) or (Nblts, Nfreqs, Npols) if "
        "future_array_shapes=True. For 'antenna' type objects, the shape is "
        "(Nants_data, 1, Nfreqs, Ntimes, Npols) or "
        "(Nants_data, Nfreqs, Ntimes, Npols) if future_array_shapes=True. "
        "For 'waterfall' type objects, the shape is (Ntimes, Nfreq, Npols)."
    )
    self._flag_array = uvp.UVParameter(
        "flag_array",
        description=desc,
        form=("Nblts", 1, "Nfreqs", "Npols"),
        expected_type=bool,
        required=False,
    )
    desc = (
        "Floating point weight information, only available in metric mode."
        "The shape depends on the `type` parameter and on the "
        "`future_array_shapes` parameter. For 'baseline' type objects, "
        "the shape is (Nblts, 1, Nfreq, Npols) or (Nblts, Nfreqs, Npols) if "
        "future_array_shapes=True. For 'antenna' type objects, the shape is "
        "(Nants_data, 1, Nfreqs, Ntimes, Npols) or "
        "(Nants_data, Nfreqs, Ntimes, Npols) if future_array_shapes=True. "
        "For 'waterfall' type objects, the shape is (Ntimes, Nfreq, Npols)."
    )
    self._weights_array = uvp.UVParameter(
        "weights_array",
        description=desc,
        form=("Nblts", 1, "Nfreqs", "Npols"),
        expected_type=float,
    )
    desc = (
        "Floating point weight information about sum of squares of weights "
        "when weighted data is converted from baseline to waterfall type."
        "Only available in metric mode, the shape is (Nfreq, Ntimes, Npols)."
    )
    # TODO: should this be set to None when converting back to baseline or antenna?
    # If not, should the shape be adjusted?
    self._weights_square_array = uvp.UVParameter(
        "weights_square_array",
        description=desc,
        form=("Ntimes", "Nfreqs", "Npols"),
        expected_type=float,
        required=False,
    )
    # --- time/frequency/baseline axis parameters ---
    desc = (
        "Array of times in Julian Date, center of integration. The shape depends "
        "on the `type` parameter. For 'baseline' type object, shape is (Nblts), "
        "for 'antenna' and 'waterfall' type objects, shape is (Ntimes)."
    )
    self._time_array = uvp.UVParameter(
        "time_array",
        description=desc,
        form=("Nblts",),
        expected_type=float,
        tols=1e-3 / (60.0 * 60.0 * 24.0),
    )  # 1 ms in days
    desc = (
        "Array of lsts radians, center of integration. The shape depends "
        "on the `type` parameter. For 'baseline' type object, shape is (Nblts), "
        "for 'antenna' and 'waterfall' type objects, shape is (Ntimes)."
    )
    self._lst_array = uvp.UVParameter(
        "lst_array",
        description=desc,
        form=("Nblts",),
        expected_type=float,
        tols=uvutils.RADIAN_TOL,
    )
    desc = (
        "Array of first antenna numbers, shape (Nblts). Only available for "
        "'baseline' type objects."
    )
    self._ant_1_array = uvp.UVParameter(
        "ant_1_array",
        description=desc,
        expected_type=int,
        form=("Nblts",),
        required=False,
    )
    desc = (
        "Array of second antenna numbers, shape (Nblts). Only available for "
        "'baseline' type objects."
    )
    self._ant_2_array = uvp.UVParameter(
        "ant_2_array",
        description=desc,
        expected_type=int,
        form=("Nblts",),
        required=False,
    )
    desc = (
        "Array of antenna numbers, shape (Nants_data), only available for "
        "'antenna' type objects. "
    )
    self._ant_array = uvp.UVParameter(
        "ant_array",
        description=desc,
        expected_type=int,
        form=("Nants_data",),
        required=False,
    )
    desc = (
        "Array of baseline indices, shape (Nblts). "
        "Only available for 'baseline' type objects. "
        "type = int; baseline = 2048 * ant1 + ant2 + 2^16"
    )
    self._baseline_array = uvp.UVParameter(
        "baseline_array",
        description=desc,
        expected_type=int,
        form=("Nblts",),
        required=False,
    )
    desc = (
        "Array of frequencies in Hz, center of the channel. Shape (1, Nfreqs) or "
        "(Nfreqs,) if type is 'waterfall' or if future_array_shapes=True."
    )
    self._freq_array = uvp.UVParameter(
        "freq_array",
        description=desc,
        form=(1, "Nfreqs"),
        expected_type=float,
        tols=1e-3,
    )  # mHz
    desc = "Width of frequency channels (Hz). Shape (Nfreqs,), type = float."
    self._channel_width = uvp.UVParameter(
        "channel_width",
        description=desc,
        form=("Nfreqs",),
        expected_type=float,
        tols=1e-3,
    )  # 1 mHz
    self._spw_array = uvp.UVParameter(
        "spw_array",
        description="Array of spectral window numbers, shape (Nspws).",
        form=("Nspws",),
        expected_type=int,
    )
    desc = (
        "Required if Nspws > 1 and will always be required starting in "
        "version 3.0. Maps individual channels along the "
        "frequency axis to individual spectral windows, as listed in the "
        "spw_array. Shape (Nfreqs), type = int."
    )
    self._flex_spw_id_array = uvp.UVParameter(
        "flex_spw_id_array",
        description=desc,
        form=("Nfreqs",),
        expected_type=int,
        required=False,
    )
    desc = (
        "Array of polarization integers, shape (Npols). "
        "AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); "
        "circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). "
        "NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations "
        '"Stokes", but this is inaccurate as visibilities cannot be in '
        "true Stokes polarizations for physical antennas. We adopt the "
        "term pseudo-Stokes to refer to linear combinations of instrumental "
        "visibility polarizations (e.g. pI = xx + yy)."
    )
    self._polarization_array = uvp.UVParameter(
        "polarization_array",
        description=desc,
        expected_type=int,
        acceptable_vals=list(np.arange(-8, 0)) + list(np.arange(1, 5)),
        form=("Npols",),
    )
    self._telescope_name = uvp.UVParameter(
        "telescope_name",
        description="Name of telescope or array (string).",
        form="str",
        expected_type=str,
    )
    # Fix: previously the description passed here was the stale `desc`
    # left over from polarization_array; give telescope_location its own.
    desc = (
        "Telescope location: xyz position in ITRF (earth-centered frame) "
        "in meters."
    )
    self._telescope_location = uvp.LocationParameter(
        "telescope_location",
        description=desc,
        acceptable_range=(6.35e6, 6.39e6),
        tols=1e-3,
    )
    self._history = uvp.UVParameter(
        "history",
        description="String of history, units English",
        form="str",
        expected_type=str,
    )
    desc = (
        "Any user supplied extra keywords, type=dict."
        "Use the special key 'comment' for long multi-line string comments."
        "Default is an empty dictionary."
    )
    self._extra_keywords = uvp.UVParameter(
        "extra_keywords",
        required=False,
        description=desc,
        value={},
        spoof_val={},
        expected_type=dict,
    )
    desc = "Flag indicating that this object is using the future array shapes."
    self._future_array_shapes = uvp.UVParameter(
        "future_array_shapes", description=desc, expected_type=bool, value=False
    )
    # ---antenna information ---
    desc = (
        "Number of antennas in the array. Only available for 'baseline' type "
        "objects, used for calculating baseline numbers. "
        "May be larger than the number of antennas with data."
    )
    self._Nants_telescope = uvp.UVParameter(
        "Nants_telescope", description=desc, expected_type=int, required=False
    )
    desc = (
        "Number of antennas with data present. "
        "Only available for 'baseline' or 'antenna' type objects."
        "May be smaller than the number of antennas in the array"
    )
    self._Nants_data = uvp.UVParameter(
        "Nants_data", description=desc, expected_type=int, required=False
    )
    desc = (
        "List of antenna names, shape (Nants_telescope), with numbers given by "
        "antenna_numbers (which can be matched to ant_1_array and ant_2_array for "
        "baseline type or ant_array for antenna type objects). Required for "
        "baseline or antenna type objects. There must be one entry here for each "
        "unique entry in ant_1_array and ant_2_array (for baseline type) or "
        "ant_array (for antenna type), but there may be extras as well. "
    )
    self._antenna_names = uvp.UVParameter(
        "antenna_names",
        description=desc,
        form=("Nants_telescope",),
        expected_type=str,
    )
    desc = (
        "List of integer antenna numbers corresponding to antenna_names, "
        "shape (Nants_telescope). Required for baseline or antenna type objects. "
        "There must be one entry here for each unique entry in ant_1_array and "
        "ant_2_array (for baseline type) or ant_array (for antenna type), but "
        "there may be extras as well. Note that these are not indices -- they do "
        "not need to start at zero or be continuous."
    )
    self._antenna_numbers = uvp.UVParameter(
        "antenna_numbers",
        description=desc,
        form=("Nants_telescope",),
        expected_type=int,
    )
    desc = (
        "Array giving coordinates of antennas relative to "
        "telescope_location (ITRF frame), shape (Nants_telescope, 3), "
        "units meters. See the tutorial page in the documentation "
        "for an example of how to convert this to topocentric frame."
    )
    self._antenna_positions = uvp.UVParameter(
        "antenna_positions",
        description=desc,
        form=("Nants_telescope", 3),
        expected_type=float,
        tols=1e-3,  # 1 mm
    )
    # --extra information ---
    desc = (
        "Orientation of the physical dipole corresponding to what is "
        'labelled as the x polarization. Options are "east" '
        '(indicating east/west orientation) and "north" (indicating '
        "north/south orientation)"
    )
    self._x_orientation = uvp.UVParameter(
        "x_orientation",
        description=desc,
        required=False,
        expected_type=str,
        acceptable_vals=["east", "north"],
    )
    desc = (
        "List of strings containing the unique basenames (not the full path) of "
        "input files."
    )
    self._filename = uvp.UVParameter(
        "filename", required=False, description=desc, expected_type=str
    )

    # initialize the underlying UVBase properties
    super(UVFlag, self).__init__()

    self.history = ""  # Added to at the end

    self.label = ""  # Added to at the end
    # --- dispatch on the type of `indata` ---
    if isinstance(indata, (list, tuple)):
        # initialize from the first entry, then add the rest in turn
        self.__init__(
            indata[0],
            mode=mode,
            copy_flags=copy_flags,
            waterfall=waterfall,
            history=history,
            label=label,
            use_future_array_shapes=use_future_array_shapes,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
        )
        if len(indata) > 1:
            for i in indata[1:]:
                fobj = UVFlag(
                    i,
                    mode=mode,
                    copy_flags=copy_flags,
                    waterfall=waterfall,
                    history=history,
                    use_future_array_shapes=use_future_array_shapes,
                    run_check=run_check,
                    check_extra=check_extra,
                    run_check_acceptability=run_check_acceptability,
                )
                self.__add__(
                    fobj,
                    run_check=run_check,
                    inplace=True,
                    check_extra=check_extra,
                    run_check_acceptability=run_check_acceptability,
                )
            del fobj

    elif issubclass(indata.__class__, (str, pathlib.Path)):
        # Given a path, read indata
        self.read(
            indata,
            history,
            use_future_array_shapes=use_future_array_shapes,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
        )
    elif issubclass(indata.__class__, UVData):
        self.from_uvdata(
            indata,
            mode=mode,
            copy_flags=copy_flags,
            waterfall=waterfall,
            history=history,
            label=label,
            use_future_array_shapes=use_future_array_shapes,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
        )

    elif issubclass(indata.__class__, UVCal):
        self.from_uvcal(
            indata,
            mode=mode,
            copy_flags=copy_flags,
            waterfall=waterfall,
            history=history,
            label=label,
            use_future_array_shapes=use_future_array_shapes,
            run_check=run_check,
            check_extra=check_extra,
            run_check_acceptability=run_check_acceptability,
        )

    elif indata is not None:
        raise ValueError(
            "input to UVFlag.__init__ must be one of: "
            "list, tuple, string, pathlib.Path, UVData, or UVCal."
        )
@property
def _data_params(self):
    """List of strings giving the data-like parameters."""
    mode = getattr(self, "mode", None)
    if mode is None:
        return None
    if mode == "flag":
        return ["flag_array"]
    if mode == "metric":
        params = ["metric_array", "weights_array"]
        # weights_square_array only exists after baseline->waterfall collapse
        if self.weights_square_array is not None:
            params.append("weights_square_array")
        return params
    # any other mode is invalid
    raise ValueError(
        "Invalid mode. Mode must be one of "
        + ", ".join(["{}"] * len(self._mode.acceptable_vals)).format(
            *self._mode.acceptable_vals
        )
    )
@property
def data_like_parameters(self):
    """Return iterator of defined parameters which are data-like."""
    yield from (
        getattr(self, param) for param in self._data_params if hasattr(self, param)
    )
@property
def pol_collapsed(self):
    """Whether the polarization axis of this object has been collapsed."""
    pol_arr = getattr(self, "polarization_array", None)
    if pol_arr is None:
        return False
    # collapsed-pol objects store string labels rather than integer codes
    return isinstance(pol_arr.item(0), str)
def _check_pol_state(self):
    """Adjust the polarization parameter checks to match the current pol state."""
    pol_param = self._polarization_array
    if self.pol_collapsed:
        # collapsed pol objects have a different type for
        # the polarization array.
        pol_param.expected_type = str
        pol_param.acceptable_vals = None
    else:
        pol_param.expected_type = uvp._get_generic_type(int)
        pol_param.acceptable_vals = list(np.arange(-8, 0)) + list(np.arange(1, 5))
def _set_future_array_shapes(self):
    """
    Set future_array_shapes to True and adjust required parameters.

    This method should not be called directly by users; instead it is called
    by file-reading methods and `use_future_array_shapes` to indicate the
    `future_array_shapes` is True and define expected parameter shapes.
    """
    self.future_array_shapes = True
    self._freq_array.form = ("Nfreqs",)
    # expected data shapes lose the length-1 spw axis for non-waterfall types
    if self.type == "baseline":
        new_form = ("Nblts", "Nfreqs", "Npols")
    elif self.type == "antenna":
        new_form = ("Nants_data", "Nfreqs", "Ntimes", "Npols")
    else:
        new_form = None
    if new_form is not None:
        for param_name in ["metric_array", "weights_array", "flag_array"]:
            getattr(self, "_" + param_name).form = new_form
def use_future_array_shapes(self):
    """
    Change the array shapes of this object to match the planned future shapes.

    This method allows users to convert to the planned array shape changes
    before they go into effect, setting the `future_array_shapes`
    parameter on this object to True.
    """
    if self.future_array_shapes:
        # already converted; nothing to do
        return

    self._set_future_array_shapes()
    if self.type != "waterfall":
        # drop the length-1 spectral window axis from the data-like arrays
        for param_name in self._data_params:
            if param_name != "weights_square_array":
                setattr(self, param_name, getattr(self, param_name)[:, 0])
        # drop the length-1 leading axis from the frequency array as well
        self.freq_array = self.freq_array[0, :]
def use_current_array_shapes(self):
    """
    Change the array shapes of this object back to the current (spw-axis) shapes.

    This method allows users to convert back to the current array shapes,
    setting the `future_array_shapes` parameter on this object to False.
    """
    warnings.warn(
        "This method will be removed in version 3.0 when the current array shapes "
        "are no longer supported.",
        DeprecationWarning,
    )
    if not self.future_array_shapes:
        return

    self.future_array_shapes = False
    if self.type != "waterfall":
        # restore the expected forms that include the length-1 spw axis
        form_map = {
            "baseline": ("Nblts", 1, "Nfreqs", "Npols"),
            "antenna": ("Nants_data", 1, "Nfreqs", "Ntimes", "Npols"),
        }
        if self.type in form_map:
            for param_name in ["metric_array", "weights_array", "flag_array"]:
                getattr(self, "_" + param_name).form = form_map[self.type]
        # re-insert the length-1 spw axis into the data-like arrays
        for param_name in self._data_params:
            if param_name == "weights_square_array":
                continue
            setattr(
                self, param_name, getattr(self, param_name)[:, np.newaxis, :, :]
            )
        self._freq_array.form = (1, "Nfreqs")
        self.freq_array = self.freq_array[np.newaxis, :]
def _set_mode_flag(self):
    """Switch this object into flag mode, updating the required parameters."""
    self.mode = "flag"
    # only the flag array is required in flag mode
    self._flag_array.required = True
    for param in (self._metric_array, self._weights_array):
        param.required = False
    # weights squared are only meaningful for metrics; discard if present
    if self.weights_square_array is not None:
        self.weights_square_array = None
    return
def _set_mode_metric(self):
    """Switch this object into metric mode, updating the required parameters."""
    self.mode = "metric"
    self._flag_array.required = False
    for param in (self._metric_array, self._weights_array):
        param.required = True
    # default to uniform weights when none have been set yet
    if self.weights_array is None and self.metric_array is not None:
        self.weights_array = np.ones_like(self.metric_array, dtype=float)
    return
def _set_type_antenna(self):
    """Set the type and required properties consistent with an antenna object."""
    self.type = "antenna"
    # antenna objects require the antenna list and per-antenna count
    self._ant_array.required = True
    self._Nants_data.required = True
    # baseline-specific metadata is unused for antenna objects
    for param in (
        self._baseline_array,
        self._ant_1_array,
        self._ant_2_array,
        self._Nants_telescope,
        self._Nbls,
        self._Nblts,
    ):
        param.required = False
    if self.future_array_shapes:
        data_form = ("Nants_data", "Nfreqs", "Ntimes", "Npols")
    else:
        data_form = ("Nants_data", 1, "Nfreqs", "Ntimes", "Npols")
    for param in (self._metric_array, self._flag_array, self._weights_array):
        param.form = data_form
    self._freq_array.form = (1, "Nfreqs")
    self._time_array.form = ("Ntimes",)
    self._lst_array.form = ("Ntimes",)
def _set_type_baseline(self):
    """Set the type and required properties consistent with a baseline object."""
    self.type = "baseline"
    # the per-antenna array is unused for baseline objects
    self._ant_array.required = False
    # all baseline metadata becomes required
    for param in (
        self._baseline_array,
        self._ant_1_array,
        self._ant_2_array,
        self._Nants_telescope,
        self._Nants_data,
        self._Nbls,
        self._Nblts,
    ):
        param.required = True
    if self.time_array is not None:
        self.Nblts = len(self.time_array)
    if self.future_array_shapes:
        data_form = ("Nblts", "Nfreqs", "Npols")
    else:
        data_form = ("Nblts", 1, "Nfreqs", "Npols")
    for param in (self._metric_array, self._flag_array, self._weights_array):
        param.form = data_form
    self._freq_array.form = (1, "Nfreqs")
    self._time_array.form = ("Nblts",)
    self._lst_array.form = ("Nblts",)
def _set_type_waterfall(self):
    """Set the type and required properties consistent with a waterfall object."""
    self.type = "waterfall"
    # waterfall objects carry no per-antenna or per-baseline metadata
    for param in (
        self._ant_array,
        self._baseline_array,
        self._ant_1_array,
        self._ant_2_array,
        self._Nants_telescope,
        self._Nants_data,
        self._Nbls,
        self._Nblts,
    ):
        param.required = False
    for param in (self._metric_array, self._flag_array, self._weights_array):
        param.form = ("Ntimes", "Nfreqs", "Npols")
    self._time_array.form = ("Ntimes",)
    self._lst_array.form = ("Ntimes",)
    if not self.future_array_shapes:
        self._freq_array.form = ("Nfreqs",)
    def check(self, check_extra=True, run_check_acceptability=True):
        """
        Add some extra checks on top of checks on UVBase class.

        Check that required parameters exist. Check that parameters have
        appropriate shapes and optionally that the values are acceptable.

        Parameters
        ----------
        check_extra : bool
            If true, check all parameters, otherwise only check required parameters.
        run_check_acceptability : bool
            Option to check if values in parameters are acceptable.

        Returns
        -------
        bool
            True if check passes

        Raises
        ------
        ValueError
            if parameter shapes or types are wrong or do not have acceptable
            values (if run_check_acceptability is True)
        """
        # set the flex_spw_id_array to required if Nspws > 1
        if self.Nspws is not None and self.Nspws > 1:
            self._flex_spw_id_array.required = True
        else:
            self._flex_spw_id_array.required = False
        # first run the basic check from UVBase
        super().check(check_extra, run_check_acceptability)
        # Check internal consistency of numbers which don't explicitly correspond
        # to the shape of another array.
        if self.type == "baseline":
            # Nants_data counts the distinct antennas appearing on either
            # side of any baseline.
            if self.Nants_data != int(
                np.union1d(self.ant_1_array, self.ant_2_array).size
            ):
                raise ValueError(
                    "Nants_data must be equal to the number of unique "
                    "values in ant_1_array and ant_2_array"
                )
            if self.Nbls != len(np.unique(self.baseline_array)):
                raise ValueError(
                    "Nbls must be equal to the number of unique "
                    "baselines in the baseline_array"
                )
            if self.Ntimes != len(np.unique(self.time_array)):
                raise ValueError(
                    "Ntimes must be equal to the number of unique "
                    "times in the time_array"
                )
            # If telescope antenna metadata is present, the antennas with data
            # must be a subset of the telescope's antennas.
            if self.antenna_numbers is not None:
                if not set(np.unique(self.ant_1_array)).issubset(self.antenna_numbers):
                    raise ValueError(
                        "All antennas in ant_1_array must be in antenna_numbers."
                    )
                if not set(np.unique(self.ant_2_array)).issubset(self.antenna_numbers):
                    raise ValueError(
                        "All antennas in ant_2_array must be in antenna_numbers."
                    )
        elif self.type == "antenna":
            if self.antenna_numbers is not None:
                missing_ants = self.ant_array[
                    ~np.isin(self.ant_array, self.antenna_numbers)
                ]
                if missing_ants.size > 0:
                    raise ValueError(
                        "All antennas in ant_array must be in antenna_numbers. "
                        "The antennas in ant_array that are missing in antenna_numbers "
                        f"are: {missing_ants}"
                    )
        if self.flex_spw_id_array is None:
            warnings.warn(
                "flex_spw_id_array is not set. It will be required starting in version "
                "3.0",
                DeprecationWarning,
            )
        else:
            # Check that all values in flex_spw_id_array are entries in the spw_array
            if not np.all(np.isin(self.flex_spw_id_array, self.spw_array)):
                raise ValueError(
                    "All values in the flex_spw_id_array must exist in the spw_array."
                )
        if run_check_acceptability:
            # Verify LSTs are consistent with the times at this telescope
            # location, within the lst_array tolerances.
            lat, lon, alt = self.telescope_location_lat_lon_alt_degrees
            uvutils.check_lsts_against_times(
                jd_array=self.time_array,
                lst_array=self.lst_array,
                latitude=lat,
                longitude=lon,
                altitude=alt,
                lst_tols=self._lst_array.tols,
                frame=self._telescope_location.frame,
            )
        return True
def clear_unused_attributes(self):
"""Remove unused attributes.
Useful when changing type or mode or to save memory.
Will set all non-required attributes to None, except x_orientation,
extra_keywords, weights_square_array and filename.
"""
optional_attrs_to_keep = [
"telescope_name",
"telescope_location",
"channel_width",
"spw_array",
"Nspws",
"flex_spw_id_array", # TODO remove this from this list in version 3.0
"antenna_names",
"antenna_numbers",
"antenna_positions",
"Nants_telescope",
"x_orientation",
"weights_square_array",
"extra_keywords",
"filename",
]
for p in self:
attr = getattr(self, p)
if (
not attr.required
and attr.value is not None
and attr.name not in optional_attrs_to_keep
):
attr.value = None
setattr(self, p, attr)
def __eq__(self, other, check_history=True, check_extra=True):
"""Check Equality of two UVFlag objects.
Parameters
----------
other: UVFlag
object to check against
check_history : bool
Include the history keyword when comparing UVFlag objects.
check_extra : bool
Include non-required parameters when comparing UVFlag objects.
"""
if check_history:
return super(UVFlag, self).__eq__(other, check_extra=check_extra)
else:
# initial check that the classes are the same
# then strip the histories
if isinstance(other, self.__class__):
_h1 = self.history
self.history = None
_h2 = other.history
other.history = None
truth = super(UVFlag, self).__eq__(other, check_extra=check_extra)
self.history = _h1
other.history = _h2
return truth
else:
print("Classes do not match")
return False
def __ne__(self, other, check_history=True, check_extra=True):
"""Not Equal."""
return not self.__eq__(
other, check_history=check_history, check_extra=check_extra
)
def _set_lsts_helper(self):
latitude, longitude, altitude = self.telescope_location_lat_lon_alt_degrees
unique_times, inverse_inds = np.unique(self.time_array, return_inverse=True)
unique_lst_array = uvutils.get_lst_for_time(
unique_times, latitude, longitude, altitude
)
self.lst_array = unique_lst_array[inverse_inds]
return
def set_lsts_from_time_array(self, background=False):
"""Set the lst_array based from the time_array.
Parameters
----------
background : bool, False
When set to True, start the calculation on a threading.Thread in the
background and return the thread to the user.
Returns
-------
proc : None or threading.Thread instance
When background is set to True, a thread is returned which must be
joined before the lst_array exists on the UVData object.
"""
if not background:
self._set_lsts_helper()
return
else:
proc = threading.Thread(target=self._set_lsts_helper)
proc.start()
return proc
    def set_telescope_params(self, overwrite=False, warn=True):
        """
        Set telescope related parameters.

        If the telescope_name is in the known_telescopes, set any missing
        telescope-associated parameters (e.g. telescope location) to the value
        for the known telescope.

        Parameters
        ----------
        overwrite : bool
            Option to overwrite existing telescope-associated parameters with
            the values from the known telescope.
        warn : bool
            Option to issue a warning listing the parameters that were set or
            overwritten from the known telescope values.

        Raises
        ------
        ValueError
            if the telescope_name is not in known telescopes
        """
        telescope_obj = uvtel.get_telescope(self.telescope_name)
        if telescope_obj is not False:
            params_set = []
            telescope_params = list(telescope_obj.__iter__())
            # ensure that the Nants_telescope comes first so shapes work out below
            telescope_params.remove("_Nants_telescope")
            telescope_params.insert(0, "_Nants_telescope")
            for p in telescope_params:
                telescope_param = getattr(telescope_obj, p)
                if p in self:
                    self_param = getattr(self, p)
                else:
                    # skip telescope parameters that do not exist on this object
                    continue
                if telescope_param.value is not None and (
                    overwrite is True or self_param.value is None
                ):
                    telescope_shape = telescope_param.expected_shape(telescope_obj)
                    self_shape = self_param.expected_shape(self)
                    if telescope_shape == self_shape:
                        params_set.append(self_param.name)
                        prop_name = self_param.name
                        setattr(self, prop_name, getattr(telescope_obj, prop_name))
                    else:
                        # Note dropped handling for antenna diameters that appears in
                        # UVData because they don't exist on UVFlag.
                        warnings.warn(
                            f"{self_param.name} is not set but cannot be set using "
                            f"known values for {telescope_obj.telescope_name} "
                            "because the expected shapes don't match."
                        )
            if len(params_set) > 0:
                if warn:
                    params_set_str = ", ".join(params_set)
                    warnings.warn(
                        f"{params_set_str} are not set or are being "
                        "overwritten. Using known values for "
                        f"{telescope_obj.telescope_name}."
                    )
        else:
            raise ValueError(
                f"Telescope {self.telescope_name} is not in known_telescopes."
            )
def antpair2ind(self, ant1, ant2):
"""Get blt indices for given (ordered) antenna pair.
Parameters
----------
ant1 : int or array_like of int
Number of the first antenna
ant2 : int or array_like of int
Number of the second antenna
Returns
-------
int or array_like of int
baseline number(s) corresponding to the input antenna number
"""
if self.type != "baseline":
raise ValueError(
"UVFlag object of type " + self.type + " does not "
"contain antenna pairs to index."
)
return np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]
    def baseline_to_antnums(self, baseline):
        """Get the antenna numbers corresponding to a given baseline number.

        Parameters
        ----------
        baseline : int
            baseline number

        Returns
        -------
        tuple
            Antenna numbers corresponding to baseline.
        """
        # Nants_telescope selects the baseline numbering convention
        # (256 vs 2048 standard) used by the conversion.
        assert self.type == "baseline", "Must be 'baseline' type UVFlag object."
        return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)
    def antnums_to_baseline(self, ant1, ant2, attempt256=False):
        """
        Get the baseline number corresponding to two given antenna numbers.

        Parameters
        ----------
        ant1 : int or array_like of int
            first antenna number
        ant2 : int or array_like of int
            second antenna number
        attempt256 : bool
            Option to try to use the older 256 standard used in many uvfits files
            (will use 2048 standard if there are more than 256 antennas).

        Returns
        -------
        int or array of int
            baseline number corresponding to the two antenna numbers.
        """
        # Only baseline-type objects carry baseline numbering.
        assert self.type == "baseline", "Must be 'baseline' type UVFlag object."
        return uvutils.antnums_to_baseline(
            ant1, ant2, self.Nants_telescope, attempt256=attempt256
        )
def get_baseline_nums(self):
"""Return numpy array of unique baseline numbers in data."""
assert self.type == "baseline", "Must be 'baseline' type UVFlag object."
return np.unique(self.baseline_array)
def get_antpairs(self):
"""Return list of unique antpair tuples (ant1, ant2) in data."""
assert self.type == "baseline", "Must be 'baseline' type UVFlag object."
return list(zip(*self.baseline_to_antnums(self.get_baseline_nums())))
def get_ants(self):
"""
Get the unique antennas that have data associated with them.
Returns
-------
ndarray of int
Array of unique antennas with data associated with them.
"""
if self.type == "baseline":
return np.unique(np.append(self.ant_1_array, self.ant_2_array))
elif self.type == "antenna":
return np.unique(self.ant_array)
elif self.type == "waterfall":
raise ValueError("A waterfall type UVFlag object has no sense of antennas.")
def get_pols(self):
"""
Get the polarizations in the data.
Returns
-------
list of str
list of polarizations (as strings) in the data.
"""
return uvutils.polnum2str(
self.polarization_array, x_orientation=self.x_orientation
)
    def parse_ants(self, ant_str, print_toggle=False):
        """
        Get antpair and polarization from parsing an aipy-style ant string.

        Used to support the select function. This function is only useable when
        the UVFlag type is 'baseline'. Generates two lists of antenna pair tuples
        and polarization indices based on parsing of the string ant_str. If no
        valid polarizations (pseudo-Stokes params, or combinations of [lr] or
        [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
        polarizations are returned as None.

        Parameters
        ----------
        ant_str : str
            String containing antenna information to parse. Can be 'all',
            'auto', 'cross', or combinations of antenna numbers and polarization
            indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
            in front of an antenna number or baseline to exclude it from being
            output in ant_pairs_nums. If ant_str has a minus sign as the first
            character, 'all,' will be added to the beginning of the string.
            See the tutorial for examples of valid strings and their behavior.
        print_toggle : bool
            Boolean for printing parsed baselines for a visual user check.

        Returns
        -------
        ant_pairs_nums : list of tuples of int or None
            List of tuples containing the parsed pairs of antenna numbers, or
            None if ant_str is 'all' or a pseudo-Stokes polarizations.
        polarizations : list of int or None
            List of desired polarizations or None if ant_str does not contain a
            polarization specification.
        """
        if self.type != "baseline":
            raise ValueError(
                "UVFlag objects can only call 'parse_ants' function "
                "if type is 'baseline'."
            )
        # Delegate the actual string parsing to the shared utility function.
        return uvutils.parse_ants(
            self,
            ant_str=ant_str,
            print_toggle=print_toggle,
            x_orientation=self.x_orientation,
        )
    def collapse_pol(
        self,
        method="quadmean",
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Collapse the polarization axis using a given method.

        If the original UVFlag object has more than one polarization,
        the resulting polarization_array will be a single element array with a
        comma separated string encoding the original polarizations.

        Parameters
        ----------
        method : str, {"quadmean", "absmean", "mean", "or", "and"}
            How to collapse the dimension(s).
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after collapsing polarizations.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            collapsing polarizations.
        """
        method = method.lower()
        if self.mode == "flag":
            darr = self.flag_array
        else:
            darr = self.metric_array
        if len(self.polarization_array) > 1:
            # Flag mode has no weights; use uniform weights for the collapse.
            if self.mode == "metric":
                _weights = self.weights_array
            else:
                _weights = np.ones_like(darr)
            # Collapse pol dimension. But note we retain a polarization axis.
            d, w = uvutils.collapse(
                darr, method, axis=-1, weights=_weights, return_weights=True
            )
            darr = np.expand_dims(d, axis=d.ndim)
            if self.mode == "metric":
                self.weights_array = np.expand_dims(w, axis=w.ndim)
            # Encode the original pols as a single comma separated string.
            self.polarization_array = np.array(
                [",".join(map(str, self.polarization_array))], dtype=np.str_
            )
            self.Npols = len(self.polarization_array)
            self._check_pol_state()
        else:
            warnings.warn(
                "Cannot collapse polarization axis when only one pol present."
            )
            return
        # Boolean collapses of flags stay in flag mode; any numerical collapse
        # produces a metric.
        if ((method == "or") or (method == "and")) and (self.mode == "flag"):
            self.flag_array = darr
        else:
            self.metric_array = darr
            self._set_mode_metric()
        self.clear_unused_attributes()
        self.history += "Pol axis collapse. "
        if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str
        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
    def to_waterfall(
        self,
        method="quadmean",
        keep_pol=True,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        return_weights_square=False,
    ):
        """Convert an 'antenna' or 'baseline' type object to waterfall.

        Parameters
        ----------
        method : str, {"quadmean", "absmean", "mean", "or", "and"}
            How to collapse the dimension(s).
        keep_pol : bool
            Whether to also collapse the polarization dimension
            If keep_pol is False, and the original UVFlag object has more
            than one polarization, the resulting polarization_array
            will be a single element array with a comma separated string
            encoding the original polarizations.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after converting to waterfall type.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            converting to waterfall type.
        return_weights_square: bool
            Option to compute the sum of the squares of the weights when
            collapsing baseline object to waterfall. Not used if type is not
            baseline to begin with. Fills an optional parameter if so.
        """
        method = method.lower()
        if self.type == "waterfall" and (
            keep_pol or (len(self.polarization_array) == 1)
        ):
            warnings.warn("This object is already a waterfall. Nothing to change.")
            return
        if (not keep_pol) and (len(self.polarization_array) > 1):
            self.collapse_pol(method)
        if self.mode == "flag":
            darr = self.flag_array
        else:
            darr = self.metric_array
        if self.type == "antenna":
            # Collapse over the antenna axis (and the length-1 spw axis for
            # the old array shapes), then put time first.
            if self.future_array_shapes:
                collapse_axes = (0,)
            else:
                collapse_axes = (0, 1)
            d, w = uvutils.collapse(
                darr,
                method,
                axis=collapse_axes,
                weights=self.weights_array,
                return_weights=True,
            )
            darr = np.swapaxes(d, 0, 1)
            if self.mode == "metric":
                self.weights_array = np.swapaxes(w, 0, 1)
        elif self.type == "baseline":
            # Collapse over baselines separately for each unique time.
            Nt = len(np.unique(self.time_array))
            Nf = self.freq_array.size
            Np = len(self.polarization_array)
            d = np.zeros((Nt, Nf, Np))
            w = np.zeros((Nt, Nf, Np))
            if return_weights_square:
                ws = np.zeros((Nt, Nf, Np))
            for i, t in enumerate(np.unique(self.time_array)):
                ind = self.time_array == t
                if self.mode == "metric":
                    _weights = self.weights_array[ind, :, :]
                else:
                    _weights = np.ones_like(darr[ind, :, :], dtype=float)
                if return_weights_square:
                    d[i, :, :], w[i, :, :], ws[i, :, :] = uvutils.collapse(
                        darr[ind, :, :],
                        method,
                        axis=0,
                        weights=_weights,
                        return_weights=True,
                        return_weights_square=return_weights_square,
                    )
                else:
                    d[i, :, :], w[i, :, :] = uvutils.collapse(
                        darr[ind, :, :],
                        method,
                        axis=0,
                        weights=_weights,
                        return_weights=True,
                        return_weights_square=return_weights_square,
                    )
            darr = d
            if self.mode == "metric":
                self.weights_array = w
                if return_weights_square:
                    self.weights_square_array = ws
            # Reduce the time/lst axes to their unique values.
            self.time_array, ri = np.unique(self.time_array, return_index=True)
            self.lst_array = self.lst_array[ri]
        if ((method == "or") or (method == "and")) and (self.mode == "flag"):
            # If using a boolean operation (AND/OR) and in flag mode, stay in flag
            # flags should be bool, but somehow it is cast as float64
            # so recast to bool here
            self.flag_array = darr.astype(bool)
        else:
            # Otherwise change to (or stay in) metric
            self.metric_array = darr
            self._set_mode_metric()
        self.freq_array = self.freq_array.flatten()
        self._set_type_waterfall()
        self.history += 'Collapsed to type "waterfall". '
        if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str
        self.clear_unused_attributes()
        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
    def to_baseline(
        self,
        uv,
        force_pol=False,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Convert a UVFlag object of type "waterfall" or "antenna" to type "baseline".

        Broadcasts the flag array to all baselines.
        This function does NOT apply flags to uv (see utils.apply_uvflag for that).

        Parameters
        ----------
        uv : UVData or UVFlag object
            Object with type baseline to match.
        force_pol : bool
            If True, will use 1 pol to broadcast to any other pol.
            Otherwise, will require polarizations match.
            For example, this keyword is useful if one flags on all
            pols combined, and wants to broadcast back to individual pols.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after converting to baseline type.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            converting to baseline type.
        """
        if self.type == "baseline":
            return
        if not (
            issubclass(uv.__class__, UVData)
            or (isinstance(uv, UVFlag) and uv.type == "baseline")
        ):
            raise ValueError(
                "Must pass in UVData object or UVFlag object of type "
                '"baseline" to match.'
            )
        # write it out this rather than comparing the UVParameters because
        # future_array_shapes might be different. In the future, when shapes are not
        # variable, this can be done by comparing the UVParameters.
        if self.Nfreqs != uv.Nfreqs or not np.allclose(
            np.squeeze(self.freq_array),
            np.squeeze(uv.freq_array),
            rtol=self._freq_array.tols[0],
            atol=self._freq_array.tols[1],
        ):
            raise ValueError(
                "The freq_array on uv is not the same as the freq_array on this "
                f"object. The value on this object is {self.freq_array}; the value "
                f"on uv is {uv.freq_array}"
            )
        # Metadata that must agree between the two objects before broadcasting.
        warn_compatibility_params = [
            "telescope_name",
            "telescope_location",
            "antenna_names",
            "antenna_numbers",
            "antenna_positions",
            "channel_width",
            "spw_array",
        ]
        if self.Nspws is not None and self.Nspws > 1:
            # TODO: make this always be in the compatibility list in version 3.0
            warn_compatibility_params.append("flex_spw_id_array")
        for param in warn_compatibility_params:
            if (
                issubclass(uv.__class__, UVData)
                and param == "channel_width"
                and not (uv.future_array_shapes or uv.flex_spw)
            ):
                # Older UVData shapes store a scalar channel_width; compare it
                # against this object's per-channel array.
                if not np.allclose(
                    self.channel_width,
                    np.full(uv.Nfreqs, uv.channel_width),
                    rtol=self._channel_width.tols[0],
                    atol=self._channel_width.tols[1],
                ):
                    raise ValueError(
                        "channel_width is not the same this object and on uv. The "
                        f"value on this object is {self.channel_width}; the value on "
                        f"uv is {uv.channel_width}."
                    )
            else:
                # compare the UVParameter objects to properly handle tolerances
                this_param = getattr(self, "_" + param)
                uv_param = getattr(uv, "_" + param)
                if this_param.value is not None and this_param != uv_param:
                    raise ValueError(
                        f"{param} is not the same this object and on uv. The value on "
                        f"this object is {this_param.value}; the value on uv is "
                        f"{uv_param.value}."
                    )
        # Deal with polarization
        if force_pol and self.polarization_array.size == 1:
            # Use single pol for all pols, regardless
            self.polarization_array = uv.polarization_array
            # Broadcast arrays
            if self.mode == "flag":
                self.flag_array = self.flag_array.repeat(
                    self.polarization_array.size, axis=-1
                )
            else:
                self.metric_array = self.metric_array.repeat(
                    self.polarization_array.size, axis=-1
                )
                self.weights_array = self.weights_array.repeat(
                    self.polarization_array.size, axis=-1
                )
            self.Npols = len(self.polarization_array)
            self._check_pol_state()
        # Now the pol axes should match regardless of force_pol.
        if not np.array_equal(uv.polarization_array, self.polarization_array):
            if self.polarization_array.size == 1:
                raise ValueError(
                    "Polarizations do not match. Try keyword force_pol"
                    + " if you wish to broadcast to all polarizations."
                )
            else:
                raise ValueError("Polarizations could not be made to match.")
        if self.type == "waterfall":
            # Populate arrays
            if self.mode == "flag":
                if (
                    issubclass(uv.__class__, UVData)
                    and uv.future_array_shapes != self.future_array_shapes
                ):
                    # Insert or drop the spw axis so the new array matches this
                    # object's array-shape convention.
                    if uv.future_array_shapes:
                        arr = np.zeros_like(uv.flag_array[:, np.newaxis, :, :])
                    else:
                        arr = np.zeros_like(uv.flag_array[:, 0, :, :])
                else:
                    arr = np.zeros_like(uv.flag_array)
                sarr = self.flag_array
            elif self.mode == "metric":
                if (
                    issubclass(uv.__class__, UVData)
                    and uv.future_array_shapes != self.future_array_shapes
                ):
                    if uv.future_array_shapes:
                        arr = np.zeros_like(
                            uv.flag_array[:, np.newaxis, :, :], dtype=np.float64
                        )
                        warr = np.zeros_like(
                            uv.flag_array[:, np.newaxis, :, :], dtype=np.float64
                        )
                    else:
                        arr = np.zeros_like(uv.flag_array[:, 0, :, :], dtype=np.float64)
                        warr = np.zeros_like(
                            uv.flag_array[:, 0, :, :], dtype=np.float64
                        )
                else:
                    arr = np.zeros_like(uv.flag_array, dtype=np.float64)
                    warr = np.zeros_like(uv.flag_array, dtype=np.float64)
                sarr = self.metric_array
            # Copy each waterfall time slice into every blt at that time.
            for i, t in enumerate(np.unique(self.time_array)):
                ti = np.where(
                    np.isclose(
                        uv.time_array,
                        t,
                        rtol=max(self._time_array.tols[0], uv._time_array.tols[0]),
                        atol=max(self._time_array.tols[1], uv._time_array.tols[1]),
                    )
                )
                if self.future_array_shapes:
                    arr[ti] = sarr[i][np.newaxis, :, :]
                    if self.mode == "metric":
                        warr[ti] = self.weights_array[i][np.newaxis, :, :]
                else:
                    arr[ti] = sarr[i][np.newaxis, np.newaxis, :, :]
                    if self.mode == "metric":
                        warr[ti] = self.weights_array[i][np.newaxis, np.newaxis, :, :]
            if self.mode == "flag":
                self.flag_array = arr
            elif self.mode == "metric":
                self.metric_array = arr
                self.weights_array = warr
        elif self.type == "antenna":
            if self.mode == "metric":
                raise NotImplementedError(
                    "Cannot currently convert from antenna type, metric mode to "
                    "baseline type UVFlag object."
                )
            # Antennas present in uv but missing here are treated as flagged.
            ants_data = np.unique(uv.ant_1_array.tolist() + uv.ant_2_array.tolist())
            new_ants = np.setdiff1d(ants_data, self.ant_array)
            if new_ants.size > 0:
                self.ant_array = np.append(self.ant_array, new_ants).tolist()
                # make new flags of the same shape but with first axis the
                # size of the new ants
                flag_shape = list(self.flag_array.shape)
                flag_shape[0] = new_ants.size
                new_flags = np.full(flag_shape, True, dtype=bool)
                self.flag_array = np.append(self.flag_array, new_flags, axis=0)
            if self.future_array_shapes:
                baseline_flags = np.full(
                    (uv.Nblts, self.Nfreqs, self.Npols), True, dtype=bool
                )
            else:
                baseline_flags = np.full(
                    (uv.Nblts, 1, self.Nfreqs, self.Npols), True, dtype=bool
                )
            for blt_index, bl in enumerate(uv.baseline_array):
                uvf_t_index = np.nonzero(
                    np.isclose(
                        uv.time_array[blt_index],
                        self.time_array,
                        rtol=max(self._time_array.tols[0], uv._time_array.tols[0]),
                        atol=max(self._time_array.tols[1], uv._time_array.tols[1]),
                    )
                )[0]
                if uvf_t_index.size > 0:
                    # if the time is found in the uvflag object time_array
                    # input the or'ed data from each antenna
                    ant1, ant2 = uv.baseline_to_antnums(bl)
                    ant1_index = np.nonzero(np.array(self.ant_array) == ant1)
                    ant2_index = np.nonzero(np.array(self.ant_array) == ant2)
                    if self.future_array_shapes:
                        or_flag = np.logical_or(
                            self.flag_array[ant1_index, :, uvf_t_index, :],
                            self.flag_array[ant2_index, :, uvf_t_index, :],
                        )
                    else:
                        or_flag = np.logical_or(
                            self.flag_array[ant1_index, :, :, uvf_t_index, :],
                            self.flag_array[ant2_index, :, :, uvf_t_index, :],
                        )
                    baseline_flags[blt_index] = or_flag.copy()
            self.flag_array = baseline_flags
        # Check the frequency array for shape, broadcast to (1, Nfreqs) if needed
        if not self.future_array_shapes:
            self.freq_array = np.atleast_2d(self.freq_array)
        if self.Nspws is None:
            self.Nspws = uv.Nspws
            self.spw_array = uv.spw_array
            if uv.flex_spw_id_array is not None:
                self.flex_spw_id_array = uv.flex_spw_id_array
        # Adopt the baseline/time layout of uv.
        self.baseline_array = uv.baseline_array
        self.Nbls = np.unique(self.baseline_array).size
        self.ant_1_array = uv.ant_1_array
        self.ant_2_array = uv.ant_2_array
        self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)
        self.time_array = uv.time_array
        self.lst_array = uv.lst_array
        self.Nblts = self.time_array.size
        if self.telescope_name is None and self.telescope_location is None:
            self.telescope_name = uv.telescope_name
            self.telescope_location = uv.telescope_location
        if (
            self.antenna_numbers is None
            and self.antenna_names is None
            and self.antenna_positions is None
        ):
            self.antenna_numbers = uv.antenna_numbers
            self.antenna_names = uv.antenna_names
            self.antenna_positions = uv.antenna_positions
            self.Nants_telescope = uv.Nants_telescope
        self._set_type_baseline()
        self.clear_unused_attributes()
        self.history += 'Broadcast to type "baseline". '
        if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str
        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
def to_antenna(
self,
uv,
force_pol=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Convert a UVFlag object of type "waterfall" to type "antenna".
Broadcasts the flag array to all antennas.
This function does NOT apply flags to uv (see utils.apply_uvflag for that).
Parameters
----------
uv : UVCal or UVFlag object
object of type antenna to match.
force_pol : bool
If True, will use 1 pol to broadcast to any other pol.
Otherwise, will require polarizations match.
For example, this keyword is useful if one flags on all
pols combined, and wants to broadcast back to individual pols.
run_check : bool
Option to check for the existence and proper shapes of parameters
after converting to antenna type.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
converting to antenna type.
"""
if self.type == "antenna":
return
if not (
issubclass(uv.__class__, UVCal)
or (isinstance(uv, UVFlag) and uv.type == "antenna")
):
raise ValueError(
"Must pass in UVCal object or UVFlag object of type "
'"antenna" to match.'
)
if self.type != "waterfall":
raise ValueError(
'Cannot convert from type "' + self.type + '" to "antenna".'
)
# write it out this rather than comparing the UVParameters because
# future_array_shapes might be different. In the future, when shapes are not
# variable, this can be done by comparing the UVParameters.
if self.Nfreqs != uv.Nfreqs or not np.allclose(
np.squeeze(self.freq_array),
np.squeeze(uv.freq_array),
rtol=self._freq_array.tols[0],
atol=self._freq_array.tols[1],
):
raise ValueError(
"The freq_array on uv is not the same as the freq_array on this "
f"object. The value on this object is {self.freq_array}; the value "
f"on uv is {uv.freq_array}"
)
warn_compatibility_params = [
"telescope_name",
"telescope_location",
"antenna_names",
"antenna_numbers",
"antenna_positions",
"channel_width",
"spw_array",
]
if self.Nspws is not None and self.Nspws > 1:
# TODO: make this always be in the compatibility list in version 3.0
warn_compatibility_params.append("flex_spw_id_array")
for param in warn_compatibility_params:
if (
issubclass(uv.__class__, UVCal)
and param == "channel_width"
and not (uv.future_array_shapes or uv.flex_spw)
):
if not np.allclose(
self.channel_width,
np.full(uv.Nfreqs, uv.channel_width),
rtol=self._channel_width.tols[0],
atol=self._channel_width.tols[1],
):
raise ValueError(
"channel_width is not the same this object and on uv. The "
f"value on this object is {self.channel_width}; the value on "
f"uv is {uv.channel_width}."
)
else:
# compare the UVParameter objects to properly handle tolerances
this_param = getattr(self, "_" + param)
uv_param = getattr(uv, "_" + param)
if this_param.value is not None and this_param != uv_param:
raise ValueError(
f"{param} is not the same this object and on uv. The value on "
f"this object is {this_param.value}; the value on uv is "
f"{uv_param.value}."
)
# Deal with polarization
if issubclass(uv.__class__, UVCal):
polarr = uv.jones_array
else:
polarr = uv.polarization_array
if force_pol and self.polarization_array.size == 1:
# Use single pol for all pols, regardless
self.polarization_array = polarr
# Broadcast arrays
if self.mode == "flag":
self.flag_array = self.flag_array.repeat(
self.polarization_array.size, axis=-1
)
else:
self.metric_array = self.metric_array.repeat(
self.polarization_array.size, axis=-1
)
self.weights_array = self.weights_array.repeat(
self.polarization_array.size, axis=-1
)
self.Npols = len(self.polarization_array)
self._check_pol_state()
# Now the pol axes should match regardless of force_pol.
if not np.array_equal(polarr, self.polarization_array):
if self.polarization_array.size == 1:
raise ValueError(
"Polarizations do not match. Try keyword force_pol"
+ "if you wish to broadcast to all polarizations."
)
else:
raise ValueError("Polarizations could not be made to match.")
# Populate arrays
if self.mode == "flag":
if self.future_array_shapes:
self.flag_array = np.swapaxes(self.flag_array, 0, 1)[
np.newaxis, :, :, :
]
else:
self.flag_array = np.swapaxes(self.flag_array, 0, 1)[
np.newaxis, np.newaxis, :, :, :
]
self.flag_array = self.flag_array.repeat(len(uv.ant_array), axis=0)
elif self.mode == "metric":
if self.future_array_shapes:
self.metric_array = np.swapaxes(self.metric_array, 0, 1)[
np.newaxis, :, :, :
]
self.weights_array = np.swapaxes(self.weights_array, 0, 1)[
np.newaxis, :, :, :
]
else:
self.metric_array = np.swapaxes(self.metric_array, 0, 1)[
np.newaxis, np.newaxis, :, :, :
]
self.weights_array = np.swapaxes(self.weights_array, 0, 1)[
np.newaxis, np.newaxis, :, :, :
]
self.metric_array = self.metric_array.repeat(len(uv.ant_array), axis=0)
self.weights_array = self.weights_array.repeat(len(uv.ant_array), axis=0)
self.ant_array = uv.ant_array
self.Nants_data = len(uv.ant_array)
# Check the frequency array for Nspws, otherwise broadcast to 1,Nfreqs
if not self.future_array_shapes:
self.freq_array = np.atleast_2d(self.freq_array)
if self.telescope_name is None and self.telescope_location is None:
self.telescope_name = uv.telescope_name
self.telescope_location = uv.telescope_location
if (
self.antenna_numbers is None
and self.antenna_names is None
and self.antenna_positions is None
):
self.antenna_numbers = uv.antenna_numbers
self.antenna_names = uv.antenna_names
self.antenna_positions = uv.antenna_positions
self.Nants_telescope = uv.Nants_telescope
if self.Nspws is None:
self.Nspws = uv.Nspws
self.spw_array = uv.spw_array
if uv.flex_spw_id_array is not None:
self.flex_spw_id_array = uv.flex_spw_id_array
self._set_type_antenna()
self.history += 'Broadcast to type "antenna". '
if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
self.history += self.pyuvdata_version_str
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
def to_flag(
self,
threshold=np.inf,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Convert to flag mode.
This function is NOT SMART. Removes metric_array and creates a
flag_array from a simple threshold on the metric values.
Parameters
----------
threshold : float
Metric value over which the corresponding flag is
set to True. Default is np.inf, which results in flags of all False.
run_check : bool
Option to check for the existence and proper shapes of parameters
after converting to flag mode.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
converting to flag mode.
"""
if self.mode == "flag":
return
elif self.mode == "metric":
self.flag_array = np.where(self.metric_array >= threshold, True, False)
self._set_mode_flag()
else:
raise ValueError(
"Unknown UVFlag mode: " + self.mode + ". Cannot convert to flag."
)
self.history += 'Converted to mode "flag". '
if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
self.history += self.pyuvdata_version_str
self.clear_unused_attributes()
if run_check:
self.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
    def to_metric(
        self,
        convert_wgts=False,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Convert to metric mode.

        This function is NOT SMART. Simply recasts flag_array as float
        and uses this as the metric array.

        Parameters
        ----------
        convert_wgts : bool
            if True convert self.weights_array to ones
            unless a column or row is completely flagged, in which case
            convert those pixels to zero. This is used when reinterpreting
            flags as metrics to calculate flag fraction. Zero weighting
            completely flagged rows/columns prevents those from counting
            against a threshold along the other dimension.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after converting to metric mode.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            converting to metric mode.
        """
        if self.mode == "metric":
            # Already in metric mode; nothing to do.
            return
        elif self.mode == "flag":
            # Flags become 0.0/1.0 metrics.
            self.metric_array = self.flag_array.astype(np.float64)
            self._set_mode_metric()
            if convert_wgts:
                self.weights_array = np.ones_like(self.weights_array)
                if self.type == "waterfall":
                    for i in range(self.Npols):
                        # Zero-weight rows/columns that are entirely flagged so
                        # they don't count against thresholds on the other axis.
                        self.weights_array[:, :, i] *= ~and_rows_cols(
                            self.flag_array[:, :, i]
                        )
                elif self.type == "baseline":
                    # Apply the same masking per antenna pair.
                    for i in range(self.Npols):
                        for ap in self.get_antpairs():
                            inds = self.antpair2ind(*ap)
                            if self.future_array_shapes:
                                self.weights_array[inds, :, i] *= ~and_rows_cols(
                                    self.flag_array[inds, :, i]
                                )
                            else:
                                # Current array shapes carry an extra spw axis.
                                self.weights_array[inds, 0, :, i] *= ~and_rows_cols(
                                    self.flag_array[inds, 0, :, i]
                                )
                elif self.type == "antenna":
                    # Apply the same masking per antenna (axis 0).
                    for i in range(self.Npols):
                        for j in range(self.weights_array.shape[0]):
                            if self.future_array_shapes:
                                self.weights_array[j, :, :, i] *= ~and_rows_cols(
                                    self.flag_array[j, :, :, i]
                                )
                            else:
                                self.weights_array[j, 0, :, :, i] *= ~and_rows_cols(
                                    self.flag_array[j, 0, :, :, i]
                                )
        else:
            raise ValueError(
                "Unknown UVFlag mode: " + self.mode + ". Cannot convert to metric."
            )
        self.history += 'Converted to mode "metric". '
        if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):
            self.history += self.pyuvdata_version_str
        # Drop attributes (e.g. flag_array) that metric mode does not use.
        self.clear_unused_attributes()
        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
def __add__(
self,
other,
inplace=False,
axis="time",
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Add two UVFlag objects together along a given axis.
Parameters
----------
other : UVFlag
object to combine with self.
axis : str
Axis along which to combine UVFlag objects.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining two objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining two objects.
inplace : bool
Option to perform the select directly on self or return a new UVData
object with just the selected data.
Returns
-------
uvf : UVFlag
If inplace==False, return new UVFlag object.
"""
# Handle in place
if inplace:
this = self
else:
this = self.copy()
# Check that objects are compatible
if not isinstance(other, this.__class__):
raise ValueError("Only UVFlag objects can be added to a UVFlag object")
if this.type != other.type:
raise ValueError(
"UVFlag object of type " + other.type + " cannot be "
"added to object of type " + this.type + "."
)
if this.mode != other.mode:
raise ValueError(
"UVFlag object of mode " + other.mode + " cannot be "
"added to object of mode " + this.type + "."
)
# check that both objects have the same array shapes
if this.future_array_shapes != other.future_array_shapes:
raise ValueError(
"Both objects must have the same `future_array_shapes` parameter. "
"Use the `use_future_array_shapes` or `use_current_array_shapes` "
"methods to convert them."
)
this_has_spw_id = this.flex_spw_id_array is not None
other_has_spw_id = other.flex_spw_id_array is not None
if this_has_spw_id != other_has_spw_id:
warnings.warn(
"One object has the flex_spw_id_array set and one does not. Combined "
"object will have it set."
)
# Update filename parameter
this.filename = uvutils._combine_filenames(this.filename, other.filename)
if this.filename is not None:
this._filename.form = (len(this.filename),)
# Simplify axis referencing
axis = axis.lower()
type_nums = {"waterfall": 0, "baseline": 1, "antenna": 2}
if self.future_array_shapes:
axis_nums = {
"time": [0, 0, 2],
"baseline": [None, 0, None],
"antenna": [None, None, 0],
"frequency": [1, 1, 1],
"polarization": [2, 2, 3],
"pol": [2, 2, 3],
"jones": [2, 2, 3],
}
else:
axis_nums = {
"time": [0, 0, 3],
"baseline": [None, 0, None],
"antenna": [None, None, 0],
"frequency": [1, 2, 2],
"polarization": [2, 3, 4],
"pol": [2, 3, 4],
"jones": [2, 3, 4],
}
if axis not in axis_nums.keys():
raise ValueError(f"Axis not recognized, must be one of {axis_nums.keys()}")
ax = axis_nums[axis][type_nums[self.type]]
warn_compatibility_params = ["telescope_name", "telescope_location"]
if axis != "frequency":
warn_compatibility_params.extend(
["freq_array", "channel_width", "spw_array"]
)
if self.flex_spw_id_array is not None:
# TODO: make this always be in the compatibility list in version 3.0
warn_compatibility_params.append("flex_spw_id_array")
if axis not in ["polarization", "pol", "jones"]:
warn_compatibility_params.extend(["polarization_array"])
if axis != "time":
warn_compatibility_params.extend(["time_array", "lst_array"])
if axis != "antenna" and self.type == "antenna":
warn_compatibility_params.extend(
["ant_array", "antenna_names", "antenna_numbers", "antenna_positions"]
)
if axis != "baseline" and self.type == "baseline":
warn_compatibility_params.extend(
[
"baseline_array",
"ant_1_array",
"ant_2_array",
"antenna_names",
"antenna_numbers",
"antenna_positions",
]
)
for param in warn_compatibility_params:
# compare the UVParameter objects to properly handle tolerances
this_param = getattr(self, "_" + param)
other_param = getattr(other, "_" + param)
if this_param.value is not None and this_param != other_param:
raise ValueError(
f"{param} is not the same the two objects. The value on this "
f"object is {this_param.value}; the value on the other object is "
f"{other_param.value}."
)
if axis == "time":
this.time_array = np.concatenate([this.time_array, other.time_array])
this.lst_array = np.concatenate([this.lst_array, other.lst_array])
if this.type == "baseline":
this.baseline_array = np.concatenate(
[this.baseline_array, other.baseline_array]
)
this.ant_1_array = np.concatenate([this.ant_1_array, other.ant_1_array])
this.ant_2_array = np.concatenate([this.ant_2_array, other.ant_2_array])
this.Nants_data = int(
np.union1d(this.ant_1_array, this.ant_2_array).size
)
this.Ntimes = np.unique(this.time_array).size
this.Nblts = len(this.time_array)
elif axis == "baseline":
if self.type != "baseline":
raise ValueError(
"Flag object of type " + self.type + " cannot be "
"concatenated along baseline axis."
)
this.time_array = np.concatenate([this.time_array, other.time_array])
this.lst_array = np.concatenate([this.lst_array, other.lst_array])
this.baseline_array = np.concatenate(
[this.baseline_array, other.baseline_array]
)
this.ant_1_array = np.concatenate([this.ant_1_array, other.ant_1_array])
this.ant_2_array = np.concatenate([this.ant_2_array, other.ant_2_array])
this.Nants_data = int(np.union1d(this.ant_1_array, this.ant_2_array).size)
this.Nbls = np.unique(this.baseline_array).size
this.Nblts = len(this.baseline_array)
elif axis == "antenna":
if self.type != "antenna":
raise ValueError(
"Flag object of type " + self.type + " cannot be "
"concatenated along antenna axis."
)
this.ant_array = np.concatenate([this.ant_array, other.ant_array])
this.Nants_data = len(this.ant_array)
temp_ant_nums = np.concatenate(
[this.antenna_numbers, other.antenna_numbers]
)
temp_ant_names = np.concatenate([this.antenna_names, other.antenna_names])
temp_ant_pos = np.concatenate(
[this.antenna_positions, other.antenna_positions], axis=0
)
this.antenna_numbers, unique_inds = np.unique(
temp_ant_nums, return_index=True
)
this.antenna_names = temp_ant_names[unique_inds]
this.antenna_positions = temp_ant_pos[unique_inds]
this.Nants_telescope = len(this.antenna_numbers)
elif axis == "frequency":
this.freq_array = np.concatenate(
[this.freq_array, other.freq_array], axis=-1
)
this.channel_width = np.concatenate(
[this.channel_width, other.channel_width]
)
# handle multiple spws
if this.Nspws > 1 or other.Nspws > 1 or this._spw_array != other._spw_array:
if this.flex_spw_id_array is None:
this.flex_spw_id_array = np.full(
this.Nfreqs, this.spw_array[0], dtype=int
)
if other.flex_spw_id_array is None:
other.flex_spw_id_array = np.full(
other.Nfreqs, other.spw_array[0], dtype=int
)
this.flex_spw_id_array = np.concatenate(
[this.flex_spw_id_array, other.flex_spw_id_array]
)
this.spw_array = np.concatenate([this.spw_array, other.spw_array])
# We want to preserve per-spw information based on first appearance
# in the concatenated array.
unique_index = np.sort(
np.unique(this.flex_spw_id_array, return_index=True)[1]
)
this.spw_array = this.flex_spw_id_array[unique_index]
this.Nspws = len(this.spw_array)
else:
if this_has_spw_id or other_has_spw_id:
this.flex_spw_id_array = np.full(
this.freq_array.size, this.spw_array[0], dtype=int
)
this.Nfreqs = np.unique(this.freq_array.flatten()).size
elif axis in ["polarization", "pol", "jones"]:
if this.pol_collapsed:
raise NotImplementedError(
"Two UVFlag objects with their "
"polarizations collapsed cannot be "
"added along the polarization axis "
"at this time."
)
this.polarization_array = np.concatenate(
[this.polarization_array, other.polarization_array]
)
this.Npols = len(this.polarization_array)
for attr in this._data_params:
# Check that 'other' also has the attribute filled
if getattr(other, attr) is not None:
setattr(
this,
attr,
np.concatenate(
[getattr(this, attr), getattr(other, attr)], axis=ax
),
)
# May 21, 2020 - should only happen for weights_square_array attr
else:
raise ValueError(
f"{attr} optional parameter is missing from second UVFlag"
f" object. To concatenate two {this.mode} objects, they"
" must both contain the same optional parameters set."
)
this.history += "Data combined along " + axis + " axis. "
if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):
this.history += this.pyuvdata_version_str
this.Ntimes = np.unique(this.time_array).size
if run_check:
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return this
def __iadd__(
self,
other,
axis="time",
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""In place add.
Parameters
----------
other : UVFlag
object to combine with self.
axis : str
Axis along which to combine UVFlag objects.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining two objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining two objects.
"""
self.__add__(
other,
inplace=True,
axis=axis,
run_check=run_check,
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
)
return self
def __or__(
self,
other,
inplace=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Combine two UVFlag objects in "flag" mode by "OR"-ing their flags.
Parameters
----------
other : UVFlag
object to combine with self.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining two objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining two objects.
inplace : bool
Option to perform the select directly on self or return a new UVData
object with just the selected data.
Returns
-------
uvf : UVFlag
If inplace==False, return new UVFlag object.
"""
if (self.mode != "flag") or (other.mode != "flag"):
raise ValueError(
'UVFlag object must be in "flag" mode to use "or" function.'
)
# Handle in place
if inplace:
this = self
else:
this = self.copy()
this.flag_array += other.flag_array
if other.history not in this.history:
this.history += "Flags OR'd with: " + other.history
if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):
this.history += this.pyuvdata_version_str
if run_check:
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return this
def __ior__(
self, other, run_check=True, check_extra=True, run_check_acceptability=True
):
"""Perform an inplace logical or.
Parameters
----------
other : UVFlag
object to combine with self.
run_check : bool
Option to check for the existence and proper shapes of parameters
after combining two objects.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
combining two objects.
"""
self.__or__(
other,
inplace=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
)
return self
def combine_metrics(
self,
others,
method="quadmean",
inplace=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
):
"""Combine metric arrays between different UVFlag objects together.
Parameters
----------
others : UVFlag or list of UVFlags
Other UVFlag objects to combine metrics with this one.
method : str, {"quadmean", "absmean", "mean", "or", "and"}
Method to combine metrics.
inplace : bool, optional
Perform combination in place.
Returns
-------
uvf : UVFlag
If inplace==False, return new UVFlag object with combined metrics.
"""
# Ensure others is iterable (in case of single UVFlag object)
# cannot use uvutils._get_iterable because the object itself is iterable
if not isinstance(others, (list, tuple, np.ndarray)):
others = [others]
if np.any([not isinstance(other, UVFlag) for other in others]):
raise ValueError('"others" must be UVFlag or list of UVFlag objects')
if (self.mode != "metric") or np.any(
[other.mode != "metric" for other in others]
):
raise ValueError(
'UVFlag object and "others" must be in "metric" mode '
'to use "add_metrics" function.'
)
if inplace:
this = self
else:
this = self.copy()
method = method.lower()
darray = np.expand_dims(this.metric_array, 0)
warray = np.expand_dims(this.weights_array, 0)
for other in others:
if this.metric_array.shape != other.metric_array.shape:
raise ValueError("UVFlag metric array shapes do not match.")
darray = np.vstack([darray, np.expand_dims(other.metric_array, 0)])
warray = np.vstack([warray, np.expand_dims(other.weights_array, 0)])
darray, warray = uvutils.collapse(
darray, method, weights=warray, axis=0, return_weights=True
)
this.metric_array = darray
this.weights_array = warray
this.history += "Combined metric arrays. "
if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):
this.history += this.pyuvdata_version_str
if run_check:
this.check(
check_extra=check_extra, run_check_acceptability=run_check_acceptability
)
if not inplace:
return this
    def _select_preprocess(
        self,
        antenna_nums,
        ant_str,
        bls,
        frequencies,
        freq_chans,
        times,
        polarizations,
        blt_inds,
        ant_inds,
    ):
        """Build up blt_inds, freq_inds, pol_inds and history_update_string for select.

        Parameters
        ----------
        antenna_nums : array_like of int, optional
            The antennas numbers to keep in the object (antenna positions and
            names for the removed antennas will be retained unless
            `keep_all_metadata` is False).
        bls : list of tuple, optional
            A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
            baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
            to keep in the object. For length-2 tuples, the ordering of the numbers
            within the tuple does not matter. For length-3 tuples, the polarization
            string is in the order of the two antennas. If length-3 tuples are
            provided, `polarizations` must be None.
        ant_str : str, optional
            A string containing information about what antenna numbers
            and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
            or combinations of antenna numbers and polarizations (e.g. '1',
            '1_2', '1x_2y'). See tutorial for more examples of valid strings and
            the behavior of different forms for ant_str.
            If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
            be kept for both baselines (1, 2) and (2, 3) to return a valid
            pyuvdata object.
            An ant_str cannot be passed in addition to any of `antenna_nums`,
            `bls` args or the `polarizations` parameters,
            if it is a ValueError will be raised.
        frequencies : array_like of float, optional
            The frequencies to keep in the object, each value passed here should
            exist in the freq_array.
        freq_chans : array_like of int, optional
            The frequency channel numbers to keep in the object.
        times : array_like of float, optional
            The times to keep in the object, each value passed here should
            exist in the time_array.
        polarizations : array_like of int or str, optional
            The polarizations numbers to keep in the object, each value passed
            here should exist in the polarization_array. If passing strings, the
            canonical polarization strings (e.g. "xx", "rr") are supported and if the
            `x_orientation` attribute is set, the physical dipole strings
            (e.g. "nn", "ee") are also supported.
        blt_inds : array_like of int, optional
            The baseline-time indices to keep in the object. This is
            not commonly used.
        ant_inds : array_like of int, optional
            The antenna indices to keep in the object. This is
            not commonly used.

        Returns
        -------
        blt_inds : list of int
            list of baseline-time indices to keep. Can be None (to keep everything).
        ant_inds : list of int
            list of antenna number indices to keep. Can be None
            (keep all; only valid for "antenna" mode).
        freq_inds : list of int
            list of frequency indices to keep. Can be None (to keep everything).
        pol_inds : list of int
            list of polarization indices to keep. Can be None (to keep everything).
        history_update_string : str
            string to append to the end of the history.
        """
        # build up history string as we go
        history_update_string = " Downselected to specific "
        n_selects = 0
        # Waterfall objects have no per-antenna/per-baseline axes to select on.
        if self.type == "waterfall":
            if antenna_nums is not None:
                raise ValueError(
                    "Cannot select on antenna_nums with waterfall type "
                    "UVFlag objects."
                )
            if bls is not None:
                raise ValueError(
                    "Cannot select on bls with waterfall type UVFlag objects."
                )

        # ant_str is mutually exclusive with antenna_nums/bls/polarizations;
        # it is parsed into bls + polarizations which the later code handles.
        if ant_str is not None:
            if not (antenna_nums is None and bls is None and polarizations is None):
                raise ValueError(
                    "Cannot provide ant_str with antenna_nums, bls, or polarizations."
                )
            else:
                bls, polarizations = self.parse_ants(ant_str)
                if bls is not None and len(bls) == 0:
                    raise ValueError(
                        f"There is no data matching ant_str={ant_str} in this object."
                    )
        # Antennas, times and blt_inds all need to be combined into a set of
        # blts indices to keep.
        # test for blt_inds presence before adding inds from antennas & times
        if blt_inds is not None:
            blt_inds = uvutils._get_iterable(blt_inds)
            if np.array(blt_inds).ndim > 1:
                blt_inds = np.array(blt_inds).flatten()
            if self.type == "baseline":
                history_update_string += "baseline-times"
            else:
                history_update_string += "times"
            n_selects += 1

        if antenna_nums is not None:
            antenna_nums = uvutils._get_iterable(antenna_nums)
            if np.array(antenna_nums).ndim > 1:
                antenna_nums = np.array(antenna_nums).flatten()
            if n_selects > 0:
                history_update_string += ", antennas"
            else:
                history_update_string += "antennas"
            n_selects += 1
            if self.type == "baseline":
                # Keep only blts where BOTH antennas are in antenna_nums:
                # inds1/inds2 collect rows per array, then intersect.
                inds1 = np.zeros(0, dtype=np.int64)
                inds2 = np.zeros(0, dtype=np.int64)
                for ant in antenna_nums:
                    if ant in self.ant_1_array or ant in self.ant_2_array:
                        wh1 = np.where(self.ant_1_array == ant)[0]
                        wh2 = np.where(self.ant_2_array == ant)[0]
                        if len(wh1) > 0:
                            inds1 = np.append(inds1, list(wh1))
                        if len(wh2) > 0:
                            inds2 = np.append(inds2, list(wh2))
                    else:
                        raise ValueError(
                            "Antenna number {a} is not present in the "
                            "ant_1_array or ant_2_array".format(a=ant)
                        )
                ant_blt_inds = set(inds1).intersection(inds2)

            if self.type == "antenna":
                # For antenna type objects the selection is along the antenna
                # axis (ant_inds), not the blt axis.
                ant_blt_inds = None
                ant_inds = np.zeros(0, dtype=np.int64)
                for ant in antenna_nums:
                    if ant in self.ant_array:
                        wh = np.nonzero(self.ant_array == ant)[0]
                        if len(wh) > 0:
                            ant_inds = np.append(ant_inds, list(wh))
                    else:
                        raise ValueError(
                            "Antenna number {a} is not present in the "
                            "ant_array".format(a=ant)
                        )
        else:
            ant_blt_inds = None

        if bls is not None:
            if self.type != "baseline":
                raise ValueError(
                    'Only "baseline" mode UVFlag objects may select'
                    " along the baseline axis"
                )
            # Accept a single tuple in place of a list of tuples.
            if isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):
                bls = [bls]
            if not all(isinstance(item, tuple) for item in bls):
                raise ValueError(
                    "bls must be a list of tuples of antenna numbers "
                    "(optionally with polarization)."
                )
            if not all(
                [isinstance(item[0], (int, np.integer)) for item in bls]
                + [isinstance(item[1], (int, np.integer)) for item in bls]
            ):
                raise ValueError(
                    "bls must be a list of tuples of integer antenna numbers "
                    "(optionally with polarization)."
                )
            if all(len(item) == 3 for item in bls):
                if polarizations is not None:
                    raise ValueError(
                        "Cannot provide length-3 tuples and also specify polarizations."
                    )
                if not all(isinstance(item[2], str) for item in bls):
                    raise ValueError(
                        "The third element in each bl must be a polarization string"
                    )
            if n_selects > 0:
                history_update_string += ", baselines"
            else:
                history_update_string += "baselines"
            n_selects += 1
            bls_blt_inds = np.zeros(0, dtype=np.int64)
            bl_pols = set()
            for bl in bls:
                if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):
                    raise ValueError(
                        "Antenna number {a} is not present in the "
                        "ant_1_array or ant_2_array".format(a=bl[0])
                    )
                if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):
                    raise ValueError(
                        "Antenna number {a} is not present in the "
                        "ant_1_array or ant_2_array".format(a=bl[1])
                    )
                # Look for the pair in both orderings; a reversed match
                # conjugates the requested polarization.
                wh1 = np.where(
                    np.logical_and(self.ant_1_array == bl[0], self.ant_2_array == bl[1])
                )[0]
                wh2 = np.where(
                    np.logical_and(self.ant_1_array == bl[1], self.ant_2_array == bl[0])
                )[0]
                if len(wh1) > 0:
                    bls_blt_inds = np.append(bls_blt_inds, list(wh1))
                    if len(bl) == 3:
                        bl_pols.add(bl[2])
                elif len(wh2) > 0:
                    bls_blt_inds = np.append(bls_blt_inds, list(wh2))
                    if len(bl) == 3:
                        bl_pols.add(uvutils.conj_pol(bl[2]))
                else:
                    raise ValueError(
                        "Antenna pair {p} does not have any data "
                        "associated with it.".format(p=bl)
                    )
            if len(bl_pols) > 0:
                polarizations = list(bl_pols)

            if ant_blt_inds is not None:
                # Use intersection (and) to join antenna_names/nums & ant_pairs_nums
                ant_blt_inds = set(ant_blt_inds).intersection(bls_blt_inds)
            else:
                ant_blt_inds = bls_blt_inds

        if ant_blt_inds is not None:
            if blt_inds is not None:
                # Use intersection (and) to join
                # antenna_names/nums/ant_pairs_nums with blt_inds
                blt_inds = set(blt_inds).intersection(ant_blt_inds)
            else:
                blt_inds = ant_blt_inds

        if times is not None:
            times = uvutils._get_iterable(times)
            if np.array(times).ndim > 1:
                times = np.array(times).flatten()
            if n_selects > 0:
                # Avoid repeating "times" if blt_inds already added it
                # (non-baseline types label blt selection as "times").
                if (
                    self.type != "baseline" and "times" not in history_update_string
                ) or self.type == "baseline":
                    history_update_string += ", times"
            else:
                history_update_string += "times"
            n_selects += 1
            time_blt_inds = np.zeros(0, dtype=np.int64)
            for jd in times:
                if jd in self.time_array:
                    time_blt_inds = np.append(
                        time_blt_inds, np.where(self.time_array == jd)[0]
                    )
                else:
                    raise ValueError(
                        "Time {t} is not present in the time_array".format(t=jd)
                    )
            if blt_inds is not None:
                # Use intersection (and) to join
                # antenna_names/nums/ant_pairs_nums/blt_inds with times
                blt_inds = set(blt_inds).intersection(time_blt_inds)
            else:
                blt_inds = time_blt_inds

        if blt_inds is not None:
            if len(blt_inds) == 0:
                raise ValueError("No baseline-times were found that match criteria")
            # For non-baseline types the "blt" axis is really the time axis.
            if self.type == "baseline":
                compare_length = self.Nblts
            else:
                compare_length = self.Ntimes
            if max(blt_inds) >= compare_length:
                raise ValueError("blt_inds contains indices that are too large")
            if min(blt_inds) < 0:
                raise ValueError("blt_inds contains indices that are negative")
            blt_inds = sorted(set(blt_inds))

        if freq_chans is not None:
            # freq_chans are converted to frequencies and merged with any
            # explicitly requested frequencies.
            freq_chans = uvutils._get_iterable(freq_chans)
            if np.array(freq_chans).ndim > 1:
                freq_chans = np.array(freq_chans).flatten()
            if frequencies is None:
                frequencies = np.squeeze(self.freq_array)[freq_chans]
            else:
                frequencies = uvutils._get_iterable(frequencies)
                frequencies = np.sort(
                    list(
                        set(frequencies) | set(np.squeeze(self.freq_array)[freq_chans])
                    )
                )

        if frequencies is not None:
            frequencies = uvutils._get_iterable(frequencies)
            if np.array(frequencies).ndim > 1:
                frequencies = np.array(frequencies).flatten()
            if n_selects > 0:
                history_update_string += ", frequencies"
            else:
                history_update_string += "frequencies"
            n_selects += 1
            freq_inds = np.zeros(0, dtype=np.int64)
            # this works because we only allow one SPW. This will have to be
            # reworked when we support more.
            if self.type != "waterfall" and not self.future_array_shapes:
                freq_arr_use = self.freq_array[0, :]
            else:
                freq_arr_use = self.freq_array
            for f in frequencies:
                if f in freq_arr_use:
                    freq_inds = np.append(freq_inds, np.where(freq_arr_use == f)[0])
                else:
                    raise ValueError(
                        "Frequency {f} is not present in the freq_array".format(f=f)
                    )
            freq_inds = sorted(set(freq_inds))
        else:
            freq_inds = None

        if polarizations is not None:
            polarizations = uvutils._get_iterable(polarizations)
            if np.array(polarizations).ndim > 1:
                polarizations = np.array(polarizations).flatten()
            if n_selects > 0:
                history_update_string += ", polarizations"
            else:
                history_update_string += "polarizations"
            n_selects += 1
            pol_inds = np.zeros(0, dtype=np.int64)
            for p in polarizations:
                # String polarizations are converted to the integer convention.
                if isinstance(p, str):
                    p_num = uvutils.polstr2num(p, x_orientation=self.x_orientation)
                else:
                    p_num = p
                if p_num in self.polarization_array:
                    pol_inds = np.append(
                        pol_inds, np.where(self.polarization_array == p_num)[0]
                    )
                else:
                    raise ValueError(
                        "Polarization {p} is not present in the "
                        "polarization_array".format(p=p)
                    )
            pol_inds = sorted(set(pol_inds))
        else:
            pol_inds = None

        history_update_string += " using pyuvdata."

        return blt_inds, ant_inds, freq_inds, pol_inds, history_update_string
    def _select_metadata(
        self, blt_inds, ant_inds, freq_inds, pol_inds, history_update_string
    ):
        """Perform select on everything except the data-sized arrays.

        Parameters
        ----------
        blt_inds : list of int
            list of baseline-time indices to keep. Can be None (to keep everything).
        ant_inds : list of int
            list of antenna indices to keep. Can be None (to keep everything;
            only used for "antenna" type objects).
        freq_inds : list of int
            list of frequency indices to keep. Can be None (to keep everything).
        pol_inds : list of int
            list of polarization indices to keep. Can be None (to keep everything).
        history_update_string : str
            string to append to the end of the history.
        """
        if blt_inds is not None:
            if self.type == "baseline":
                # Trim all baseline-axis metadata together and refresh counts.
                self.Nblts = len(blt_inds)
                self.baseline_array = self.baseline_array[blt_inds]
                self.Nbls = len(np.unique(self.baseline_array))
                self.ant_1_array = self.ant_1_array[blt_inds]
                self.ant_2_array = self.ant_2_array[blt_inds]
                self.Nants_data = int(
                    np.union1d(self.ant_1_array, self.ant_2_array).size
                )
            # For non-baseline types blt_inds indexes the time axis.
            self.time_array = self.time_array[blt_inds]
            self.lst_array = self.lst_array[blt_inds]
            self.Ntimes = len(np.unique(self.time_array))

        if self.type == "antenna":
            if ant_inds is not None:
                self.ant_array = self.ant_array[ant_inds]
                self.Nants_data = int(len(self.ant_array))

        if freq_inds is not None:
            self.Nfreqs = len(freq_inds)
            # Current (non-future) shapes carry a leading spw axis on freq_array.
            if self.type != "waterfall" and not self.future_array_shapes:
                self.freq_array = self.freq_array[:, freq_inds]
            else:
                self.freq_array = self.freq_array[freq_inds]
            if self.channel_width is not None:
                self.channel_width = self.channel_width[freq_inds]
            if self.flex_spw_id_array is not None:
                self.flex_spw_id_array = self.flex_spw_id_array[freq_inds]
                if self.Nspws > 1:
                    # Drop spws with no remaining channels.
                    self.spw_array = self.spw_array[
                        np.where(np.isin(self.spw_array, self.flex_spw_id_array))[0]
                    ]
                    self.Nspws = self.spw_array.size

        if pol_inds is not None:
            self.Npols = len(pol_inds)
            self.polarization_array = self.polarization_array[pol_inds]

        self.history = self.history + history_update_string
    def select(
        self,
        antenna_nums=None,
        ant_inds=None,
        bls=None,
        ant_str=None,
        frequencies=None,
        freq_chans=None,
        times=None,
        polarizations=None,
        blt_inds=None,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        inplace=True,
    ):
        """
        Downselect data to keep on the object along various axes.

        Axes that can be selected along depend on the current type of the object.
        However some axis may always be selected upon, these include frequencies,
        times and polarizations.
        In "baseline" and "antenna" modes, antenna numbers may be selected.
        In "baseline" mode, antenna pairs may be selected.
        Specific baseline-time indices can also be selected in "baseline" mode,
        but this is not commonly used.
        The history attribute on the object will be updated to identify the
        operations performed.

        Parameters
        ----------
        antenna_nums : array_like of int, optional
            The antennas numbers to keep in the object.
        bls : list of tuple, optional
            A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of
            baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines
            to keep in the object. For length-2 tuples, the ordering of the numbers
            within the tuple does not matter. For length-3 tuples, the polarization
            string is in the order of the two antennas. If length-3 tuples are
            provided, `polarizations` must be None.
        ant_str : str, optional
            A string containing information about what antenna numbers
            and polarizations to keep in the object. Can be 'auto', 'cross', 'all',
            or combinations of antenna numbers and polarizations (e.g. '1',
            '1_2', '1x_2y'). See tutorial for more examples of valid strings and
            the behavior of different forms for ant_str.
            If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
            be kept for both baselines (1, 2) and (2, 3) to return a valid
            pyuvdata object.
            An ant_str cannot be passed in addition to any of the `antenna_nums`,
            `bls` or `polarizations` parameters; if it is, a ValueError will be
            raised.
        frequencies : array_like of float, optional
            The frequencies to keep in the object, each value passed here should
            exist in the freq_array.
        freq_chans : array_like of int, optional
            The frequency channel numbers to keep in the object.
        times : array_like of float, optional
            The times to keep in the object, each value passed here should
            exist in the time_array.
        polarizations : array_like of int or str, optional
            The polarizations numbers to keep in the object, each value passed
            here should exist in the polarization_array. If passing strings, the
            canonical polarization strings (e.g. "xx", "rr") are supported and if the
            `x_orientation` attribute is set, the physical dipole strings
            (e.g. "nn", "ee") are also supported.
        blt_inds : array_like of int, optional
            The baseline-time indices to keep in the object. This is
            not commonly used.
        ant_inds : array_like of int, optional
            The antenna indices to keep in the object. This is
            not commonly used.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after downselecting data on this object.
        check_extra : bool
            Option to check optional parameters as well as required ones (the
            default is True, meaning the optional parameters will be checked).
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            downselecting data on this object.
        inplace : bool
            Option to perform the select directly on self or return a new UVFlag
            object with just the selected data.

        Returns
        -------
        UVFlag object or None
            None is returned if inplace is True, otherwise a new UVFlag object
            with just the selected data is returned

        Raises
        ------
        ValueError
            If any of the parameters are set to inappropriate values.
        """
        if inplace:
            uv_object = self
        else:
            uv_object = self.copy()

        # Translate the keyword selections into per-axis index lists; argument
        # validation happens inside _select_preprocess.
        (
            blt_inds,
            ant_inds,
            freq_inds,
            pol_inds,
            history_update_string,
        ) = uv_object._select_preprocess(
            antenna_nums=antenna_nums,
            ant_str=ant_str,
            bls=bls,
            frequencies=frequencies,
            freq_chans=freq_chans,
            times=times,
            polarizations=polarizations,
            blt_inds=blt_inds,
            ant_inds=ant_inds,
        )

        # do select operations on everything except data_array, flag_array
        # and nsample_array
        uv_object._select_metadata(
            blt_inds, ant_inds, freq_inds, pol_inds, history_update_string
        )

        # Index the data-sized arrays (metric/flag/weights). Which axis each
        # index list applies to depends on the object type and on whether the
        # future array shapes are in use (future shapes drop the length-1 spw
        # axis).
        if blt_inds is not None:
            # baseline type: blt is the leading axis
            if self.type == "baseline":
                for param_name, param in zip(
                    self._data_params, uv_object.data_like_parameters
                ):
                    setattr(uv_object, param_name, param[blt_inds])

            # waterfall type: blt_inds index the leading time axis
            if self.type == "waterfall":
                for param_name, param in zip(
                    self._data_params, uv_object.data_like_parameters
                ):
                    setattr(uv_object, param_name, param[blt_inds])

            # antenna type: blt_inds index the time axis
            # (axis 2 for future shapes, axis 3 with the spw axis present)
            if self.type == "antenna":
                if self.future_array_shapes:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, blt_inds, :])
                else:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, :, blt_inds, :])

        if ant_inds is not None and self.type == "antenna":
            # antennas are the leading axis on antenna-type objects
            for param_name, param in zip(
                self._data_params, uv_object.data_like_parameters
            ):
                setattr(uv_object, param_name, param[ant_inds])

        if freq_inds is not None:
            if self.type == "baseline":
                if self.future_array_shapes:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, freq_inds, :])
                else:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, freq_inds, :])

            if self.type == "waterfall":
                for param_name, param in zip(
                    self._data_params, uv_object.data_like_parameters
                ):
                    setattr(uv_object, param_name, param[:, freq_inds, :])

            if self.type == "antenna":
                if self.future_array_shapes:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, freq_inds, :, :])
                else:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, freq_inds, :, :])

        if pol_inds is not None:
            # polarization is always the trailing axis
            if self.type == "baseline":
                if self.future_array_shapes:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, pol_inds])
                else:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, :, pol_inds])

            if self.type == "waterfall":
                for param_name, param in zip(
                    self._data_params, uv_object.data_like_parameters
                ):
                    setattr(uv_object, param_name, param[:, :, pol_inds])

            if self.type == "antenna":
                if self.future_array_shapes:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, :, pol_inds])
                else:
                    for param_name, param in zip(
                        self._data_params, uv_object.data_like_parameters
                    ):
                        setattr(uv_object, param_name, param[:, :, :, :, pol_inds])

        # check if object is uv_object-consistent
        if run_check:
            uv_object.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )

        if not inplace:
            return uv_object
    def read(
        self,
        filename,
        history="",
        use_future_array_shapes=False,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Read in flag/metric data from a HDF5 file.

        Any existing state on this object is discarded (``__init__`` is
        re-run) before reading. A list or tuple of filenames is read by
        reading the first file and then adding (``+=``) each subsequent one.

        Parameters
        ----------
        filename : str or pathlib.Path
            The file name to read.
        history : str
            History string to append to UVFlag history attribute.
        use_future_array_shapes : bool
            Option to convert to the future planned array shapes before the changes go
            into effect by removing the spectral window axis.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after reading data.
        check_extra : bool
            Option to check optional parameters as well as required ones.
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            reading data.

        Raises
        ------
        IOError
            If the file does not exist.
        ValueError
            If the file's type or mode header value is not a recognized value.
        """
        # make sure we have an empty object.
        self.__init__()
        if isinstance(filename, (tuple, list)):
            # read the first file into self, then accumulate the rest via "+="
            self.read(filename[0], use_future_array_shapes=use_future_array_shapes)
            if len(filename) > 1:
                for f in filename[1:]:
                    f2 = UVFlag(
                        f,
                        history=history,
                        use_future_array_shapes=use_future_array_shapes,
                    )
                    self += f2
                    del f2
        else:
            if not os.path.exists(filename):
                # NOTE(review): str + Path concatenation raises TypeError when a
                # pathlib.Path is passed (the docstring allows Path) — consider
                # an f-string here; confirm intended behavior.
                raise IOError(filename + " not found.")

            # update filename attribute
            basename = os.path.basename(filename)
            self.filename = [basename]
            self._filename.form = (1,)

            # Open file for reading
            with h5py.File(filename, "r") as f:
                header = f["/Header"]

                # object type ("antenna"/"baseline"/"waterfall") determines
                # which parameters are required and the data array shapes
                self.type = header["type"][()].decode("utf8")
                if self.type == "antenna":
                    self._set_type_antenna()
                elif self.type == "baseline":
                    self._set_type_baseline()
                elif self.type == "waterfall":
                    self._set_type_waterfall()
                else:
                    raise ValueError(
                        "File cannot be read. Received type "
                        "parameter: {receive} but "
                        "must be within acceptable values: "
                        "{expect}".format(
                            receive=self.type,
                            expect=(", ").join(self._type.acceptable_vals),
                        )
                    )

                # mode selects between a float metric_array and a bool flag_array
                self.mode = header["mode"][()].decode("utf8")
                if self.mode == "metric":
                    self._set_mode_metric()
                elif self.mode == "flag":
                    self._set_mode_flag()
                else:
                    raise ValueError(
                        "File cannot be read. Received mode "
                        "parameter: {receive} but "
                        "must be within acceptable values: "
                        "{expect}".format(
                            receive=self.mode,
                            expect=(", ").join(self._mode.acceptable_vals),
                        )
                    )

                if "x_orientation" in header.keys():
                    self.x_orientation = header["x_orientation"][()].decode("utf8")

                self.time_array = header["time_array"][()]
                if "Ntimes" in header.keys():
                    self.Ntimes = int(header["Ntimes"][()])
                else:
                    # older files lack Ntimes; derive it from the time values
                    self.Ntimes = np.unique(self.time_array).size

                self.lst_array = header["lst_array"][()]

                # read data arrays to figure out if the file has future shapes or not
                future_shapes_ndim = {"antenna": 4, "baseline": 3}
                dgrp = f["/Data"]
                if self.mode == "metric":
                    self.metric_array = dgrp["metric_array"][()]
                    if self.type != "waterfall":
                        if self.metric_array.ndim == future_shapes_ndim[self.type]:
                            self._set_future_array_shapes()
                    self.weights_array = dgrp["weights_array"][()]
                    if "weights_square_array" in dgrp:
                        self.weights_square_array = dgrp["weights_square_array"][()]
                elif self.mode == "flag":
                    self.flag_array = dgrp["flag_array"][()]
                    if self.type != "waterfall":
                        if self.flag_array.ndim == future_shapes_ndim[self.type]:
                            self._set_future_array_shapes()

                self.freq_array = header["freq_array"][()]
                # older save files will not have this spws axis
                # at least_2d will preserve shape of 2d arrays and
                # promote 1D to (1, Nfreqs)
                if self.type != "waterfall" and not self.future_array_shapes:
                    self.freq_array = np.atleast_2d(self.freq_array)
                elif self.freq_array.ndim > 1:
                    self.freq_array = np.squeeze(self.freq_array)

                if "Nfreqs" in header.keys():
                    self.Nfreqs = int(header["Nfreqs"][()])
                else:
                    self.Nfreqs = np.unique(self.freq_array).size

                if "channel_width" in header.keys():
                    self.channel_width = header["channel_width"][()]
                else:
                    # older files do not have the channel_width parameter. Guess it from
                    # the freq array spacing.
                    msg = (
                        "channel_width not available in file, computing it from the "
                        "freq_array spacing."
                    )
                    freq_delta = np.diff(np.squeeze(self.freq_array))
                    if uvutils._test_array_constant_spacing(
                        self.freq_array, tols=self._freq_array.tols
                    ):
                        self.channel_width = np.full(self.Nfreqs, freq_delta[0])
                    else:
                        msg += (
                            " The freq_array does not have equal spacing, so the last "
                            "channel_width is set equal to the channel width below it."
                        )
                        self.channel_width = np.concatenate(
                            (freq_delta, np.array([freq_delta[-1]]))
                        )
                    warnings.warn(msg)

                if "spw_array" in header.keys():
                    self.spw_array = header["spw_array"][()]
                else:
                    self.spw_array = np.array([0])

                if "Nspws" in header.keys():
                    self.Nspws = int(header["Nspws"][()])
                else:
                    self.Nspws = self.spw_array.size

                if "flex_spw_id_array" in header.keys():
                    self.flex_spw_id_array = header["flex_spw_id_array"][()]
                elif self.Nspws == 1:
                    # set it by default
                    self.flex_spw_id_array = np.full(
                        self.Nfreqs, self.spw_array[0], dtype=int
                    )

                if "telescope_name" in header.keys():
                    self.telescope_name = header["telescope_name"][()].decode("utf8")

                if "telescope_location" in header.keys():
                    self.telescope_location = header["telescope_location"][()]

                self.history = header["history"][()].decode("utf8")
                self.history += history
                if not uvutils._check_history_version(
                    self.history, self.pyuvdata_version_str
                ):
                    self.history += self.pyuvdata_version_str

                # get extra_keywords
                if "extra_keywords" in header.keys():
                    self.extra_keywords = {}
                    for key in header["extra_keywords"].keys():
                        if header["extra_keywords"][key].dtype.type in (
                            np.string_,
                            np.object_,
                        ):
                            # byte strings are decoded to python str
                            self.extra_keywords[key] = bytes(
                                header["extra_keywords"][key][()]
                            ).decode("utf8")
                        else:
                            self.extra_keywords[key] = header["extra_keywords"][key][()]
                else:
                    self.extra_keywords = {}

                if "label" in header.keys():
                    self.label = header["label"][()].decode("utf8")

                polarization_array = header["polarization_array"][()]
                if isinstance(polarization_array[0], np.string_):
                    # stored as byte strings; convert to unicode strings
                    polarization_array = np.asarray(polarization_array, dtype=np.str_)
                self.polarization_array = polarization_array
                self._check_pol_state()

                if "Npols" in header.keys():
                    self.Npols = int(header["Npols"][()])
                else:
                    self.Npols = len(self.polarization_array)

                if self.type == "baseline":
                    self.ant_1_array = header["ant_1_array"][()]
                    self.ant_2_array = header["ant_2_array"][()]

                    self.baseline_array = self.antnums_to_baseline(
                        self.ant_1_array, self.ant_2_array
                    )
                    if "Nblts" in header.keys():
                        self.Nblts = int(header["Nblts"][()])
                    else:
                        self.Nblts = len(self.baseline_array)

                    self.Nbls = np.unique(self.baseline_array).size

                    if "Nants_data" in header.keys():
                        self.Nants_data = int(header["Nants_data"][()])
                        # sanity-check against the antennas actually present
                        n_ants_detected = int(
                            np.union1d(self.ant_1_array, self.ant_2_array).size
                        )
                        if self.Nants_data != n_ants_detected:
                            warnings.warn(
                                "Nants_data in file does not match number of antennas "
                                "with data. Resetting Nants_data."
                            )
                            self.Nants_data = n_ants_detected
                    else:
                        self.Nants_data = int(
                            np.union1d(self.ant_1_array, self.ant_2_array).size
                        )

                elif self.type == "antenna":
                    self.ant_array = header["ant_array"][()]
                    if "Nants_data" in header.keys():
                        self.Nants_data = int(header["Nants_data"][()])
                    else:
                        self.Nants_data = len(self.ant_array)

                if "Nants_telescope" in header.keys():
                    self.Nants_telescope = int(header["Nants_telescope"][()])

                if "antenna_numbers" in header.keys():
                    self.antenna_numbers = header["antenna_numbers"][()]

                if "antenna_names" in header.keys():
                    self.antenna_names = np.array(
                        [bytes(n).decode("utf8") for n in header["antenna_names"][:]]
                    )

                if "antenna_positions" in header.keys():
                    self.antenna_positions = header["antenna_positions"][()]

                # fall back to known-telescope parameters when the file is
                # missing telescope metadata
                if self.telescope_name is None:
                    warnings.warn(
                        "telescope_name not available in file, so telescope related "
                        "parameters cannot be set. This will result in errors when the "
                        "object is checked. To avoid the errors, use `run_check=False` "
                        "to turn off the check."
                    )
                elif (
                    self.telescope_location is None
                    or self.antenna_numbers is None
                    or self.antenna_names is None
                    or self.antenna_positions is None
                ):
                    if (
                        self.antenna_numbers is None
                        and self.antenna_names is None
                        and self.antenna_positions is None
                    ):
                        self.Nants_telescope = None
                    self.set_telescope_params()

                # last-resort reconstruction of antenna_numbers from the data
                if self.antenna_numbers is None and self.type in [
                    "baseline",
                    "antenna",
                ]:
                    msg = "antenna_numbers not in file"
                    if (
                        self.Nants_telescope is None
                        or self.Nants_telescope == self.Nants_data
                    ):
                        if self.type == "baseline":
                            msg += ", setting based on ant_1_array and ant_2_array."
                            self.antenna_numbers = np.unique(
                                np.union1d(self.ant_1_array, self.ant_2_array)
                            )
                        else:
                            msg += ", setting based on ant_array."
                            self.antenna_numbers = np.unique(self.ant_array)
                    else:
                        msg += ", cannot be set based on "
                        if self.type == "baseline":
                            msg += "ant_1_array and ant_2_array"
                        else:
                            msg += "ant_array"
                        msg += (
                            " because Nants_telescope is greater than Nants_data. This "
                            "will result in errors when the object is checked. To "
                            "avoid the errors, use `run_check=False` to turn off the "
                            "check."
                        )
                    warnings.warn(msg)

                if self.antenna_names is None and self.antenna_numbers is not None:
                    warnings.warn(
                        "antenna_names not in file, setting based on antenna_numbers"
                    )
                    self.antenna_names = self.antenna_numbers.astype(str)

                if self.Nants_telescope is None:
                    if self.antenna_numbers is not None:
                        self.Nants_telescope = self.antenna_numbers.size
                    elif self.antenna_names is not None:
                        self.Nants_telescope = self.antenna_names.size
                    elif self.antenna_positions is not None:
                        self.Nants_telescope = (self.antenna_positions.shape)[0]

            self.clear_unused_attributes()

        # convert to the requested array-shape convention if the file's
        # convention differs from what the caller asked for
        if use_future_array_shapes != self.future_array_shapes:
            if use_future_array_shapes:
                self.use_future_array_shapes()
            else:
                with warnings.catch_warnings():
                    warnings.filterwarnings(
                        "ignore",
                        message="This method will be removed in version 3.0 when "
                        "the current array shapes are no longer supported.",
                    )
                    self.use_current_array_shapes()
        if not use_future_array_shapes:
            warnings.warn(_future_array_shapes_warning, DeprecationWarning)

        if run_check:
            self.check(
                check_extra=check_extra,
                run_check_acceptability=run_check_acceptability,
            )
    def write(self, filename, clobber=False, data_compression="lzf"):
        """Write a UVFlag object to a hdf5 file.

        The layout written here mirrors what ``read`` expects: a "Header"
        group with the metadata and a "Data" group with the metric/flag
        (and weights) datasets.

        Parameters
        ----------
        filename : str
            The file to write to.
        clobber : bool
            Option to overwrite the file if it already exists.
        data_compression : str
            HDF5 filter to apply when writing the data_array.
            If no compression is wanted, set to None.

        Raises
        ------
        ValueError
            If the file already exists and `clobber` is False.
        """
        if os.path.exists(filename):
            if clobber:
                print("File " + filename + " exists; clobbering")
            else:
                # NOTE(review): the message says "skipping" but this raises —
                # consider rewording to match the actual behavior.
                raise ValueError("File " + filename + " exists; skipping")

        with h5py.File(filename, "w") as f:
            header = f.create_group("Header")

            # write out metadata
            if self.future_array_shapes:
                # this is Version 1.0
                header["version"] = np.string_("1.0")
            else:
                header["version"] = np.string_("0.1")
            header["type"] = np.string_(self.type)
            header["mode"] = np.string_(self.mode)
            if self.telescope_name is not None:
                header["telescope_name"] = np.string_(self.telescope_name)
            if self.telescope_location is not None:
                header["telescope_location"] = self.telescope_location

            header["Ntimes"] = self.Ntimes
            header["time_array"] = self.time_array
            header["lst_array"] = self.lst_array

            header["freq_array"] = self.freq_array
            header["Nfreqs"] = self.Nfreqs
            header["channel_width"] = self.channel_width
            header["Nspws"] = self.Nspws
            header["spw_array"] = self.spw_array
            if self.flex_spw_id_array is not None:
                header["flex_spw_id_array"] = self.flex_spw_id_array

            header["Npols"] = self.Npols
            if self.x_orientation is not None:
                header["x_orientation"] = np.string_(self.x_orientation)

            # polarizations may be stored as strings (e.g. after to_antenna);
            # write them as fixed-width byte strings for HDF5
            if isinstance(self.polarization_array.item(0), str):
                polarization_array = np.asarray(
                    self.polarization_array, dtype=np.string_
                )
            else:
                polarization_array = self.polarization_array
            header["polarization_array"] = polarization_array

            if not uvutils._check_history_version(
                self.history, self.pyuvdata_version_str
            ):
                self.history += self.pyuvdata_version_str

            # write out extra keywords if it exists and has elements
            if self.extra_keywords:
                extra_keywords = header.create_group(
                    "extra_keywords"
                )  # create spot in header
                for k in self.extra_keywords.keys():
                    if isinstance(self.extra_keywords[k], str):
                        extra_keywords[k] = np.string_(self.extra_keywords[k])
                    else:
                        extra_keywords[k] = self.extra_keywords[k]

            header["history"] = np.string_(self.history)
            header["label"] = np.string_(self.label)

            # type-specific metadata
            if self.type == "baseline":
                header["Nblts"] = self.Nblts
                header["ant_1_array"] = self.ant_1_array
                header["ant_2_array"] = self.ant_2_array
                header["Nants_data"] = self.Nants_data

            elif self.type == "antenna":
                header["ant_array"] = self.ant_array
                header["Nants_data"] = self.Nants_data
                header["Nants_telescope"] = self.Nants_telescope
            if self.antenna_names is not None:
                header["antenna_names"] = np.asarray(self.antenna_names, dtype="bytes")
            if self.antenna_numbers is not None:
                header["antenna_numbers"] = self.antenna_numbers
            if self.antenna_positions is not None:
                header["antenna_positions"] = self.antenna_positions

            dgrp = f.create_group("Data")
            if self.mode == "metric":
                dgrp.create_dataset(
                    "metric_array",
                    chunks=True,
                    data=self.metric_array,
                    compression=data_compression,
                )
                dgrp.create_dataset(
                    "weights_array",
                    chunks=True,
                    data=self.weights_array,
                    compression=data_compression,
                )
                if self.weights_square_array is not None:
                    dgrp.create_dataset(
                        "weights_square_array",
                        chunks=True,
                        data=self.weights_square_array,
                        compression=data_compression,
                    )
            elif self.mode == "flag":
                dgrp.create_dataset(
                    "flag_array",
                    chunks=True,
                    data=self.flag_array,
                    compression=data_compression,
                )
    def from_uvdata(
        self,
        indata,
        mode="metric",
        copy_flags=False,
        waterfall=False,
        history="",
        label="",
        use_future_array_shapes=False,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Construct a UVFlag object from a UVData object.

        The resulting object has type "waterfall" (time, freq, pol axes) if
        `waterfall` is True, otherwise type "baseline" (blt, freq, pol axes,
        matching the UVData layout).

        Parameters
        ----------
        indata : UVData
            Input to initialize UVFlag object.
        mode : {"metric", "flag"}, optional
            The mode determines whether the object has a floating point metric_array
            or a boolean flag_array.
        copy_flags : bool, optional
            Whether to copy flags from indata to new UVFlag object
        waterfall : bool, optional
            Whether to immediately initialize as a waterfall object, with flag/metric
            axes: time, frequency, polarization.
        history : str, optional
            History string to attach to object.
        label: str, optional
            String used for labeling the object (e.g. 'FM').
        use_future_array_shapes : bool
            Option to convert to the future planned array shapes before the changes go
            into effect by removing the spectral window axis.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after creating UVFlag object.
        check_extra : bool
            Option to check optional parameters as well as required ones (the
            default is True, meaning the optional parameters will be checked).
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            creating UVFlag object.
        """
        if not issubclass(indata.__class__, UVData):
            raise ValueError(
                "from_uvdata can only initialize a UVFlag object from an input "
                "UVData object or a subclass of a UVData object."
            )

        if mode.lower() == "metric":
            self._set_mode_metric()
        elif mode.lower() == "flag":
            self._set_mode_flag()
        else:
            raise ValueError(
                "Input mode must be within acceptable values: "
                "{}".format((", ").join(self._mode.acceptable_vals))
            )

        if use_future_array_shapes:
            self._set_future_array_shapes()

        # copy over the metadata shared by both waterfall and baseline types
        self.Nfreqs = indata.Nfreqs
        self.polarization_array = copy.deepcopy(indata.polarization_array)
        self.Npols = indata.Npols
        self.Nants_telescope = indata.Nants_telescope
        self.Ntimes = indata.Ntimes
        if indata.future_array_shapes or indata.flex_spw:
            self.channel_width = copy.deepcopy(indata.channel_width)
        else:
            # scalar channel_width on old-shape inputs; expand to per-channel
            self.channel_width = np.full(self.Nfreqs, indata.channel_width)
        self.telescope_name = indata.telescope_name
        self.telescope_location = indata.telescope_location
        self.antenna_names = copy.deepcopy(indata.antenna_names)
        self.antenna_numbers = copy.deepcopy(indata.antenna_numbers)
        self.antenna_positions = copy.deepcopy(indata.antenna_positions)
        self.Nspws = indata.Nspws
        self.spw_array = copy.deepcopy(indata.spw_array)
        if indata.flex_spw_id_array is not None:
            self.flex_spw_id_array = copy.deepcopy(indata.flex_spw_id_array)

        if waterfall:
            self._set_type_waterfall()
            self.history += 'Flag object with type "waterfall" created. '
            if not uvutils._check_history_version(
                self.history, self.pyuvdata_version_str
            ):
                self.history += self.pyuvdata_version_str

            # one entry per unique time; ri maps back into the blt axis so
            # the matching lsts can be pulled out below
            self.time_array, ri = np.unique(indata.time_array, return_index=True)
            if indata.future_array_shapes:
                self.freq_array = copy.deepcopy(indata.freq_array)
            else:
                self.freq_array = indata.freq_array[0, :]
            self.lst_array = indata.lst_array[ri]
            if copy_flags:
                raise NotImplementedError(
                    "Cannot copy flags when initializing waterfall UVFlag from "
                    "UVData or UVCal."
                )
            else:
                # waterfall arrays are (Ntimes, Nfreqs, Npols)
                if self.mode == "flag":
                    self.flag_array = np.zeros(
                        (self.Ntimes, self.Nfreqs, self.Npols), np.bool_
                    )
                elif self.mode == "metric":
                    self.metric_array = np.zeros((self.Ntimes, self.Nfreqs, self.Npols))

        else:
            self._set_type_baseline()
            self.history += 'Flag object with type "baseline" created. '
            if not uvutils._check_history_version(
                self.history, self.pyuvdata_version_str
            ):
                self.history += self.pyuvdata_version_str

            self.baseline_array = copy.deepcopy(indata.baseline_array)
            self.Nbls = indata.Nbls
            self.Nblts = indata.Nblts
            self.ant_1_array = copy.deepcopy(indata.ant_1_array)
            self.ant_2_array = copy.deepcopy(indata.ant_2_array)
            self.Nants_data = indata.Nants_data

            self.time_array = copy.deepcopy(indata.time_array)
            self.lst_array = copy.deepcopy(indata.lst_array)

            # reconcile the spw axis between the input's shape convention and
            # this object's
            if self.future_array_shapes == indata.future_array_shapes:
                # match on future shape
                self.freq_array = copy.deepcopy(indata.freq_array)
            elif indata.future_array_shapes:
                # input is future shaped, self is not
                self.freq_array = indata.freq_array[np.newaxis, :]
            else:
                # input is not future shaped, self is
                self.freq_array = indata.freq_array[0, :]

            if copy_flags:
                if self.future_array_shapes == indata.future_array_shapes:
                    self.flag_array = copy.deepcopy(indata.flag_array)
                elif indata.future_array_shapes:
                    self.flag_array = indata.flag_array[:, np.newaxis, :, :]
                else:
                    self.flag_array = indata.flag_array[:, 0, :, :]
                self.history += (
                    " Flags copied from " + str(indata.__class__) + " object."
                )
                # copying flags only makes sense in flag mode
                if self.mode == "metric":
                    warnings.warn(
                        'Copying flags to type=="baseline" results in mode=="flag".'
                    )
                    self._set_mode_flag()
            else:
                if self.future_array_shapes:
                    array_shape = (self.Nblts, self.Nfreqs, self.Npols)
                else:
                    array_shape = (self.Nblts, 1, self.Nfreqs, self.Npols)
                if self.mode == "flag":
                    self.flag_array = np.zeros(array_shape, dtype=np.bool_)
                elif self.mode == "metric":
                    self.metric_array = np.zeros(array_shape, dtype=np.float64)

        self.filename = indata.filename
        self._filename.form = indata._filename.form

        if indata.x_orientation is not None:
            self.x_orientation = indata.x_orientation

        if self.mode == "metric":
            # metrics start with unit weights
            self.weights_array = np.ones(self.metric_array.shape)

        if indata.extra_keywords is not None:
            self.extra_keywords = copy.deepcopy(indata.extra_keywords)

        if history not in self.history:
            self.history += history
        self.label += label

        self.clear_unused_attributes()

        if not use_future_array_shapes:
            warnings.warn(_future_array_shapes_warning, DeprecationWarning)

        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
        return
    def from_uvcal(
        self,
        indata,
        mode="metric",
        copy_flags=False,
        waterfall=False,
        history="",
        label="",
        use_future_array_shapes=False,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
    ):
        """Construct a UVFlag object from a UVCal object.

        The resulting object has type "waterfall" (time, freq, pol axes) if
        `waterfall` is True, otherwise type "antenna" (antenna, freq, time,
        pol axes, matching the UVCal layout). The UVCal jones_array becomes
        this object's polarization_array.

        Parameters
        ----------
        indata : UVCal
            Input to initialize UVFlag object.
        mode : {"metric", "flag"}, optional
            The mode determines whether the object has a floating point metric_array
            or a boolean flag_array.
        copy_flags : bool, optional
            Whether to copy flags from indata to new UVFlag object
        waterfall : bool, optional
            Whether to immediately initialize as a waterfall object, with flag/metric
            axes: time, frequency, polarization.
        history : str, optional
            History string to attach to object.
        label: str, optional
            String used for labeling the object (e.g. 'FM').
        use_future_array_shapes : bool
            Option to convert to the future planned array shapes before the changes go
            into effect by removing the spectral window axis.
        run_check : bool
            Option to check for the existence and proper shapes of parameters
            after creating UVFlag object.
        check_extra : bool
            Option to check optional parameters as well as required ones (the
            default is True, meaning the optional parameters will be checked).
        run_check_acceptability : bool
            Option to check acceptable range of the values of parameters after
            creating UVFlag object.

        Raises
        ------
        ValueError
            If `indata` is not a UVCal (or subclass), is a wide-band UVCal, or
            if `mode` is not an acceptable value.
        """
        if not issubclass(indata.__class__, UVCal):
            raise ValueError(
                "from_uvcal can only initialize a UVFlag object from an input "
                "UVCal object or a subclass of a UVCal object."
            )

        if indata.wide_band:
            raise ValueError(
                "from_uvcal can only initialize a UVFlag object from a non-wide-band "
                "UVCal object."
            )

        if mode.lower() == "metric":
            self._set_mode_metric()
        elif mode.lower() == "flag":
            self._set_mode_flag()
        else:
            raise ValueError(
                "Input mode must be within acceptable values: "
                "{}".format((", ").join(self._mode.acceptable_vals))
            )

        if use_future_array_shapes:
            self._set_future_array_shapes()

        # copy over the metadata shared by both waterfall and antenna types;
        # the UVCal jones axis plays the role of polarization here
        self.Nfreqs = indata.Nfreqs
        self.polarization_array = copy.deepcopy(indata.jones_array)
        self.Npols = indata.Njones
        self.Nants_telescope = indata.Nants_telescope
        self.Ntimes = indata.Ntimes
        self.time_array = copy.deepcopy(indata.time_array)
        self.lst_array = copy.deepcopy(indata.lst_array)
        if indata.future_array_shapes or indata.flex_spw:
            self.channel_width = copy.deepcopy(indata.channel_width)
        else:
            # scalar channel_width on old-shape inputs; expand to per-channel
            self.channel_width = np.full(self.Nfreqs, indata.channel_width)
        self.telescope_name = indata.telescope_name
        self.telescope_location = indata.telescope_location
        self.antenna_names = copy.deepcopy(indata.antenna_names)
        self.antenna_numbers = copy.deepcopy(indata.antenna_numbers)
        self.antenna_positions = copy.deepcopy(indata.antenna_positions)
        self.Nspws = indata.Nspws
        self.spw_array = copy.deepcopy(indata.spw_array)
        if indata.flex_spw_id_array is not None:
            self.flex_spw_id_array = copy.deepcopy(indata.flex_spw_id_array)

        if waterfall:
            self._set_type_waterfall()
            self.history += 'Flag object with type "waterfall" created. '
            if not uvutils._check_history_version(
                self.history, self.pyuvdata_version_str
            ):
                self.history += self.pyuvdata_version_str

            if indata.future_array_shapes:
                self.freq_array = copy.deepcopy(indata.freq_array)
            else:
                self.freq_array = indata.freq_array[0, :]
            if copy_flags:
                raise NotImplementedError(
                    "Cannot copy flags when "
                    "initializing waterfall UVFlag "
                    "from UVData or UVCal."
                )
            else:
                # waterfall arrays are (Ntimes, Nfreqs, Npols)
                if self.mode == "flag":
                    self.flag_array = np.zeros(
                        (self.Ntimes, self.Nfreqs, self.Npols), np.bool_
                    )
                elif self.mode == "metric":
                    self.metric_array = np.zeros((self.Ntimes, self.Nfreqs, self.Npols))
        else:
            self._set_type_antenna()
            self.history += 'Flag object with type "antenna" created. '
            if not uvutils._check_history_version(
                self.history, self.pyuvdata_version_str
            ):
                self.history += self.pyuvdata_version_str
            self.ant_array = copy.deepcopy(indata.ant_array)
            self.Nants_data = len(self.ant_array)

            # reconcile the spw axis between the input's shape convention and
            # this object's
            if self.future_array_shapes == indata.future_array_shapes:
                # match on future shape
                self.freq_array = copy.deepcopy(indata.freq_array)
            elif indata.future_array_shapes:
                # input is future shaped, self is not
                self.freq_array = indata.freq_array[np.newaxis, :]
            else:
                # input is not future shaped, self is
                self.freq_array = indata.freq_array[0, :]

            if copy_flags:
                if self.future_array_shapes == indata.future_array_shapes:
                    self.flag_array = copy.deepcopy(indata.flag_array)
                elif indata.future_array_shapes:
                    self.flag_array = indata.flag_array[:, np.newaxis, :, :]
                else:
                    self.flag_array = indata.flag_array[:, 0, :, :]
                self.history += (
                    " Flags copied from " + str(indata.__class__) + " object."
                )
                # copying flags only makes sense in flag mode
                if self.mode == "metric":
                    warnings.warn(
                        'Copying flags to type=="antenna" results in mode=="flag".'
                    )
                    self._set_mode_flag()
            else:
                # antenna arrays are (Nants_data[, 1], Nfreqs, Ntimes, Npols)
                if self.future_array_shapes:
                    array_shape = (
                        self.Nants_data,
                        self.Nfreqs,
                        self.Ntimes,
                        self.Npols,
                    )
                else:
                    array_shape = (
                        self.Nants_data,
                        1,
                        self.Nfreqs,
                        self.Ntimes,
                        self.Npols,
                    )
                if self.mode == "flag":
                    self.flag_array = np.zeros(array_shape, dtype=np.bool_)
                elif self.mode == "metric":
                    self.metric_array = np.zeros(array_shape, dtype=np.float64)

        if self.mode == "metric":
            # metrics start with unit weights
            self.weights_array = np.ones(self.metric_array.shape)

        self.filename = indata.filename
        self._filename.form = indata._filename.form

        if indata.x_orientation is not None:
            self.x_orientation = indata.x_orientation

        if history not in self.history:
            self.history += history
        self.label += label

        self.clear_unused_attributes()

        if not use_future_array_shapes:
            warnings.warn(_future_array_shapes_warning, DeprecationWarning)

        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
        return
| true |
f206235eac078aa0894c71a9cab2ba1a0c815ed8 | Python | Nilutpal-Gogoi/DataStructures-Algorithms | /Recursion In Python/2. IterationVsRecursion/5. Search First Occurence Of A Number.py | UTF-8 | 896 | 4.34375 | 4 | [] | no_license | # Implement a function that takes an array "arr", a "testVariable" (containing the
# number to search) and "currentIndex" (containing the starting index) as parameters.
# This function should output the index of the first occurrence of testVariable in arr.
# If testVariable is not found in arr it should return -1.
# ITERATIVE METHOD
def firstOccurrence(arr, test_var, start_index):
    """Return the index of the first element equal to test_var at or after
    start_index, or -1 when no such element exists."""
    index = start_index
    while index < len(arr):
        if arr[index] == test_var:
            return index
        index += 1
    return -1
print(firstOccurrence([9, 8, 1, 8, 1, 7], 1, 3))
# RECURSIVE METHOD
def firstOccurrence_rec(arr, test_var, start_index):
    """Recursive version of firstOccurrence: index of the first occurrence
    of test_var at or after start_index, or -1 if absent.

    Fix: check start_index >= len(arr) FIRST — the original indexed
    arr[start_index] unconditionally, raising IndexError for an empty
    array or an out-of-range starting index.
    """
    if start_index >= len(arr):
        return -1
    if arr[start_index] == test_var:
        return start_index
    return firstOccurrence_rec(arr, test_var, start_index + 1)
print(firstOccurrence_rec([9, 8, 1, 8, 1, 7], 1, 3))
5c7221192a98935ae0583c4ee279589dcae73539 | Python | sadfire/mafia_bot | /Tests/TimerTest.py | UTF-8 | 367 | 2.5625 | 3 | [] | no_license | import time
from GameView.Timer import Timer
def timer_test():
    # Smoke test for Timer: a 10-second timer whose process/stop callbacks
    # echo their argument, started with two positional args.
    demo_timer = Timer(
        seconds=10,
        callback_process=lambda h: print(h, "Process"),
        callback_stop=lambda h: print(h, "Stop"),
        args=("World", "Condor"),
    )
    demo_timer.start()
    # Keep the main thread alive so the timer's background work can run.
    while True:
        time.sleep(10)
# Manual smoke test: run this module directly to start the timer demo.
if __name__ == "__main__":
    timer_test()
9e5b0148de6033e608c8e66f9aa8921d72d9206f | Python | abishekravi/guvipython | /pro24.py | UTF-8 | 281 | 2.703125 | 3 | [] | no_license | #a
# Read n, then print all 2**n n-bit binary strings ordered primarily by
# popcount (number of 1-bits) and secondarily in lexicographic order.
# Fix: the original if/else had two byte-identical branches (dead
# duplication); the length comparison had no effect on the output.
ni = int(input())
codes = [bin(i)[2:].zfill(ni) for i in range(2 ** ni)]
for code in sorted(codes, key=lambda s: (s.count("1"), s)):
    print(code)
03353bd32fc76b41108d70e6c9841d6857ae0f64 | Python | fedebatti/Boxing-Atari-Deep-Reinforcement-Learning | /Reinforce/montecarlo.py | UTF-8 | 1,466 | 3.03125 | 3 | [] | no_license | import gym
from obs_preprocessing import observation_preprocessing
from reinforce_agent import reinforce_agent
#MonteCarlo Rollout implementation to play the full episode
def montecarlo_rollout(agent, env, training=True):
    """Play one full episode of ``env`` with ``agent``, recording every step.

    Arguments:
        agent: must provide ``play_one_step(state)`` (stochastic policy, used
            when training) and ``play_one_step_greedy(state)`` (evaluation).
        env: a Gym-style environment (``reset()`` / ``step(action)``).
        training (bool): selects the stochastic vs. greedy policy.

    Returns:
        (steps_list, total_reward) where steps_list holds
        (step_index, preprocessed_state, action, reward) tuples.
    """
    # The two original branches were identical except for the action-selection
    # method; unify them behind a single callable.
    choose_action = agent.play_one_step if training else agent.play_one_step_greedy
    steps_list = []
    reward_accumulator = 0
    step_index = 0
    done = False
    # Env init
    state = env.reset()
    while not done:
        previous_state = observation_preprocessing(state)
        action = choose_action(previous_state)
        # Take action
        state, reward, done, info = env.step(action)
        reward_accumulator += reward
        steps_list.append((step_index, previous_state, action, reward))
        step_index += 1
    print("Total episode reward: {}".format(reward_accumulator))
    return steps_list, reward_accumulator
4c4313b3fb221c4875eb5d62cba5af7c0d70bc98 | Python | crystal80314/DMMT | /實習判官-A數值(每群組玩過的劇本數).py | UTF-8 | 1,255 | 2.890625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
#### Load the JSON feed of group/story plays into a DataFrame
data="https://judicial-intern.dmmt.design/api/v1/group_story_ships?fbclid=IwAR0YSAzg_DpezpZSe-cp59oSHuC_yNtKLl-PetnwlLvHqx0p01_IudUrO_I"
raw_df = pd.read_json(data)
df = raw_df.loc[:, ["group_id", "story_id", "created_at"]]
# A-value numerator: running count of stories, i.e. the 0-based row index.
# NOTE(review): this is 0-based (the first row contributes 0 stories) —
# confirm that is the intended definition of the metric.
df.loc[:, "count_story_id"] = list(range(len(df.index)))
# A-value denominator: cumulative number of distinct group_ids seen so far.
# (~duplicated()).cumsum() equals .nunique() over every prefix, computed in
# one O(n) pass instead of the original O(n^2) slice-and-nunique while-loop.
df.loc[:, "count_max_group_id"] = (~df.loc[:, "group_id"].duplicated()).cumsum()
# A value for each row: stories played per group.
df.loc[:, "num_of_story_per_group"] = df.loc[:, "count_story_id"] / df.loc[:, "count_max_group_id"]
df = df.loc[:, ["created_at", "num_of_story_per_group"]]
#### Plot the A value over time
x = df.loc[:, "created_at"].dt.date
y = df.loc[:, "num_of_story_per_group"]
plt.plot(x, y, color="r", label="num_of_story_per_group")
plt.ylabel('num_of_story_per_group')
plt.xlabel('created_at')
plt.title('num of story per group')
plt.show()
| true |
08bd4eb284b3114ed4166ff63f4d3dbaa922df6d | Python | JQmainblack/XOJ | /Crawler/HDU.py | UTF-8 | 2,991 | 2.734375 | 3 | [] | no_license | import urllib.request, urllib.parse
import http.cookiejar
from bs4 import BeautifulSoup
import re
class HDU:
    """Minimal client for the HDU Online Judge (acm.split.hdu.edu.cn).

    Supports logging in, submitting solutions, and polling judge verdicts
    by scraping the judge's HTML pages.
    """
    def __init__(self):
        # Endpoint URLs of the judge.
        self.index_url = 'http://acm.split.hdu.edu.cn/'
        self.login_url = self.index_url + 'userloginex.php?action=login'
        self.submit_url = self.index_url + 'submit.php?action=submit'
        self.status_url = self.index_url + 'status.php'
        self.problem_url = self.index_url + 'showproblem.php'
        # The site serves GB2312-encoded pages.
        self.encoding = 'GB2312'
        # Mapping from language name to the judge's numeric language id.
        self.language = {
            'G++': 0,
            'GCC': 1,
            'C++': 2,
            'C': 3,
            'PASCAL': 4,
            'JAVA': 5,
            'C#': 6
        }
    def login(self, username, password):
        """Log in and keep the session cookie; return True on success."""
        self.username = username
        self.password = password
        cookiejar = http.cookiejar.CookieJar()
        handler = urllib.request.HTTPCookieProcessor(cookiejar)
        self.opener = urllib.request.build_opener(handler)
        data = {
            'username': username,
            'userpass': password
        }
        data = urllib.parse.urlencode(data).encode()
        request = urllib.request.Request(self.login_url, data)
        html = self.opener.open(request).read().decode(self.encoding)
        # A logged-in page contains a "Sign Out" link.
        return html.find('Sign Out') != -1
    def problem_list(self, number=10):
        # Fix: was missing ``self``, so instance calls bound the instance
        # to ``number``. Listing is not implemented yet.
        pass
    def problem_detail(self, problemid):
        """Fetch a problem page (parsing not implemented yet).

        Fix: the method was missing ``self`` although it reads instance
        state (``self.problem_url``, ``self.opener``).
        """
        data = {
            'pid': problemid,
        }
        url = self.problem_url + '?' + urllib.parse.urlencode(data)
        html = self.opener.open(url).read().decode(self.encoding)
        soup = BeautifulSoup(html, 'lxml')
        problem = {}
        pass
    def submit(self, problemid, language, code):
        """Submit ``code`` for ``problemid`` and return the run id string."""
        data = {
            'problemid': problemid,
            'language': self.language[language.upper()],
            'usercode': code
        }
        data = urllib.parse.urlencode(data).encode(self.encoding)
        request = urllib.request.Request(self.submit_url, data)
        html = self.opener.open(request).read().decode(self.encoding)
        # Look up the newest submission of this user for this problem to
        # obtain its run id. Fix: was the bare name ``username`` (NameError);
        # it must be the attribute stored by login().
        data = {
            'pid': problemid,
            'user': self.username
        }
        url = self.status_url + '?' + urllib.parse.urlencode(data)
        html = self.opener.open(url).read().decode(self.encoding)
        soup = BeautifulSoup(html, 'lxml')
        table = soup.find('table', class_='table_text')
        tr = table.find_all('tr')[1]
        td = tr.find_all('td')[0]
        return td.string
    def status(self, runid):
        """Return the verdict string for the given run id."""
        data = {
            'first': runid,
        }
        url = self.status_url + '?' + urllib.parse.urlencode(data)
        html = self.opener.open(url).read().decode(self.encoding)
        soup = BeautifulSoup(html, 'lxml')
        table = soup.find('table', class_='table_text')
        tr = table.find_all('tr')[1]
        td = tr.find_all('td')[2]
        result = td.find('font').string
        return result
# Demo driver: log in, submit a sample A+B solution for HDU 1000, and
# poll the verdict.
# NOTE(review): real credentials are hard-coded in source control — move
# them to environment variables or a config file.
if __name__ == '__main__':
    username = 'DaDaMr_X'
    password = '199707161239x'
    hdu = HDU()
    if (hdu.login(username, password)):
        print('Login Successfully!')
    else:
        # (typo "Passowrd" kept as-is: it is a runtime string)
        print('Username or Passowrd is Wrong!')
    problemid = '1000'
    language = 'g++'
code = '''
#include <cstdio>
int main()
{
int a, b;
while (~scanf("%d%d", &a, &b))
printf("%d\\n", a + b);
return 0;
}
'''
    # Submit the sample solution, then poll the judge for its verdict.
    runid = hdu.submit(problemid, language, code)
    print(runid)
    result = hdu.status(runid)
    print(result)
8e91bb7491478516ab6cd8331ae82371dceabdb4 | Python | impatmcb/report-automation | /training_resource_management/extravars.py | UTF-8 | 2,632 | 3.015625 | 3 | [] | no_license | # Determine the date of the class
def classdate(number):
    """Return, formatted 'MM/DD', the date `number` days after this week's
    Monday — so classdate(7)..classdate(11) give next Monday..Friday."""
    today = datetime.date.today()
    return (today + datetime.timedelta(days=number - today.weekday())).strftime("%m/%d")
nextmon, nexttue, nextwed, nextthu, nextfri = classdate(7), classdate(8), classdate(9), classdate(10), classdate(11)
# Get coaches for each day based on priority.
# All 15 per-day dict names are kept: other tooling may read them by name.
primarycoachmon, secondarycoachmon, thirdcoachmon = {}, {}, {}
primarycoachtues, secondarycoachtues, thirdcoachtues = {}, {}, {}
primarycoachwed, secondarycoachwed, thirdcoachwed = {}, {}, {}
primarycoachthur, secondarycoachthur, thirdcoachthur = {}, {}, {}
primarycoachfri, secondarycoachfri, thirdcoachfri = {}, {}, {}
# Map each class date ('MM/DD') to its (primary, secondary, third) coach
# dicts so one code path replaces the five duplicated per-weekday if-blocks.
_coach_tiers_by_date = {
    nextmon: (primarycoachmon, secondarycoachmon, thirdcoachmon),
    nexttue: (primarycoachtues, secondarycoachtues, thirdcoachtues),
    nextwed: (primarycoachwed, secondarycoachwed, thirdcoachwed),
    nextthu: (primarycoachthur, secondarycoachthur, thirdcoachthur),
    nextfri: (primarycoachfri, secondarycoachfri, thirdcoachfri),
}
with open('I:/signupsheet.csv') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        # row[2] holds the class date; keep month/day and normalize to MM/DD.
        datelist = row[2].split("/")
        dateofclass = datetime.datetime.strptime(f"{datelist[0]}/{datelist[1]}", "%m/%d")
        date = dateofclass.strftime("%m/%d")
        tiers = _coach_tiers_by_date.get(date)
        if tiers is None:
            continue  # class is not on one of next week's five days
        primary, secondary, third = tiers
        # First signup for a key becomes primary, second becomes secondary,
        # any further signups overwrite the third slot (original behavior).
        # NOTE(review): assumes row[6] is the slot/class key and row[7] the
        # coach name — confirm against signupsheet.csv columns.
        key = row[6]
        if key not in primary:
            primary[key] = row[7].title()
        elif key not in secondary:
            secondary[key] = row[7].title()
        else:
            third[key] = row[7].title()
d54f10c3c7742e0df8fe9e3eec5cd5f1b6fddce1 | Python | gbtami/flexx | /flexx/app/funcs.py | UTF-8 | 21,511 | 2.65625 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
Functional API for flexx.app
"""
import os
import sys
import json
from .. import webruntime, config, set_log_level
from . import model, logger
from .model import Model
from .session import manager
from .assetstore import assets
from .tornadoserver import TornadoServer
from ..event import _loop
# Shorthand used to serialize Python values into JS-safe literal strings.
reprs = json.dumps
## Main loop functions
# There is always a single current server (except initially there is None)
_current_server = None
def create_server(host=None, port=None, new_loop=False, backend='tornado'):
    """
    Create a new server object. This is automatically called; users generally
    don't need this, unless they want to explicitly specify host/port,
    create a fresh server in testing scenarios, or run Flexx in a thread.
    Flexx uses a notion of a single current server object. This function
    (re)creates that object. If there already was a server object, it is
    replaced. It is an error to call this function if the current server
    is still running.
    Arguments:
        host (str): The hostname to serve on. By default
            ``flexx.config.hostname`` is used. If ``False``, do not listen
            (e.g. when integrating with an existing Tornado application).
        port (int, str): The port number. If a string is given, it is
            hashed to an ephemeral port number. By default
            ``flexx.config.port`` is used.
        new_loop (bool): Whether to create a fresh Tornado IOLoop instance,
            which is made current when ``start()`` is called. If ``False``
            (default) will use the current IOLoop for this thread.
        backend (str): Stub argument; only Tornado is currently supported.
    Returns:
        server: The server object, see ``current_server()``.
    """
    global _current_server
    if backend.lower() != 'tornado':
        raise RuntimeError('Flexx server can only run on Tornado (for now).')
    # Handle defaults
    if host is None:
        host = config.hostname
    if port is None:
        port = config.port
    # Stop the old server first so its port/resources are released.
    if _current_server:
        _current_server.close()
    # Start hosting
    _current_server = TornadoServer(host, port, new_loop)
    # Schedule pending calls: kick the flexx.event loop once, then flush any
    # call_later() requests queued before a server existed.
    _current_server.call_later(0, _loop.loop.iter)
    while _pending_call_laters:
        delay, callback, args, kwargs = _pending_call_laters.pop(0)
        call_later(delay, callback, *args, **kwargs)
    return _current_server
def current_server():
    """
    Return the current server object, lazily creating one (with default
    host/port) if none exists yet.
    Currently, this is always a TornadoServer object, which has properties:
    * serving: a tuple ``(hostname, port)`` specifying the location
      being served (or ``None`` if the server is closed).
    * app: the ``tornado.web.Application`` instance
    * loop: the ``tornado.ioloop.IOLoop`` instance
    * server: the ``tornado.httpserver.HttpServer`` instance
    """
    if not _current_server:
        create_server()  # populates the module-level _current_server
    return _current_server
def start():
    """
    Start the server and event loop. Generally blocks until the application
    is stopped (though it may return in interactive environments, e.g. Pyzo).
    """
    srv = current_server()
    logger.info('Starting Flexx event loop.')
    srv.start()
def run():
    """
    Start the event loop in desktop app mode; the server will close
    down when there are no more connections.
    """
    srv = current_server()
    srv._auto_stop = True  # picked up by the _auto_closer reaction
    return start()
def stop():
    """
    Stop the event loop. Thread safe: may be used even when
    ``flexx.start()`` was called from another thread. The server can be
    restarted after it has been stopped; note that calling ``stop()`` too
    often makes a subsequent ``start()`` return almost immediately.
    """
    current_server().stop()
def call_later(delay, callback, *args, **kwargs):
    """
    Schedule a function call in the current event loop. Thread safe.
    Arguments:
        delay (float): delay in seconds; zero means "next loop iteration".
        callback (callable): the function to call.
        args: positional arguments for the callback.
        kwargs: keyword arguments for the callback.
    """
    if not _current_server:
        # No server yet: queue the request; create_server() flushes the queue.
        _pending_call_laters.append((delay, callback, args, kwargs))
        return
    current_server().call_later(delay, callback, *args, **kwargs)
# Work around circular dependency: flexx.app.model needs call_later but
# cannot import this module at import time.
model.call_later = call_later
# Queue of (delay, callback, args, kwargs) scheduled before a server exists;
# flushed by create_server().
_pending_call_laters = []
# Integrate the "event-loop" of flexx.event by routing its pending work
# through our call_later.
_loop.loop.integrate(lambda f: call_later(0, f))
@manager.connect('connections_changed')
def _auto_closer(*events):
    # In run() mode (_auto_stop set), stop the server once the last
    # connection of every served app is gone.
    server = current_server()
    if not getattr(server, '_auto_stop', False):
        return
    if any(manager.get_connections(name) for name in manager.get_app_names()):
        return  # at least one app still has a live connection
    logger.info('Stopping Flexx event loop.')
    server.stop()
## App functions
def init_interactive(cls=None, runtime=None):
    """ Initialize Flexx for interactive mode. This creates a default session
    and launches a runtime to connect to it.
    Parameters:
        cls (None, Model): a subclass of ``app.Model`` (or ``ui.Widget``) to use
            as the *default active model*. Only has effect the first time that
            this function is called.
        runtime (str): the runtime to launch the application in. Default 'xul'.
    """
    # Determine default model class (which is a Widget if ui is imported)
    if cls is None and 'flexx.ui' in sys.modules:
        from .. import ui
        cls = ui.Widget
    # Create the default session
    session = manager.get_default_session()
    if session is None:
        session = manager.create_default_session(cls)
    else:
        return  # default session already running; nothing to do
    # Launch web runtime, the server will wait for the connection
    server = current_server()
    host, port = server.serving
    url = '%s:%i/%s/?session_id=%s' % (host, port, session.app_name, session.id)
    session._runtime = launch('http://' + url, runtime=runtime)
class App:
    """ Specification of a Flexx class.
    In the strict sense, this is a container for a Model class plus the
    args and kwargs that it is to be instantiated with.
    Arguments:
        cls (Model): the Model class (or Widget) that represents this app.
        args: positional arguments used to instantiate the class (and received
            in its ``init()`` method).
        kwargs: keyword arguments used to initialize the model's properties.
    """
    def __init__(self, cls, *args, **kwargs):
        # Fix: the original check was
        #     not isinstance(cls, type) and issubclass(type, Model)
        # which can never be True (``issubclass(type, Model)`` is always
        # False), so invalid first arguments were silently accepted.
        if not (isinstance(cls, type) and issubclass(cls, Model)):
            raise ValueError('App needs a Model class as its first argument.')
        self._cls = cls
        self.args = args
        self.kwargs = kwargs
        self._path = cls.__name__  # can be overloaded by serve()
        self._is_served = False
    def __call__(self, *args, **kwargs):
        # Instantiate the model class; call-time args extend the stored args,
        # call-time kwargs override the stored kwargs.
        a = list(self.args) + list(args)
        kw = {}
        kw.update(self.kwargs)
        kw.update(kwargs)
        return self.cls(*a, **kw)
    def __repr__(self):
        t = '<App based on class %s pre-initialized with %i args and %i kwargs>'
        return t % (self.cls.__name__, len(self.args), len(self.kwargs))
    @property
    def cls(self):
        """ The Model class that is the basis of this app.
        """
        return self._cls
    @property
    def is_served(self):
        """ Whether this app is already registered by the app manager.
        """
        return self._is_served
    @property
    def url(self):
        """ The url to acces this app. This raises an error if serve() has not
        been called yet or if Flexx' server is not yet running.
        """
        if not self._is_served:
            raise RuntimeError('Cannot determine app url if app is not yet "served".')
        elif not (_current_server and _current_server.serving):
            raise RuntimeError('Cannot determine app url if the server is not '
                               'yet running.')
        else:
            host, port = _current_server.serving
            return 'http://%s:%i/%s/' % (host, port, self._path)
    @property
    def name(self):
        """ The name of the app, i.e. the url path that this app is served at.
        """
        return self._path or '__main__'
    def serve(self, name=None):
        """ Start serving this app.
        This registers the given class with the internal app manager. The
        app can be loaded via 'http://hostname:port/app_name'.
        Arguments:
            name (str, optional): the relative URL path to serve the app on.
                If this is ``''`` (the empty string), this will be the main app.
        """
        # Note: this talks to the manager; it has nothing to do with the server
        if self._is_served:
            raise RuntimeError('This app (%s) is already served.' % self.name)
        if name is not None:
            self._path = name
        manager.register_app(self)
        self._is_served = True
    def launch(self, runtime=None, **runtime_kwargs):
        """ Launch this app as a desktop app in the given runtime.
        Arguments:
            runtime (str): the runtime to launch the application in. Default 'xul'.
            runtime_kwargs: kwargs to pass to the ``webruntime.launch`` function.
        Returns:
            app (Model): an instance of the given class.
        """
        # Create session
        if not self._is_served:
            self.serve()
        session = manager.create_session(self.name)
        # Launch web runtime, the server will wait for the connection
        current_server()  # creates server if it did not yet exist
        if runtime == 'nodejs':
            # nodejs has no browser to fetch assets; feed it all JS directly.
            js_assets, _ = session.get_assets_in_order()
            all_js = '\n\n'.join([asset.to_string() for asset in js_assets])
            session._runtime = launch(self.url, runtime=runtime, code=all_js)
        else:
            url = self.url + '?session_id=%s' % session.id
            session._runtime = webruntime.launch(url,
                                                 runtime=runtime,
                                                 **runtime_kwargs)
        return session.app
    def export(self, filename=None, link=None, write_shared=True):
        """ Export the given Model class to an HTML document.
        Arguments:
            filename (str, optional): Path to write the HTML document to.
                If not given or None, will return the html as a string.
            link (int): whether to link assets or embed them:
                * 0: all assets are embedded.
                * 1: normal assets are embedded, remote assets remain remote.
                * 2: all assets are linked (as separate files).
                * 3: (default) normal assets are linked, remote assets remain remote.
            write_shared (bool): if True (default) will also write shared assets
                when linking to assets. This can be set to False when
                exporting multiple apps to the same location. The shared assets can
                then be exported last using ``app.assets.export(dirname)``.
        Returns:
            html (str): The resulting html. If a filename was specified
                this returns None.
        Notes:
            If the given filename ends with .hta, a Windows HTML Application is
            created.
        """
        # Prepare name, based on exported file name (instead of cls.__name__)
        if not self._is_served:
            name = os.path.basename(filename).split('.')[0]
            name = name.replace('-', '_').replace(' ', '_')
            self.serve(name)
        # Create session with id equal to the app name. This would not be strictly
        # necessary to make exports work, but it makes sure that exporting twice
        # generates the exact same thing (no randomly generated dir names).
        session = manager.create_session(self.name, self.name)
        # Make fake connection using exporter object
        exporter = ExporterWebSocketDummy()
        manager.connect_client(exporter, session.app_name, session.id)
        # Clean up again - NO keep in memory to ensure two sessions dont get same id
        # manager.disconnect_client(session)
        # Warn if this app has data and is meant to be run standalone
        if (not link) and session.get_data_names():
            logger.warn('Exporting a standalone app, but it has registered data.')
        # Get HTML - this may be good enough
        html = session.get_page_for_export(exporter._commands, link)
        if filename is None:
            return html
        elif filename.lower().endswith('.hta'):
            hta_tag = '<meta http-equiv="x-ua-compatible" content="ie=edge" />'
            html = html.replace('<head>', '<head>\n    ' + hta_tag, 1)
        elif not filename.lower().endswith(('.html', 'htm')):
            raise ValueError('Invalid extension for exporting to %r' %
                             os.path.basename(filename))
        # Save to file. If standalone, all assets will be included in the main html
        # file, if not, we need to export shared assets and session assets too.
        filename = os.path.abspath(os.path.expanduser(filename))
        if link:
            if write_shared:
                assets.export(os.path.dirname(filename))
            session._export(os.path.dirname(filename))
        with open(filename, 'wb') as f:
            f.write(html.encode())
        app_type = 'standalone app' if link else 'app'
        logger.info('Exported %s to %r' % (app_type, filename))
class NoteBookHelper:
    """ Object that captures commands send to the websocket during the
    execution of a cell, and then applies these commands using a script
    node. This way, Flexx widgets keep working in the exported notebook.
    """
    # Mirrors the real websocket's attribute so callers can check it.
    close_code = None
    def __init__(self, session):
        self._session = session
        self._real_ws = None
        self._commands = []
        self.enable()
    def enable(self):
        # Hook into IPython so capture/release wrap every cell execution.
        from IPython import get_ipython
        ip = get_ipython()
        ip.events.register('pre_execute', self.capture)
        ip.events.register('post_execute', self.release)
    def capture(self):
        # Swap ourselves in as the session's websocket to buffer commands.
        if self._real_ws is not None:
            logger.warn('Notebookhelper already is in capture mode.')
        else:
            assert self._session._ws is not None
            self._real_ws = self._session._ws
            self._session._ws = self
    def release(self):
        # Restore the real websocket, then replay buffered commands as a
        # Javascript display node so they survive notebook export.
        if self._session._ws is self:
            self._session._ws = self._real_ws
            self._real_ws = None
        if self._commands:
            from IPython.display import display, Javascript
            commands = ['flexx.command(%s);' % reprs(msg) for msg in self._commands]
            self._commands = []
            display(Javascript('\n'.join(commands)))
    def command(self, msg):
        # Called by the session in place of ws.command(); buffered until release().
        self._commands.append(msg)
    @property
    def ping_counter(self):
        # Delegate to the real websocket, whether or not we are capturing.
        if self._session._ws is self:
            return self._real_ws.ping_counter
        else:
            return self._session._ws.ping_counter
def init_notebook():
    """ Initialize the Jupyter notebook by injecting the necessary CSS
    and JS into the browser. Note that any Flexx-based libraries that
    you plan to use should probably be imported *before* calling this.
    """
    # Note: not using IPython Comm objects yet, since they seem rather
    # undocumented and I could not get them to work when I tried for a bit.
    # This means though, that flexx in the notebook only works on localhost.
    from IPython.display import display, clear_output, HTML
    # from .. import ui  # noqa - make ui assets available
    # Make default log level warning instead of "info" to avoid spamming
    # This preserves the log level set by the user
    config.load_from_string('log_level = warning', 'init_notebook')
    set_log_level(config.log_level)
    # Get session or create new
    session = manager.get_default_session()
    if session is None:
        session = manager.create_default_session()
    # Open server - the notebook helper takes care of the JS resulting
    # from running a cell, but any interaction goes over the websocket.
    server = current_server()
    host, port = server.serving
    # Trigger loading phosphor assets
    if 'flexx.ui' in sys.modules:
        from flexx import ui
        session.register_model_class(ui.Widget)
    # Get assets, load all known modules to prevent dynamic loading as much as possible
    js_assets, css_assets = session.get_assets_in_order(css_reset=False, load_all=True)
    # Pop the first JS asset that sets flexx.app_name and flexx.session_id
    # We set these in a way that it does not end up in exported notebook.
    js_assets.pop(0)
    url = 'ws://%s:%i/flexx/ws/%s' % (host, port, session.app_name)
    flexx_pre_init = """<script>window.flexx = window.flexx || {};
                        window.flexx.app_name = "%s";
                        window.flexx.session_id = "%s";
                        window.flexx.ws_url = "%s";
                        window.flexx.is_live_notebook = true;
                        </script>""" % (session.app_name, session.id, url)
    # Check if already loaded, if so, re-connect
    if not getattr(session, 'init_notebook_done', False):
        session.init_notebook_done = True  # also used in assetstore
    else:
        display(HTML(flexx_pre_init))
        clear_output()
        display(HTML("""<script>
            flexx.is_exported = !flexx.is_live_notebook;
            flexx.init();
            </script>
            <i>Flexx already loaded. Reconnected.</i>
            """))
        return  # Don't inject Flexx twice
        # Note that exporting will not work anymore since out assets
        # are no longer in the outputs
    # Install helper to make things work in exported notebooks
    NoteBookHelper(session)
    # Compose HTML to inject: a marker plus every CSS and JS asset inline.
    t = "<i>Injecting Flexx JS and CSS</i>"
    t += '\n\n'.join([asset.to_html('{}', 0) for asset in css_assets + js_assets])
    t += """<script>
        flexx.is_notebook = true;
        flexx.is_exported = !flexx.is_live_notebook;
        /* If Phosphor is already loaded, disable our Phosphor CSS. */
        if (window.jupyter && window.jupyter.lab) {
            document.getElementById('phosphor-all.css').disabled = true;
        }
        flexx.init();
        </script>"""
    display(HTML(flexx_pre_init))  # Create initial Flexx info dict
    clear_output()  # Make sure the info dict is gone in exported notebooks
    display(HTML(t))
# Note: the Widget._repr_html_() method is responsible for making
# the widget show up in the notebook output area.
# todo: deprecate these
def serve(cls, name=None, properties=None):
    """ Backwards compat.
    """
    # Note: this talks to the manager; it has nothing to do with the server
    assert (isinstance(cls, type) and issubclass(cls, Model))
    app = App(cls, **(properties or {}))
    app.serve(name)
    return cls
def launch(cls, runtime=None, properties=None, **runtime_kwargs):
    """ Backwards compat.
    """
    if isinstance(cls, str):
        # A URL/path string: delegate straight to the webruntime launcher.
        return webruntime.launch(cls, runtime, **runtime_kwargs)
    assert (isinstance(cls, type) and issubclass(cls, Model))
    app = App(cls, **(properties or {}))
    return app.launch(runtime, **runtime_kwargs)
def export(cls, filename=None, properties=None, single=None, link=None,
           write_shared=True):
    """ Backward compat.
    """
    if not (isinstance(cls, type) and issubclass(cls, Model)):
        raise ValueError('runtime must be a string or Model subclass.')
    if link is None:
        # "single" is the deprecated predecessor of "link".
        if single is not None:
            logger.warn('Export single arg is deprecated, use link instead.')
        if not single:
            link = 3
    link = int(link or 0)
    app = App(cls, **(properties or {}))
    return app.export(filename, link=link, write_shared=write_shared)
class ExporterWebSocketDummy:
    """ Stand-in for a real websocket during export: records every command
    the app sends so they can be re-played in the exported HTML document.
    """
    close_code = None

    def __init__(self):
        self._commands = []
        self.ping_counter = 0
        # todo: make icon and title work
        # self.command('ICON %s.ico' % session.id)
        # self.command('TITLE %s' % session._runtime_kwargs.get('title',
        #                                                       'Exported flexx app'))

    def command(self, cmd):
        # Buffer the command; export() reads ._commands afterwards.
        self._commands.append(cmd)
| true |
28ebb6a230ece56b4c3ed0de11466de605e43f5c | Python | ichko/differentiable-simulation | /notebooks/pong_gym/data.py | UTF-8 | 1,048 | 2.984375 | 3 | [] | no_license | import random
import numpy as np
import gym
def get_single_sequence(seq_len):
env = gym.make('PongDeterministic-v4')
env.reset()
env.seed(0)
actions, observations, rewards, done = [], [], [], []
for _ in range(seq_len):
# stay, up, down
action = random.choice([1, 2, 3]) # env.action_space.sample()
observation, reward, d, _info = env.step(action)
one_hot_action = (np.array([1, 2, 3]) == action).astype(int)
actions.append(one_hot_action)
observations.append(observation)
rewards.append(reward)
done.append(d)
return (np.array(actions),), (
# Extract only one color channel
(np.array(observations) / 255)[:, :, :, 1],
np.array(rewards).reshape(-1, 1),
np.array(done).reshape(-1, 1)
)
def env_sequences_generator(seq_len):
while True:
yield get_single_sequence(seq_len)
if __name__ == '__main__':
gen = env_sequences_generator(8)
input, output = next(gen)
print([o.shape for o in output])
| true |
d390291804915f3511e2cea8838a104aa7cad27d | Python | Sam-ONeill/ComputerVisionTheBasics | /ISL-Detection/Hand-Detection-master/Fingers.py | UTF-8 | 11,902 | 2.578125 | 3 | [] | no_license | from Detector import HandDetector
import cv2
import math
import numpy as np
# Hand-landmark detector (project wrapper) with a 0.7 minimum detection
# confidence threshold.
handDetector = HandDetector(min_detection_confidence=0.7)
# Default webcam capture stream (device index 0).
webcamFeed = cv2.VideoCapture(0)
while True:
status, image = webcamFeed.read()
handLandmarks = handDetector.findHandLandMarks(image=image, draw=True)
count = 0
letter = "?"
if (len(handLandmarks) != 0):
Pinky_Tip_H = handLandmarks[20][2]
Pinky_Dip_H = handLandmarks[19][2]
Pinky_Pip_H = handLandmarks[18][2]
Pinky_MCP_H = handLandmarks[17][2]
Ring_Tip_H = handLandmarks[16][2]
Ring_Dip_H = handLandmarks[15][2]
Ring_Pip_H = handLandmarks[14][2]
Ring_MCP_H = handLandmarks[13][2]
Middle_Tip_H = handLandmarks[12][2]
Middle_Dip_H = handLandmarks[11][2]
Middle_Pip_H = handLandmarks[10][2]
Middle_MCP_H = handLandmarks[9][2]
Index_Tip_H = handLandmarks[8][2]
Index_Dip_H = handLandmarks[7][2]
Index_Pip_H = handLandmarks[6][2]
Index_MCP_H = handLandmarks[5][2]
Thumb_Tip_H = handLandmarks[4][2]
Thumb_Dip_H = handLandmarks[3][2]
Thumb_Pip_H = handLandmarks[2][2]
Thumb_MCP_H = handLandmarks[1][2]
Wrist_H = handLandmarks[0][2]
Pinky_Tip_W = handLandmarks[20][1]
Pinky_Dip_W = handLandmarks[19][1]
Pinky_Pip_W = handLandmarks[18][1]
Pinky_MCP_W = handLandmarks[17][1]
Ring_Tip_W = handLandmarks[16][1]
Ring_Dip_W = handLandmarks[15][1]
Ring_Pip_W = handLandmarks[14][1]
Ring_MCP_W = handLandmarks[13][1]
Middle_Tip_W = handLandmarks[12][1]
Middle_Dip_W = handLandmarks[11][1]
Middle_Pip_W = handLandmarks[10][1]
Middle_MCP_W = handLandmarks[9][1]
Index_Tip_W = handLandmarks[8][1]
Index_Dip_W = handLandmarks[7][1]
Index_Pip_W = handLandmarks[6][1]
Index_MCP_W = handLandmarks[5][1]
Thumb_Tip_W = handLandmarks[4][1]
Thumb_Dip_W = handLandmarks[3][1]
Thumb_Pip_W = handLandmarks[2][1]
Thumb_MCP_W = handLandmarks[1][1]
Wrist_W = handLandmarks[0][1]
# we will get y coordinate of finger-tip and check if it lies above middle landmark of that finger
# details: https://google.github.io/mediapipe/solutions/hands
# if handLandmarks[4][3] == "Right" and handLandmarks[4][1] > handLandmarks[3][1]: # Right Thumb
# count = count + 1
# elif handLandmarks[4][3] == "Left" and handLandmarks[4][1] < handLandmarks[3][1]: # Left Thumb
# count = count + 1
# if handLandmarks[8][2] < handLandmarks[6][2]: # Index finger
# count = count + 1
# if handLandmarks[12][2] < handLandmarks[10][2]: # Middle finger
# count = count + 1
# if handLandmarks[16][2] < handLandmarks[14][2]: # Ring finger
# count = count + 1
# if handLandmarks[20][2] < handLandmarks[18][2]: # Little finger
# count = count + 1
if Index_Tip_H > Index_Pip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H and Thumb_Tip_H < Index_Pip_H:
letter = "A"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H < Middle_Pip_H and \
Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "B"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "B"
if Index_Dip_H + 30 >= Index_Tip_H >= Index_Dip_H - 30 and Middle_Dip_H + 30 >= Middle_Tip_H >= Middle_Dip_H - 30 and Ring_Dip_H + 30 >= Ring_Tip_H >= Ring_Dip_H - 30:
if handLandmarks[4][3] == "Left" and Thumb_Tip_W < Thumb_Dip_W and Index_Tip_H <= Thumb_Tip_H:
letter = "C"
elif handLandmarks[4][3] == "Right" and Thumb_Tip_W > Thumb_Dip_W and Index_Tip_H <= Thumb_Tip_H:
letter = "C"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "D"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "D"
if Index_Tip_H > Index_Pip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H and Middle_Tip_H + 20 >= Thumb_Tip_H >= Middle_Tip_H - 20:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "E"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "E"
if Index_Tip_H > Middle_Dip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Index_Tip_H - 50 <= Thumb_Tip_H <= Index_Tip_H:
letter = "F"
elif handLandmarks[4][3] == "Right" and Index_Pip_W + 50 >= Thumb_Tip_W >= Index_Pip_W:
letter = "F"
if Index_Tip_H > Middle_Dip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Index_Pip_W - 50 <= Thumb_Tip_W <= Index_Pip_W:
letter = "G"
elif handLandmarks[4][3] == "Right" and Index_Tip_H + 50 >= Thumb_Tip_H >= Index_Tip_H:
letter = "G"
if Index_Tip_H < Middle_Dip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "H"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "H"
if Index_Tip_H > Middle_Dip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "I"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "I"
if Index_Pip_H < Pinky_Pip_H: #and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Wrist_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "J"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "J"
if Index_Tip_H < Middle_Dip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Middle_MCP_W:
letter = "K"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Middle_MCP_W:
letter = "K"
if Index_Tip_H < Middle_Dip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Right" and handLandmarks[4][1] > handLandmarks[3][1]: # Right Thumb
letter = "L"
elif handLandmarks[4][3] == "Left" and handLandmarks[4][1] < handLandmarks[3][1]: # Left Thumb
letter = "L"
if Index_Tip_H > Wrist_H and Middle_Tip_H > Wrist_H and Ring_Tip_H > Ring_Dip_H and Pinky_Dip_H < Index_Dip_H:
letter = "M"
if Index_Tip_H > Wrist_H and Middle_Tip_H > Wrist_H and Pinky_Tip_H > Pinky_MCP_H and Ring_Dip_H < Index_Dip_H and Pinky_Dip_H < Index_Dip_H:
letter = "N"
#if Index_Tip_H > Index_Pip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
#if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
# letter = "O"
#elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
# letter = "O"
if Index_Tip_H < Middle_Dip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "P"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "P"
if Index_Tip_H < Middle_Dip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "Q"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "Q"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W and Middle_Tip_W < Index_Tip_W:
letter = "R"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W and Middle_Tip_W > Index_Tip_W:
letter = "R"
if Index_Tip_H > Index_Pip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "S"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "S"
if Index_Tip_H < Thumb_Tip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H and Thumb_Tip_H < Index_Pip_H:
letter = "T"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W:
letter = "U"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W:
letter = "U"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W and Middle_Tip_W - 30 >= Index_Tip_W:
letter = "V"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W and Middle_Tip_W <= Index_Tip_W - 30:
letter = "V"
if Index_Tip_H < Index_Pip_H and Middle_Tip_H < Middle_Pip_H and Ring_Tip_H < Ring_Pip_H and Pinky_Tip_H > Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Ring_MCP_W - 50 <= Thumb_Tip_W <= Ring_MCP_W and Middle_Tip_W - 30 >= Index_Tip_W and Ring_Tip_W - 30 >= Middle_Tip_W:
letter = "W"
elif handLandmarks[4][3] == "Right" and Ring_MCP_W + 50 >= Thumb_Tip_W >= Ring_MCP_W and Middle_Tip_W <= Index_Tip_W - 30 and Ring_Tip_W <= Middle_Tip_W - 30:
letter = "W"
if Index_Tip_H > Middle_Dip_H and Middle_Tip_H > Middle_Pip_H and Ring_Tip_H > Ring_Pip_H and Pinky_Tip_H < Pinky_Pip_H:
if handLandmarks[4][3] == "Left" and Thumb_Tip_W < Thumb_Dip_W:
letter = "Y"
elif handLandmarks[4][3] == "Right" and Thumb_Tip_W > Thumb_Dip_W:
letter = "Y"
cv2.putText(image, letter, (45, 375), cv2.FONT_HERSHEY_SIMPLEX, 5, (255, 0, 0), 25)
cv2.imshow("Volume", image)
cv2.waitKey(1)
| true |
eaaefcae83961578231c4cd29a24440f63b93d1d | Python | surrealwork-dev/TF-Tutorials | /wFmConv.py | UTF-8 | 5,781 | 3.203125 | 3 | [] | no_license | # Necessary architecture to implement calculation of the weighted
# Frechet mean.
import numpy as np
import doctest
def enforce_bounds(theta):
    """Wrap *theta* into the closed interval [-pi, pi].

    Out-of-range values are shifted by whole turns (2*pi) until they land
    inside the interval; the endpoints -pi and pi are returned unchanged.

    >>> enforce_bounds(-4)
    2.2831853071795862
    >>> enforce_bounds(-np.pi)
    -3.141592653589793
    >>> enforce_bounds(np.pi)
    3.141592653589793
    >>> enforce_bounds(3*np.pi)
    3.141592653589793
    >>> enforce_bounds(0)
    0
    """
    full_turn = 2 * np.pi
    while theta < -np.pi:
        theta += full_turn
    while theta > np.pi:
        theta -= full_turn
    return theta
def identify(re, im):
    """Identify the complex number re + j*im with a polar pair (r, theta).

    The angle is produced by arctan2 and then clamped to [-pi, pi] via
    enforce_bounds.

    >>> identify(1/np.sqrt(2), 1/np.sqrt(2))
    (0.9999999999999999, 0.7853981633974483)
    """
    magnitude = np.sqrt(re ** 2 + im ** 2)
    angle = enforce_bounds(np.arctan2(im, re))
    return magnitude, angle
def get_manifold_distance(z1, z2):
    """Distance between two points of the manifold R^+ x SO(2) = {(r, R(theta))}.

    Each argument may be a complex scalar or an (re, im) pair; both are
    converted to polar coordinates before the metric is evaluated.

    >>> get_manifold_distance(np.complex(1,0), 0.01)
    4.605170185988091
    >>> get_manifold_distance([1,0], [0.01,0])
    4.605170185988091
    """
    def _as_pair(z):
        # Leave sequences untouched; split scalars into (re, im).
        if type(z) in (list, np.ndarray, tuple):
            return z
        return [np.real(z), np.imag(z)]

    p1 = _as_pair(z1)
    r1, theta1 = identify(p1[0], p1[1])
    p2 = _as_pair(z2)
    r2, theta2 = identify(p2[0], p2[1])
    # Angular difference is re-wrapped so antipodal angles stay bounded.
    delta = enforce_bounds(theta2 - theta1)
    return np.sqrt((np.log(r2 / r1)) ** 2 + 2 * (delta ** 2))
def pick_three_rand(m, realmin, realmax, imagmin, imagmax):
    """Draw three mutually distinct random points, each different from *m*.

    Each point is a 1x2 numpy array [re, im] with re uniform in
    [realmin, realmax) and im uniform in [imagmin, imagmax).

    Args:
        m: reference point (two-element sequence) the draws must avoid.
        realmin, realmax: bounds for the real coordinate.
        imagmin, imagmax: bounds for the imaginary coordinate.

    Returns:
        Tuple (a, b, c) of three distinct 1x2 arrays.
    """
    real_diff = realmax - realmin
    imag_diff = imagmax - imagmin

    def _draw():
        # One uniform sample per coordinate, scaled into the requested box.
        return np.array(
            [real_diff * np.random.random_sample() + realmin,
             imag_diff * np.random.random_sample() + imagmin])

    def _as_complex(p):
        # Bug fix: ``np.complex`` was removed in NumPy 1.24; the builtin
        # ``complex`` is the documented drop-in replacement.
        return complex(p[0], p[1])

    forbidden = [_as_complex(m)]
    picks = []
    for _ in range(3):
        candidate = _draw()
        # Re-draw until the candidate collides with none of the earlier points
        # (same draw-until-unique strategy as before, without the duplication).
        while _as_complex(candidate) in forbidden:
            candidate = _draw()
        forbidden.append(_as_complex(candidate))
        picks.append(candidate)
    return picks[0], picks[1], picks[2]
def get_weighted_sum(point_list, weight_list, m):
    """Weighted sum of squared manifold distances from each point to *m*.

    Computes Sum_i( weight_list[i] * dman(point_i, m)**2 ), where point_i is
    taken column-wise from the 2-row array [reals, imags].

    >>> get_weighted_sum([[1],[1]], [0.5], [1,0])
    0.67690690180786
    """
    points = np.array(point_list)
    weights = np.array(weight_list)
    # Normalise to the expected 2 x K layout (row 0 = real, row 1 = imag).
    if points.shape[0] != 2:
        points = points.reshape(2, -1)
    terms = [
        w * get_manifold_distance([points[0][k], points[1][k]], m) ** 2
        for k, w in enumerate(weights)
    ]
    return np.sum(terms)
def calc_wfm(point_list, weight_list, num_iters=500, crossover_prob=0.75,\
    F = 0.25, verbose=0):
    '''Computes the weighted Frechet mean of point_list with filter
    weights weight_list.
    Uses differential evolution to minimize the weighted variance.

    Args:
        point_list: 2-row sequence [reals, imags] of the sample points.
        weight_list: one weight per point (same length as a row).
        num_iters: number of evolution steps to run.
        crossover_prob: probability of attempting a mutation each step.
        F: differential weight applied to (b - c) in the mutation.
        verbose: when truthy, prints progress to stdout.

    Returns:
        The best point found (tuple on the first iteration, 1x2 array after
        any accepted mutation).
    '''
    # Search box is the bounding rectangle of the input points.
    realmin, realmax = min(point_list[0]), max(point_list[0])
    imagmin, imagmax = min(point_list[1]), max(point_list[1])
    iternum = 0
    # Start from the arithmetic centroid of the points.
    start_point = \
        (np.mean(point_list[0]), np.mean(point_list[1]))
    m = start_point
    if verbose: print('m: ', m)
    while iternum < num_iters:
        iternum += 1
        # Objective at the current candidate (weighted variance on the manifold).
        f_m = get_weighted_sum(point_list, weight_list, m)
        # Implement differential evolution.
        # For each agent m (1 agent), pick three agents a,b,c
        # from the possible parameter space.
        a,b,c = pick_three_rand(m, realmin, realmax, imagmin, imagmax)
        # Pick random number to determine crossover.
        r = np.random.rand(1)
        if r < crossover_prob:
            # Mutant candidate: base point a shifted by the scaled difference.
            y = a + F*(b - c)
            f_y = get_weighted_sum(point_list, weight_list, y)
            if verbose: print('f_y: ', f_y)
            # Greedy acceptance: keep whichever candidate scores lower.
            if f_y <= f_m:
                m = y
                if verbose:
                    print('New min: ', m)
            else:
                if verbose:
                    print('No new min')
        else:
            if verbose:
                print('No crossover')
    return m
#### Example usage ####
def main():
    """Example usage: weighted Frechet mean of three sample points.

    Returns the point produced by calc_wfm with verbose output enabled.
    """
    point_list = [[1, 2, 3], [4, 5, 6]]
    weight_list = [1, 0.5, 0.33]
    # Dropped the unused local ``m`` that the original assigned and ignored.
    return calc_wfm(point_list, weight_list, verbose=1)
| true |
821a19b7416ea15e6bf50cbd14524289b51e0da0 | Python | GlennGuan/learn_cookbook | /chapter7/7_10.py | UTF-8 | 2,568 | 3.859375 | 4 | [] | no_license | #. 在回调函数中携带额外的状态 p239
# 回调函数可以携带额外的状态以便在回调函数内部使用。
# 调用一个回调函数
def apply_async(func, args, *, callback):
    """Run *func* with *args* and hand the result to *callback*.

    Synchronous stand-in for an asynchronous executor (thread pool,
    process pool, timer, ...).
    """
    outcome = func(*args)
    callback(outcome)
def print_result(result):
    """Plain, stateless callback: just echo the received value."""
    print('Go:', result)
def add(x, y):
    """Return x + y (works for any operands supporting '+')."""
    total = x + y
    return total
apply_async(add, (2, 3), callback=print_result)
apply_async(add, ("hello", "world"), callback=print_result) # No extra information reaches the callback here.
# Problems appear when the callback must interact with other variables or
# parts of its environment: a bare function has no way to carry that state.
# One way to carry extra information into a callback is to use a bound
# method instead of a plain function. The class below keeps an internal
# sequence number that is incremented every time a result is received.
class ResultHandler:
    """Stateful callback holder: numbers each result it receives."""

    def __init__(self):
        self.sequence = 0  # how many results have been handled so far

    def handler(self, result):
        """Bound-method callback; prints the result with a running index."""
        self.sequence += 1
        print('[{}] Got: {}'.format(self.sequence, result))
r = ResultHandler() # Create an instance and use the bound method handler as the callback.
apply_async(add, (2, 3), callback=r.handler)
apply_async(add, ("hello", "world"), callback=r.handler)
# As an alternative to a class, a closure can also capture the state.
def make_handler():
    """Closure-based alternative: the counter lives in the enclosing scope."""
    count = 0

    def handler(result):
        nonlocal count  # mutate the captured counter, not a new local
        count += 1
        print('[{}] Got: {}'.format(count, result))

    return handler
handler = make_handler()
apply_async(add, (2, 3), callback=handler)
apply_async(add, ("hello", "world"), callback=handler)
# A coroutine can accomplish the same task:
def make_handler():
    """Coroutine version: results arrive via send() and are numbered."""
    count = 0
    while True:
        received = yield  # suspend until the next result is sent in
        count += 1
        print('[{}] Got: {}'.format(count, received))
handler = make_handler()
next(handler) # advance to the yield
apply_async(add, (2, 3), callback=handler.send)
apply_async(add, ("hello", "world"), callback=handler.send)
# Last but not least: carry the state through an extra callback argument and
# let partial() (recipe 7.8) fix up the argument count.
class SequenceNo:
    """Tiny mutable counter handed to the callback as explicit state."""

    def __init__(self):
        self.sequence = 0  # starts at zero; bumped by the handler
def handler(result, seq):
    """Callback receiving its state explicitly through the *seq* argument."""
    seq.sequence += 1
    print('[{}] Got: {}'.format(seq.sequence, result))
seq = SequenceNo()
from functools import partial
apply_async(add, (2, 3), callback=partial(handler, seq=seq))
apply_async(add, ("hello", "world"), callback=partial(handler, seq=seq))
# Detailed notes: see p239. A lambda achieves the same binding as partial:
apply_async(add, (2, 3), callback=lambda r: handler(r, seq))
apply_async(add, ("hello", "world"), callback=lambda r: handler(r, seq))
| true |
734c5b8f5fb62b861d5c6563e875d1aa57a21ebd | Python | neurips2020submission11699/metarl | /src/metarl/sampler/sampler.py | UTF-8 | 3,727 | 3.15625 | 3 | [
"MIT"
] | permissive | """Base sampler class."""
import abc
import copy
class Sampler(abc.ABC):
    """Common interface for all samplers.

    Concrete samplers implement `obtain_samples` and `shutdown_worker`, and
    are normally constructed through `from_worker_factory`, which receives a
    pickleable `WorkerFactory` encapsulating the RL-specific machinery:
    how workers are built, how they roll out trajectories, and how agents
    and environments are updated. `__init__` and `start_worker` remain part
    of the interface for backward compatibility but are deprecated.
    """

    def __init__(self, algo, env):
        """Deprecated constructor kept for backward compatibility.

        Args:
            algo(metarl.RLAlgorithm): Algorithm driving this sampler.
            env(gym.Env): Environment being sampled from.
        """
        self.algo = algo
        self.env = env

    @classmethod
    def from_worker_factory(cls, worker_factory, agents, envs):
        """Build a sampler from a worker factory plus agents and environments.

        Args:
            worker_factory(WorkerFactory): Pickleable factory for creating
                workers; ship it to whatever processes/nodes do the work and
                construct the workers there.
            agents(Agent or List[Agent]): Agent(s) used for rollouts. A list
                must have length `worker_factory.n_workers` and is spread
                across the workers.
            envs(gym.Env or List[gym.Env]): Environment(s) rollouts happen
                in; same spreading rule as for agents.

        Returns:
            Sampler: An instance of `cls`.
        """
        # Compatibility shim for subclasses whose (deprecated) __init__ still
        # expects an algorithm object: dress the factory up as one. Relying on
        # this default is deprecated; calling the classmethod is not.
        algo_stub = copy.copy(worker_factory)
        algo_stub.policy = agents
        return cls(algo_stub, envs)

    def start_worker(self):
        """Deprecated no-op.

        Workers (e.g. parallel processes) should be launched during
        construction instead of here.
        """

    @abc.abstractmethod
    def obtain_samples(self, itr, num_samples, agent_update, env_update=None):
        """Collect at least *num_samples* transitions (timesteps).

        Args:
            itr(int): Current iteration number (deprecated argument).
            num_samples(int): Minimum number of transitions to gather.
            agent_update(object): Passed to the `agent_update_fn` before
                rollouts; a list must have length `factory.n_workers` and is
                spread across the workers.
            env_update(object): Passed to the `env_update_fn` before
                rollouts; same spreading rule.

        Returns:
            metarl.TrajectoryBatch: The collected trajectories.
        """

    @abc.abstractmethod
    def shutdown_worker(self):
        """Tear down workers if any were started.

        Python object destruction is unpredictable enough that this explicit
        hook is not deprecated.
        """
| true |
f34a776661ddc463692bbc0641cd7510fe46b381 | Python | MiYoShi8225/cohabi-api | /db/util.py | UTF-8 | 325 | 2.765625 | 3 | [] | no_license | import json
def get_db_dsn(path: str) -> str:
    """Build a MySQL DSN from the access-key JSON file at *path*.

    The file must contain a top-level "database" object with the keys
    "user", "passwd", "host" and "db".
    """
    with open(path) as key_file:
        db_acs = json.load(key_file)["database"]
    return "mysql://{}:{}@{}/{}".format(
        db_acs["user"], db_acs["passwd"], db_acs["host"], db_acs["db"])
| true |
2c4f2f6186d5eb07da5a9cbe36ce3ab8d814e8f9 | Python | MysteriousSonOfGod/asyncframeworks | /official/qt5frames/examples/layouts.py | UTF-8 | 3,049 | 2.609375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Copyright (c) Sebastian Klaassen. All Rights Reserved.
# Distributed under the MIT License. See LICENSE file for more info.
from asyncframes import Frame, hold, sleep
from qt5frames import *
from asyncframes.pyqt5_eventloop import EventLoop
# Simplest case: a single widget with no explicit layout manager.
@MainWindow(size=(200, 100), title="Central Layout Example")
async def layout_central():
    PushButton('1')
    await hold()  # keep the frame (and window) alive until closed
# Two buttons arranged side by side in a horizontal box layout.
@MainWindow(size=(200, 100), title="HBox Layout Example")
async def layout_hbox():
    with HBoxLayout:
        PushButton('1')
        PushButton('2')
    await hold()
# Two buttons stacked top-to-bottom in a vertical box layout.
@MainWindow(size=(200, 100), title="VBox Layout Example")
async def layout_vbox():
    with VBoxLayout:
        PushButton('1')
        PushButton('2')
    await hold()
# Grid layout: cells addressed by row/col, with a two-column span.
@MainWindow(size=(200, 100), title="Grid Layout Example")
async def layout_grid():
    with GridLayout:
        PushButton('1', row=0, col=0)
        PushButton('2', row=0, col=1)
        PushButton('3', row=1, col=0, colspan=2)  # spans both columns
    await hold()
# Form layout: each row pairs a label (widget or string) with a field.
@MainWindow(size=(200, 100), title="Form Layout Example")
async def layout_form():
    with FormLayout:
        PushButton('2', label=PushButton('1'))  # a widget may serve as label
        PushButton('4', label='3')              # ... or a plain string
    await hold()
# Stacked layout: only one child is visible at a time; the loop below
# flips between the two pages once per second.
@MainWindow(size=(200, 100), title="Stacked Layout Example")
async def layout_stacked():
    with StackedLayout as sl:
        PushButton('1')
        PushButton('2')
    while True:
        sl.setCurrentIndex(0)
        await sleep(1)
        sl.setCurrentIndex(1)
        await sleep(1)
# No layout manager: widgets keep absolute positions set via move().
@MainWindow(size=(200, 100), title="No Layout Example")
async def layout_none():
    with Layout:
        PushButton('1')
        PushButton('2').move(20, 20)  # manually positioned
    await hold()
# Kitchen-sink demo: every layout kind nested inside one outer grid,
# plus GroupBox and TabWidget containers. The trailing loop animates the
# stacked layout exactly as in layout_stacked.
@MainWindow(size=(400, 400), title="Compound Layout Example")
async def layout_compound():
    with GridLayout:
        with HBoxLayout(row=0, col=0):
            PushButton('h1')
            PushButton('h2')
        with VBoxLayout(row=0, col=1):
            PushButton('v1')
            PushButton('v2')
        with GridLayout(row=1, col=0):
            PushButton('g1', row=0, col=0)
            PushButton('g2', row=0, col=1)
            PushButton('g3', row=1, col=0, colspan=2)
        with FormLayout(row=1, col=1):
            PushButton('f2', label=PushButton('f1'))
            PushButton('f4', label='f3')
        with Layout(row=2, col=0):
            PushButton('n1')
            PushButton('n2').move(20, 20)
        with StackedLayout(row=2, col=1) as sl:
            PushButton('s1')
            PushButton('s2')
        with GroupBox(row=3, col=0, title="GroupBox", layout=Layout.hbox):
            PushButton('1')
            PushButton('2')
        with TabWidget(row=3, col=1):
            with TabPage("hbox", layout=Layout.hbox):
                PushButton('1')
                PushButton('2')
            with TabPage("vbox", layout=Layout.vbox):
                PushButton('1')
                PushButton('2')
    while True:
        sl.setCurrentIndex(0)
        await sleep(1)
        sl.setCurrentIndex(1)
        await sleep(1)
# Drive the compound demo on the PyQt5-backed asyncframes event loop.
loop = EventLoop()
loop.run(layout_compound)
| true |
cf7815596cf00315b42703cf9d29f77818fa3498 | Python | redswallow/project-euler | /p23.py | UTF-8 | 668 | 3.09375 | 3 | [] | no_license | def divisors(n):
l=[]
for i in range(1,n):
if n%i==0:l.append(i)
return l
def abundant(n):
    # A number is abundant when its proper divisors sum to more than itself.
    divisor_total = sum(divisors(n))
    return divisor_total > n
# NOTE(review): this file is Python 2 (see the bare ``print`` statement at
# the bottom); run it with a Python 2 interpreter or port the print call.
ans=[]
'''
abundant_num=filter(abundant,range(1,28123))
#print abundant_num
file=open("p23.data","w")
for l in abundant_num:
    file.write(str(l)+'\n')
'''
# The abundant numbers were precomputed once (disabled block above) and are
# read back from p23.data, one integer per line.
file=open("p23.data","r")
abundant_num=[int(line.replace('\n','')) for line in file.readlines()]
#print abundant_num
# l acts as a sentinel array: l[k] starts non-zero (it holds k+1, but only
# "non-zero" matters) and is zeroed for every k expressible as the sum of
# two abundant numbers.
l=[]
for i in range(1,28123*2):
    l.append(i)
for i in range(len(abundant_num)):
    for j in range(len(abundant_num)):
        l[abundant_num[i]+abundant_num[j]]=0
# Collect every number below 28123 that was never marked as such a sum.
for i in range(1,28123):
    if l[i]!=0:
        ans.append(i)
print ans,sum(ans)
| true |
fac765bb35a23d18fd9e5850e81f1be95af11181 | Python | xyt556/geothermal_image_classification | /CNN_test.py | UTF-8 | 3,921 | 2.53125 | 3 | [] | no_license | ### July 2021
### Taken from https://towardsdatascience.com/neural-network-for-satellite-data-classification-using-tensorflow-in-python-a13bcf38f3e1
### Ideas: Need labeled training data
### Need to take geologic map and rasterize it (or certain lables of it)
### 1. Turn of Jupyter Notebook
### 2. Import Geologic map as 4dv file
### 3. Look at `grouping`; filter on Ter. & Qut. volcanics
### 4. cv_4dvtoim of those two classes
### 5. Use that image as
### 6.
import os
import numpy as np
from tensorflow import keras
from pyrsgis import raster
from pyrsgis.convert import changeDimension
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, precision_score, recall_score
# Change the directory
os.chdir(r"C:/Users/Graham/OneDrive - Dynamic Graphics, Inc/Python/33_IMAGE_CLASSIFICATION/geothermal")
# Assign file names
mxInput = './L1C_T12SUH_A018271_20200904T182231/allbands.tif'
builtupBangalore = 'l5_Bangalore2011_builtup.tif'
mxHyderabad = 'l5_Hyderabad2011_raw.tif'
# Read the rasters as array
ds1, featuresInput = raster.read(mxInput, bands='all')
# ds2, labelBangalore = raster.read(builtupBangalore, bands=1)
# ds3, featuresHyderabad = raster.read(mxHyderabad, bands='all')
# Print the size of the arrays
print("\nImport Multispectral image shape: ", featuresInput.shape)
# print("\nBangalore Binary built-up image shape: ", labelBangalore.shape)
# print("\nHyderabad Multispectral image shape: ", featuresHyderabad.shape)
# NOTE(review): the two raster.read calls above are commented out, so
# ds2/ds3, labelBangalore and featuresHyderabad are never defined and the
# very next statement raises NameError. Restore those reads (or adapt the
# rest of the script to featuresInput) before running.
# Clean the labelled data to replace NoData values by zero
labelBangalore = (labelBangalore == 1).astype(int)
# Reshape the array to single dimensional array
featuresBangalore = changeDimension(featuresBangalore)
labelBangalore = changeDimension (labelBangalore)
featuresHyderabad = changeDimension(featuresHyderabad)
nBands = featuresBangalore.shape[1]
print("Bangalore Multispectral image shape: ", featuresBangalore.shape)
print("Bangalore Binary built-up image shape: ", labelBangalore.shape)
print("Hyderabad Multispectral image shape: ", featuresHyderabad.shape)
# Split testing and training datasets
xTrain, xTest, yTrain, yTest = train_test_split(featuresBangalore, labelBangalore, test_size=0.4, random_state=42)
print(xTrain.shape)
print(yTrain.shape)
print(xTest.shape)
print(yTest.shape)
# Normalise the data (8-bit pixel values scaled to [0, 1])
xTrain = xTrain / 255.0
xTest = xTest / 255.0
featuresHyderabad = featuresHyderabad / 255.0
# Reshape the data to (samples, 1, bands) as expected by the Flatten layer
xTrain = xTrain.reshape((xTrain.shape[0], 1, xTrain.shape[1]))
xTest = xTest.reshape((xTest.shape[0], 1, xTest.shape[1]))
featuresHyderabad = featuresHyderabad.reshape((featuresHyderabad.shape[0], 1, featuresHyderabad.shape[1]))
# Print the shape of reshaped data
print(xTrain.shape, xTest.shape, featuresHyderabad.shape)
# Define the parameters of the model: one hidden layer of 14 units,
# softmax over the two classes (built-up / not built-up)
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(1, nBands)),
    keras.layers.Dense(14, activation='relu'),
    keras.layers.Dense(2, activation='softmax')])
# Define the accuracy metrics and parameters
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# Run the model
model.fit(xTrain, yTrain, epochs=2)
# Predict for test data; keep only the probability of class 1
yTestPredicted = model.predict(xTest)
yTestPredicted = yTestPredicted[:,1]
# Calculate and display the error metrics (0.5 decision threshold)
yTestPredicted = (yTestPredicted>0.5).astype(int)
cMatrix = confusion_matrix(yTest, yTestPredicted)
pScore = precision_score(yTest, yTestPredicted)
rScore = recall_score(yTest, yTestPredicted)
print("Confusion matrix: for 14 nodes\n", cMatrix)
print("\nP-Score: %.3f, R-Score: %.3f" % (pScore, rScore))
predicted = model.predict(featuresHyderabad)
predicted = predicted[:,1]
# Predict new data and export the probability raster
# NOTE(review): ds3 is only defined by the commented-out read above.
prediction = np.reshape(predicted, (ds3.RasterYSize, ds3.RasterXSize))
outFile = 'Hyderabad_2011_BuiltupNN_predicted.tif'
raster.export(prediction, ds3, filename=outFile, dtype='float')
54e3cf09b15f967ad6dbcad7b808283e4e5616e3 | Python | bulmasen/learn-to-program | /GB_LearnProgramming/Python_Programming/lesson-05/homeWork-lesson05_5.py | UTF-8 | 970 | 3.5 | 4 | [] | no_license | # Создать (программно) текстовый файл, записать в него программно набор чисел,
# разделенных пробелами. Программа должна подсчитывать сумму чисел в файле и
# выводить ее на экран.
from random import randint, random
from functools import reduce
from os.path import abspath
s1 = 'proc_file.txt'
# Write a random number of lines, each containing random floats separated by
# single spaces (no trailing space). The original trimmed the trailing space
# with proc_file.seek(proc_file.tell() - 1), which is unsupported for
# text-mode files (seek targets other than values returned by tell() have
# undefined behaviour per the io docs); joining the numbers avoids it.
with open(s1, 'w') as proc_file:
    for _ in range(randint(10, 40)):
        numbers = [str(round(random() * 10, 2)) for _ in range(randint(10, 15))]
        proc_file.write(' '.join(numbers) + '\n')
# Re-read the file, sum every number, and report the total.
with open(s1, 'r') as proc_file:
    result = 0
    for line in proc_file:
        # sum() over a generator replaces the reduce(lambda ...) construction.
        result += sum(float(el) for el in line.split())
    print(f'Сумма цифр в файле\n{abspath(s1)}\nравна {round(result, 2)}')
| true |
e63dfffb7bfa1358b5514ce9a736604cdbfa867e | Python | Bradley999/Street-Sprinter | /Street_Sprinter.py | UTF-8 | 28,334 | 2.828125 | 3 | [] | no_license | import random
import pygame
import os
import time
# Window / gameplay constants.
WIDTH = 600
HEIGHT = 1000
FPS = 30
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
vec = pygame.math.Vector2  # shorthand for 2-D vectors (lane spawn points)
WHITE = (255, 255, 255)
#ASSET FOLDER
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
pygame.init()
pygame.mixer.init()
# Sound effects (stored in the img folder alongside the sprites).
accel_snd = pygame.mixer.Sound(os.path.join(img_folder, "Acceleration.wav"))
accel_snd.set_volume(0.2)
boost_snd = pygame.mixer.Sound(os.path.join(img_folder, "Boost.wav"))
boost_snd.set_volume(0.2)
deccel_snd = pygame.mixer.Sound(os.path.join(img_folder, "Deceleration.wav"))
deccel_snd.set_volume(0.2)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Street Sprinter")
#SOUNDTRACK METHHOOD
# NOTE(review): the next five lines duplicate the folder setup and pygame
# initialisation performed above; snd_folder is never used afterwards.
game_folder = os.path.dirname(__file__)
img_folder = os.path.join(game_folder, "img")
snd_folder = os.path.join(game_folder, "snd")
pygame.init()
pygame.mixer.init()
clock = pygame.time.Clock()
#pygame.mixer.music.play(loops = -1)
#BACKGROUND
bg = pygame.image.load(os.path.join(img_folder, "Road0.png")).convert()
bg = pygame.transform.scale(bg, (WIDTH, HEIGHT))
bg_rect = bg.get_rect()
bg_y = 0  # vertical scroll offset for the road background
#GAME OVER SCREEN
gameover = pygame.image.load(os.path.join(img_folder, "Game_Over0.PNG")).convert()
gameover = pygame.transform.scale(gameover, (WIDTH, HEIGHT))
gameover_rect = gameover.get_rect()
#RADIO FUNCTION/METHOOD
def Radio(track):
    """Start looping the soundtrack matching *track*; track 0 stops playback.

    Unknown track numbers are ignored, exactly like the original chain of
    elif branches.
    """
    track_files = {
        1: "Ooh_Ahh(My_Life_Be_Like).wav",
        2: "Six Days(Remix).wav",
        3: "Furious_Ja_Rule.wav",
        4: "Horses.wav",
        5: "Pump_It_Up_Joe_Budden.wav",
    }
    if track == 0:
        pygame.mixer.music.stop()
    elif track in track_files:
        pygame.mixer.music.load(os.path.join(img_folder, track_files[track]))
        pygame.mixer.music.set_volume(1.0)
        pygame.mixer.music.play(loops = -1)
#DRAW TEXT FUNCTION
font_name = pygame.font.match_font('arial')
def draw_text(surf, text, size, x, y, color):
    """Render *text* onto *surf* with its top edge centered at (x, y)."""
    rendered = pygame.font.Font(font_name, size).render(text, True, color)
    bounds = rendered.get_rect()
    bounds.midtop = (x, y)
    surf.blit(rendered, bounds)
#SHOW START SCREEN FUNCTION
def show_start_screen():
    """Draw the title screen and block until the player releases any key."""
    screen.blit(bg, bg_rect)
    draw_text(screen, "Street", 64, WIDTH / 2, HEIGHT / 5, RED)
    draw_text(screen, "Sprinter", 64, WIDTH / 2, HEIGHT / 5 + 65, RED)
    draw_text(screen, "ARROW KEYS TO MOVE SPACE TO SHOOT AND SHIFT TO BOOST!", 18, WIDTH / 2, HEIGHT / 2, RED)
    draw_text(screen, "PRESS ANY KEY TO BEGIN!", 18, WIDTH / 2, HEIGHT * 3 / 4, RED)
    pygame.display.flip()
    waiting = True
    while waiting:
        clock.tick(FPS)  # cap the idle loop at the game frame rate
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): quits pygame but leaves the loop running until
                # a KEYUP also arrives; consider setting waiting = False here.
                pygame.quit()
            if event.type == pygame.KEYUP:
                print("KEY PRESSED TO START GAME!")
                waiting = False
#SHOW GAME OVER SCREEN FUNCTION
def show_gameover_screen():
    """Display the game-over image and wait for a key release to continue."""
    screen.blit(gameover, gameover_rect)
    pygame.display.flip()
    print("gameover")
    waiting = True
    while waiting:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): same caveat as show_start_screen — the loop
                # keeps running after pygame.quit().
                pygame.quit()
            if event.type == pygame.KEYUP:
                waiting = False
#BULLET CLASS
class Bullet(pygame.sprite.Sprite):
    """Missile fired by the player; travels straight up the screen."""

    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        art = pygame.image.load(os.path.join(img_folder, "Missile0.png")).convert()
        art = pygame.transform.scale(art, (50, 125))
        art.set_colorkey(BLACK)
        self.image = art
        self.rect = self.image.get_rect()
        self.rect.x = x      # horizontal muzzle position
        self.rect.top = y    # vertical muzzle position

    def update(self):
        """Climb 10 px per frame; remove once fully above the screen."""
        self.rect.y -= 10
        if self.rect.bottom < 0:
            self.kill()
#SCORE COUNT
class Score(pygame.sprite.Sprite):
    """Static HUD element: the score-counter artwork near the bottom left."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        art = pygame.image.load(os.path.join(img_folder, "Score_Count0.png")).convert()
        art = pygame.transform.scale(art, (150, 100))
        art.set_colorkey(BLACK)
        self.image = art
        self.rect = self.image.get_rect()
        self.rect.x = 10
        self.rect.top = HEIGHT - 150

    def update(self):
        """Purely decorative sprite — nothing to animate."""
        pass
#NOS CLASS
class NOS(pygame.sprite.Sprite):
    """HUD gauge for the nitrous tank: drains while boosting, refills otherwise.

    Levels run from 0 (empty) to 5 (full); one level change happens at most
    every nos_delay milliseconds.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # One frame of artwork per fill level (index == level).
        self.images = [
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling3.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling4.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "NOS_Re-filling5.png")).convert()
        ]
        #self.count = 0
        self.level = 5            # start with a full tank
        self.boosting = False     # set externally via setNOSBoost
        self.nos_delay = 400      # ms between level changes
        self.last_nos = pygame.time.get_ticks()
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (60, 216))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.x = WIDTH - 85  # pinned to the top-right corner
        self.rect.top = 20
    def update(self):
        print(self.level)
        if self.boosting:
            # Drain one level per delay window; auto-cancel the boost at empty.
            now = pygame.time.get_ticks()
            if now -self.last_nos > self.nos_delay:
                self.last_nos = now
                if self.level < 1:
                    self.boosting = False
                else:
                    self.level -= 1
                    self.image = self.images[self.level]
                    self.image = pygame.transform.scale(self.image, (60, 216))
                    self.image.set_colorkey(BLACK)
        else:
            # Refill one level per delay window, capped at 5.
            now = pygame.time.get_ticks()
            if now -self.last_nos > self.nos_delay:
                self.last_nos = now
                if self.level < 5:
                    self.level += 1
                    self.image = self.images[self.level]
                    self.image = pygame.transform.scale(self.image, (60, 216))
                    self.image.set_colorkey(BLACK)
    def getNOSLevel(self):
        # Current fill level, 0..5.
        return self.level
    def setNOSBoost(self, boosting):
        # Toggle draining (True) vs refilling (False).
        self.boosting = boosting
#BULLET COUNT METHOOD/ CLASS
class Bullet_Count(pygame.sprite.Sprite):
    """HUD ammo counter: sprite_15 is full, sprite_00 is empty.

    ``level`` indexes into the sprite strip; the artwork presumably shows
    fewer bullets as the index drops — TODO confirm against the assets.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.images = [
            pygame.image.load(os.path.join(img_folder, "sprite_00.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_01.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_02.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_03.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_04.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_05.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_06.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_07.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_08.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_09.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_10.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_11.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_12.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_13.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_14.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "sprite_15.png")).convert()
        ]
        #self.count = 0
        self.level = 15  # start with a full clip
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.x = 10      # top-left HUD slot
        self.rect.top = 20
    def update(self):
        # Re-derive the displayed frame from the current level each tick.
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        print(self.level)
    def getBullet_Count(self):
        return self.level
    def increaseBullet_Count(self):
        # NOTE(review): despite the name, firing a shot DECREMENTS the level;
        # dropping below 0 wraps back to 14 (a fresh clip minus one round).
        self.level -= 1
        if self.level < 0:
            self.level = 14
    def load_Bullets(self):
        # Reload from a pick-up: refill to 14 rounds.
        self.level = 14
#HEALTH BAR COUNT CLASS
class Health_Bar(pygame.sprite.Sprite):
    """HUD health bar: ``level`` indexes frames Health_Bar0..Health_Bar2,
    presumably full -> damaged -> critical — TODO confirm against the assets.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.images = [
            pygame.image.load(os.path.join(img_folder, "Health_Bar0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Health_Bar1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Health_Bar2.png")).convert()
        ]
        #self.count = 0
        self.level = 0  # undamaged at game start
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.x = 10
        self.rect.top = 130  # directly below the ammo counter
    def update(self):
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        print(self.level)
    def getHealth_Count(self):
        return self.level
    def decreaseHealth_Count(self):
        # Taking damage advances the frame index.
        # NOTE(review): nothing caps level at 2, so a fourth hit makes
        # update() raise IndexError; the ``< 0`` guard can never trigger
        # since the method only increments.
        self.level += 1
        if self.level < 0:
            self.level = 0
    def load_Health(self):
        # NOTE(review): sets the index to 2 (the last frame), which looks
        # like minimum rather than restored health — verify intent.
        self.level = 2
#RADIO BAR CLASS
class Radio_Bar(pygame.sprite.Sprite):
    """HUD widget showing cover art for the currently selected radio track.

    ``level`` mirrors the track number passed to Radio(); slots without
    dedicated art fall back to the Health_Bar2 placeholder image.
    """
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.images = [
            pygame.image.load(os.path.join(img_folder, "Health_Bar2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Ooh_Aah_Cover.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Six_Days_Cover.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Health_Bar2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Health_Bar2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Health_Bar2.png")).convert()
        ]
        #self.count = 0
        self.level = 0  # radio off / no track selected
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.x = 10
        self.rect.top = HEIGHT -260  # above the score counter
    def update(self):
        # Refresh the displayed cover from the current track index.
        self.image = self.images[self.level]
        self.image = pygame.transform.scale(self.image, (150, 100))
        self.image.set_colorkey(BLACK)
        print(self.level)
    def load_radio(self, track):
        # Select which cover to display (same numbering as Radio()).
        self.level = track
#RAM GUARD METHOOD/ CLASS
class Ram_Guard(pygame.sprite.Sprite):
    # Power-up sprite that scrolls down the screen; touching it puts the
    # player into "Ram_Mode" (see the main loop's guards collision check).
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(os.path.join(img_folder, "Ram_Guard0.png")).convert()
        self.image = pygame.transform.scale(self.image, (100, 25))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.centerx = WIDTH/ 2
        self.rect.top = 0
        self.reset_delay = 2000
        self.last_reset = pygame.time.get_ticks()
    def update(self):
        # Fall toward the bottom of the screen and despawn when off-screen.
        self.rect.y += 15
        if self.rect.top > HEIGHT:
            self.kill()
        #now = pygame.time.get_ticks()
        #if now - self.last_reset > self.reset_delay:
            #self.last_reset = now
            #self.rect.bottom = 0
#BULLET RELOAD METHOOD/ CLASS
class Bullet_Strap(pygame.sprite.Sprite):
    # Ammo pickup sprite; colliding with it refills the player's clip.
    # It spawns above the screen in one of two fixed lanes and falls down.
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load(os.path.join(img_folder, "Bullet_Strap0.png")).convert()
        self.image = pygame.transform.scale(self.image, (100, 50))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.centerx = WIDTH/ 2
        self.rect.top = 0
        self.reset_delay = 2000
        self.last_reset = pygame.time.get_ticks()
        # Spawn points above the visible area for the two road lanes.
        self.left_lane = vec(220, -180)
        self.right_lane = vec(380, -180)
        self.rect.center = self.left_lane
    def update(self):
        # Fall toward the bottom of the screen and despawn when off-screen.
        self.rect.y += 15
        if self.rect.top > HEIGHT:
            self.kill()
    def set_lane(self):
        # Randomly pick the left or right lane for this pickup.
        self.lane = random.randint(0, 1)
        if self.lane == 0:
            self.rect.center = self.left_lane
        else:
            self.rect.center = self.right_lane
#TRUCK EXPLOSION
class Truck_Explosion(pygame.sprite.Sprite):
    # One-shot 8-frame explosion animation played where a truck was hit.
    # NOTE(review): frame "Truck_Explosion3.png" is skipped — presumably the
    # asset is missing; confirm against the image folder.
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.images = [
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion4.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion5.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion6.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion7.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Truck_Explosion8.png")).convert()
        ]
        self.count = 0  # current animation frame
        self.image = self.images[self.count]
        self.image = pygame.transform.scale(self.image, (230, 350))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.centerx = x
        self.rect.centery = y
    #Update Methood
    def update(self):
        # Advance one frame per tick; remove the sprite after the last frame.
        self.image = self.images[self.count]
        self.image = pygame.transform.scale(self.image, (230, 350))
        self.image.set_colorkey(BLACK)
        self.count += 1
        if self.count > 7:
            self.kill()
#P1 CAR EXPLOSION
class Car_Explosion(pygame.sprite.Sprite):
    # One-shot 9-frame explosion animation played when the player car dies.
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.images = [
            pygame.image.load(os.path.join(img_folder, "P1_Explosion0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion2.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion3.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion4.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion5.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion6.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion7.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Explosion8.png")).convert(),
        ]
        self.count = 0  # current animation frame
        self.image = self.images[self.count]
        self.image = pygame.transform.scale(self.image, (200, 300))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.centerx = x
        self.rect.centery = y
    #Update Methood
    def update(self):
        # Advance one frame per tick; remove the sprite after the last frame.
        self.image = self.images[self.count]
        self.image = pygame.transform.scale(self.image, (200, 300))
        self.image.set_colorkey(BLACK)
        self.count += 1
        if self.count > 8:
            self.kill()
#TRUCK CLASS/ ENEMY G
class Enemy_G(pygame.sprite.Sprite):
    # Grey enemy truck: animates through 3 frames while driving down one of
    # two fixed lanes; loops back to the top after leaving the screen.
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.Grey_Trucks = [
            pygame.image.load(os.path.join(img_folder, "Grey_Truck0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Grey_Truck1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Grey_Truck2.png")).convert()
        ]
        self.count = 0  # current animation frame (0-2)
        self.image = self.Grey_Trucks[self.count]
        self.image = pygame.transform.scale(self.image, (115, 175))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        # Spawn points above the visible area for the two road lanes.
        self.left_lane = vec(220, -180)
        self.right_lane = vec(380, -180)
        self.rect.center = self.left_lane
        self.reset_delay = 2000  # ms between off-screen respawns
        self.last_reset = pygame.time.get_ticks()
    #Update Methood
    def update(self):
        # Animate, drive downward, and respawn at the top once off-screen
        # (rate-limited by ``reset_delay``).
        self.image = self.Grey_Trucks[self.count]
        self.image = pygame.transform.scale(self.image, (115, 175))
        self.image.set_colorkey(BLACK)
        self.count += 1
        if self.count > 2:
            self.count = 0
        self.rect.y += 10
        if self.rect.top > HEIGHT:
            now = pygame.time.get_ticks()
            if now - self.last_reset > self.reset_delay:
                self.last_reset = now
                self.rect.bottom = 0
    #SET LANE FOR ENEMY G/ GREY TRUCK
    def set_lane(self):
        # Randomly pick the left or right lane for this truck.
        self.lane = random.randint(0, 1)
        if self.lane == 0:
            self.rect.center = self.left_lane
        else:
            self.rect.center = self.right_lane
#Player Class
class Player(pygame.sprite.Sprite):
    # The player car. Tracks health (0 = undamaged, 3 = most damaged frame
    # set), a mode ("Normal_Mode" or time-limited "Ram_Mode"), an animation
    # frame counter, and the remaining bullet count. Creating a Player also
    # creates and registers its NOS gauge and ammo gauge sprites.
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # Frame sets for each visual state (3 frames each).
        self.red_cars = [
            pygame.image.load(os.path.join(img_folder, "P1_Car_RED0.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Car_RED1.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "P1_Car_RED2.png")).convert()
        ]
        self.ram_cars = [
            pygame.image.load(os.path.join(img_folder, "Ram_Guard_P10.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Ram_Guard_P11.png")).convert(),
            pygame.image.load(os.path.join(img_folder, "Ram_Guard_P12.png")).convert()
        ]
        self.Damaged1 = [ pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged10.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged11.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged12.png")).convert()
        ]
        self.Damaged2 = [ pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged20.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged21.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged22.png")).convert()
        ]
        self.Damaged3 = [ pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged30.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged31.png")).convert(),
                          pygame.image.load(os.path.join(img_folder, "P1_Car_Damaged32.png")).convert()
        ]
        self.health = 0  # damage taken so far (0 = fresh)
        self.mode = "Normal_Mode"
        self.count = 0  # animation frame (0-2)
        self.bullet_count = 15
        self.image = self.red_cars[self.count]
        self.image = pygame.transform.scale(self.image, (100, 150))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH / 2, HEIGHT / 2)
        self.shoot_delay = 250  # ms between shots
        self.last_shot = pygame.time.get_ticks()
        self.ram_delay = 10000  # ms that Ram_Mode stays active
        self.start_ram = pygame.time.get_ticks()
        # NOS BOOST
        self.nos = NOS()
        all_sprites.add(self.nos)
        self.boost_delay = 10000
        self.start_boost = pygame.time.get_ticks()
        self.boost = False
        self.speed = 5  # pixels moved per tick
        #Bullet Count
        self.Bullet_Count = Bullet_Count()
        all_sprites.add(self.Bullet_Count)
    def load_Bullets(self):
        # Refill the ammo gauge to a full clip.
        self.Bullet_Count.load_Bullets()
    def update(self):
        # Per-tick update: choose the frame set for the current health/mode,
        # advance the animation, then apply keyboard movement and actions.
        #NOS BOOST
        self.boost = False
        self.nos.setNOSBoost(False)
        if self.health == 0:
            if self.mode == "Normal_Mode":
                self.image = self.red_cars[self.count]
            if self.mode == "Ram_Mode":
                now = pygame.time.get_ticks()
                if now - self.start_ram < self.ram_delay:
                    self.image = self.ram_cars[self.count]
                else:
                    # Ram mode expires after ram_delay milliseconds.
                    self.mode = "Normal_Mode"
        else:
            if self.health == 1:
                self.image = self.Damaged1[self.count]
            elif self.health == 2:
                self.image = self.Damaged2[self.count]
            elif self.health == 3:
                self.image = self.Damaged3[self.count]
        self.image = pygame.transform.scale(self.image, (100, 150))
        self.image.set_colorkey(BLACK)
        self.count += 1
        if self.count > 2:
            self.count = 0
        # RETURNS A LIST, keystate, OF ALL PRESSED KEYS
        keystate = pygame.key.get_pressed()
        # CHECKS TO SEE WHICH KEYS WERE IN THE LIST (A.K.A. PRESSED)
        if keystate[pygame.K_d]:
            self.rect.x += self.speed
        if keystate[pygame.K_a]:
            self.rect.x += -self.speed
        if keystate[pygame.K_w]:
            self.rect.y += -self.speed
            boost_snd.stop()
            deccel_snd.stop()
            accel_snd.play()
        if keystate[pygame.K_s]:
            self.rect.y += self.speed
            boost_snd.stop()
            accel_snd.stop()
            deccel_snd.play()
        if keystate[pygame.K_SPACE]:
            self.shoot()
        if keystate[pygame.K_LSHIFT]: # NOS BOOST
            # Boost doubles the speed and pushes the car forward.
            self.boost = True
            self.nos.setNOSBoost(True)
            self.speed = 10
            self.rect.y += -self.speed
            accel_snd.stop()
            deccel_snd.stop()
            boost_snd.play()
        else:
            self.speed = 5
        if self.rect.top <= 0:
            # Keep the car from leaving the top of the screen.
            self.rect.top = 0
    def update_health(self, h):
        # Add ``h`` damage points to the running total.
        self.health += h
    def get_health(self):
        return self.health
    def setMode(self, mode):
        # Switch mode; entering Ram_Mode restarts its expiry timer.
        if mode == "Ram_Mode":
            self.start_ram = pygame.time.get_ticks()
        self.mode = mode
    def getMode(self):
        return self.mode
    def hide(self):
        # Move the car off-screen (used after the death explosion).
        self.rect.x = -100
        self.rect.y = -100
        accel_snd.stop()
        boost_snd.stop()
    #SHOOT METHOOD
    def shoot(self):
        # Fire a bullet if the fire-rate delay has elapsed and ammo remains;
        # otherwise re-sync ``bullet_count`` from the gauge sprite.
        now = pygame.time.get_ticks()
        if now - self.last_shot > self.shoot_delay:
            self.bullet_count -= 1
            self.last_shot = now
            if self.bullet_count >= 0:
                self.Bullet_Count.increaseBullet_Count()
                bullet = Bullet(self.rect.centerx, self.rect.top)
                all_sprites.add(bullet)
                bullets.add(bullet)
            else:
                self.bullet_count = self.Bullet_Count.getBullet_Count()
#SPRITE GROUPS
# Module-level game state: sprite groups, HUD sprites, and spawn timers.
all_sprites = pygame.sprite.Group()
player = Player()
all_sprites.add(player)
Grey_Truck = Enemy_G()
all_sprites.add(Grey_Truck)
mobs = pygame.sprite.Group()
bullets = pygame.sprite.Group()
mobs.add(Grey_Truck)
guards = pygame.sprite.Group()
bullet_straps = pygame.sprite.Group()
#nos = NOS()
# Ammo pickup spawn timer (ms).
strap_delay = 15000
last_strap = pygame.time.get_ticks()
#all_sprites.add(nos)
score = Score()
all_sprites.add(score)
Score_Tally = 0
health_bar = Health_Bar()
all_sprites.add(health_bar)
radio_bar = Radio_Bar()
all_sprites.add(radio_bar)
# Ram power-up spawn timer (ms); first spawn is delayed by 3 s.
ram_delay = 15000
last_ram = pygame.time.get_ticks() + 3000
end_delay = 300
last_end = pygame.time.get_ticks()
def new_strap():
    """Spawn an ammo pickup in a random lane and register it."""
    pickup = Bullet_Strap()
    pickup.set_lane()
    all_sprites.add(pickup)
    bullet_straps.add(pickup)
def new_guard():
    """Spawn a ram-mode power-up and register it with the sprite groups."""
    guard = Ram_Guard()
    all_sprites.add(guard)
    guards.add(guard)
def new_mob():
    """Spawn an enemy truck in a random lane and register it."""
    truck = Enemy_G()
    truck.set_lane()
    all_sprites.add(truck)
    mobs.add(truck)
#EXPLOSION METHOOD
def explode(x, y):
    """Play a truck explosion animation centred on (x, y)."""
    all_sprites.add(Truck_Explosion(x, y))
def P1_Explode(x, y):
    """Play the player-car explosion animation centred on (x, y)."""
    all_sprites.add(Car_Explosion(x, y))
# GAME LOOP:
# Process Events
# Update
# Draw
start = True
end = False
last_end = pygame.time.get_ticks()
end_delay = 1000
running = True
while running:
    now = pygame.time.get_ticks()
    #SHOW START SCREEN ONCE
    if start:
        show_start_screen()
        start = False
    if end:
        if now - last_end > end_delay:
            show_gameover_screen()
            end = False
            # Reset the play field for a new run.
            all_sprites.empty()
            Score_Tally = 0
            # BUGFIX: the original wrote ``player == Player()`` — a comparison
            # whose result was discarded — so no new player was ever created
            # after a game over and the dead, hidden player was re-added.
            player = Player()
            all_sprites.add(player)
            new_mob()
    clock.tick(FPS)
    #PROCESS EVENTS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYUP:
            # Number keys 0-5 switch the radio track and its HUD cover art.
            if event.key == pygame.K_0:
                Radio(0)
                radio_bar.load_radio(0)
            elif event.key == pygame.K_1:
                Radio(1)
                radio_bar.load_radio(1)
            elif event.key == pygame.K_2:
                Radio(2)
                radio_bar.load_radio(2)
            elif event.key == pygame.K_3:
                Radio(3)
                radio_bar.load_radio(3)
            elif event.key == pygame.K_4:
                Radio(4)
                radio_bar.load_radio(4)
            elif event.key == pygame.K_5:
                Radio(5)
                radio_bar.load_radio(5)
    # Periodically spawn the ram power-up and the ammo pickup.
    if now - last_ram > ram_delay:
        last_ram = now
        new_guard()
    if now - last_strap > strap_delay:
        last_strap = now
        new_strap()
    # Bullets destroy trucks and score a point each.
    hits = pygame.sprite.groupcollide(mobs, bullets, True, True)
    for hit in hits:
        Score_Tally += 1
        explode(hit.rect.centerx, hit.rect.centery)
        new_mob()
    # Truck/player collisions: free while ramming, otherwise cost health.
    hits = pygame.sprite.spritecollide(player, mobs, True)
    for hit in hits:
        if player.getMode() == "Ram_Mode":
            hit.kill()
            explode(hit.rect.centerx, hit.rect.centery)
            new_mob()
        else:
            player.update_health(1)
            hit.kill()
            explode(hit.rect.centerx, hit.rect.centery)
            new_mob()
            if player.get_health() > 3:
                # Player destroyed: play the explosion and schedule game over.
                P1_Explode(player.rect.centerx, player.rect.centery)
                player.kill()
                player.hide()
                last_end = pygame.time.get_ticks()
                end = True
    # Ammo pickups reload the player's clip.
    hits = pygame.sprite.spritecollide(player, bullet_straps, False)
    for hit in hits:
        player.load_Bullets()
        hit.kill()
    all_sprites.update()
    #CHECK FOR RAM
    hits = pygame.sprite.spritecollide(player, guards, True)
    for hit in hits:
        player.setMode("Ram_Mode")
    # Scroll the background vertically to fake forward motion.
    rel_y = bg_y % bg.get_rect().height
    screen.blit(bg, (0, rel_y - bg.get_rect().height))
    if rel_y < HEIGHT:
        screen.blit(bg, (0, rel_y))
    bg_y += 7
    all_sprites.draw(screen)
    draw_text(screen, str(Score_Tally), 28, 85, HEIGHT -100, WHITE)
    pygame.display.flip()
pygame.quit()
| true |
450d30a25a79d6d945a7eec2462548a545ef6454 | Python | mackmason/hw3-p | /main.py | UTF-8 | 338 | 4.21875 | 4 | [] | no_license | # Author: Mack Mason mjm8542@psu.print
def digit_sum(n):
    """Return the sum of the decimal digits of ``n``.

    Non-positive inputs yield 0, matching the recursive base case of the
    original implementation.
    """
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total
def run():
    """Prompt for an integer and report the sum of its digits."""
    value = int(input("Enter an int: "))
    print(f"sum of digits of {value} is {digit_sum(value)}.")
# Run the interactive prompt only when executed as a script.
if __name__ == "__main__":
    run()
33d207855d7ab1d38faa3b7dfce7155eaac74fe1 | Python | china-university-mooc/Python-Basics | /ChapterI/Exercise/I.2-1-Temperature-Conversion.py | UTF-8 | 237 | 3.390625 | 3 | [] | no_license | str = input()
# NOTE(review): ``str`` (assigned above from input()) shadows the builtin;
# the name is kept because the assignment sits outside this block. Also,
# ``eval`` on raw user input is unsafe — ``float(...)`` would be preferable
# if arithmetic expressions do not need to be accepted.
# Guard against empty input so ``str[-1]`` cannot raise IndexError.
if str and str[-1] in ['C', 'c']:
    # Celsius -> Fahrenheit
    f = eval(str[:-1]) * 1.8 + 32
    print('{:.2f}F'.format(f))
elif str and str[-1] in ['F', 'f']:
    # Fahrenheit -> Celsius
    c = (eval(str[:-1]) - 32) / 1.8
    print('{:.2f}C'.format(c))
else:
    # Any other suffix (or empty input) is a format error.
    print('输入格式错误')
b2f31153aff0c01fbfaa33e4a3168ad982216c6c | Python | Ryan-Rhys/Heteroscedastic-BayesOpt | /objectives.py | UTF-8 | 4,548 | 3.4375 | 3 | [
"MIT"
] | permissive | # Copyright Ryan-Rhys Griffiths 2020
# Author: Ryan-Rhys Griffiths
"""
This module contains objective functions for heteroscedastic Bayesian Optimisation. Train objectives represent the
noise-corrupted values that a model will observe within the BO loop. Exact objectives represent the ground truth
black-box objective being optimised.
"""
from matplotlib import pyplot as plt
import torch
def train_sin_objective(X, noise_coefficient=0.25, coefficient=0.2, fplot=False):
    """
    Noisy training observations g(X) + noise(X) of a tilted 1D sin wave whose
    heteroscedastic noise grows linearly across the input domain [0, 10].

    :param X: input locations
    :param noise_coefficient: scale of the linearly increasing noise
    :param coefficient: tilts the wave so one maximum sits higher than the other
    :param fplot: whether to plot the noisy samples of g(x)
    :return: g(X) + noise(X)
    """
    signal = torch.sin(X) + coefficient * X
    corruption = noise_coefficient * torch.randn_like(X)
    train_obj = signal + corruption
    if fplot:
        plt.plot(X, train_obj, '+', color='green', markersize='12', linewidth='8', label='samples with noise')
        plt.xlabel('x')
        plt.ylabel('g(x)')
        plt.title('Noisy Samples of g(x)')
        plt.legend()
        plt.ylim(-2, 2)
        plt.xlim(0, 10)
        plt.show()
    return train_obj
def exact_sin_objective(X, plot_sample, noise_coefficient=0.25, coefficient=0.2, fplot=True):
    """
    Ground-truth objective f(x) = g(x) - s(x) for the sin wave with linear
    noise; used to monitor the best value found during optimisation.

    :param X: points at which to evaluate the objective (may be an array)
    :param plot_sample: dense grid of input points used for plotting and for
        locating the global maximiser, or None to skip both
    :param noise_coefficient: scale s of the linear noise s(x) = s * x
    :param coefficient: tilts the wave so the noisier maximum is larger
    :return: (f(X), s(X), global maximum of f over plot_sample or None)
    """
    noise_obj = noise_coefficient * X  # s(x)
    exact_obj = (torch.sin(X) + coefficient * X) - noise_obj  # f(x) = g(x) - s(x)
    optimal_value = None
    if plot_sample is not None:
        # Use the dense plot grid as a stand-in for the continuous domain
        # when locating the global maximum of f.
        grid_values = (torch.sin(plot_sample) + coefficient * plot_sample) - noise_coefficient * plot_sample
        optimal_value = torch.max(grid_values)
        if fplot:
            plt.scatter(X, exact_obj, color='green', marker='+', label='Acquisitions')
            plt.plot(plot_sample, grid_values, color='purple', label='f(x)')
            plt.xlabel('x')
            plt.ylabel('f(x)')
            plt.title('Black-box Objective f(x)')
            plt.ylim(-3, 1)
            plt.xlim(0, 10)
            plt.legend()
            plt.show()
    return exact_obj, noise_obj, optimal_value
# Legacy Code below will need to be modified for compatibility with BoTorch
def max_one_off_sin_noise_objective(X, noise, coefficient, fplot=True):
    """
    Objective rewarding g(x) plus the aleatoric noise level (a one-off good
    value!) for the sin wave with linear noise; used to monitor the best
    value obtained so far.

    :param X: input(s) at which to evaluate the objective
    :param noise: linear noise coefficient
    :param coefficient: tilts the wave so the noisier maximum is larger
    :param fplot: whether to plot the black-box objective
    :return: (g(X) + s(X) as float, s(X) as float)
    """
    noise_value = noise * X  # heteroscedastic noise level s(x) at X
    composite_objective = torch.sin(X) + coefficient * X + noise_value
    if fplot:
        plt.plot(X, composite_objective, color='purple', label='objective + aleatoric noise')
        plt.xlabel('x')
        plt.ylabel('objective(x)')
        plt.title('Black-box Objective')
        plt.ylim(-3, 1)
        plt.xlim(0, 10)
        plt.show()
    # Note: float() requires X to be a single-element tensor.
    return float(composite_objective), float(noise_value)
| true |
ecf5bd25e3d4ab769385fd52a5bf8b98bc6cd497 | Python | infinitel8p/forex_exch_calculator | /main.py | UTF-8 | 5,114 | 2.71875 | 3 | [] | no_license | from kivymd.uix.screen import MDScreen
from kivymd.app import MDApp
from kivy.uix.image import Image
from kivymd.uix.button import MDFillRoundFlatIconButton, MDFillRoundFlatButton
from kivymd.uix.textfield import MDTextField
from kivymd.uix.label import MDLabel
from kivymd.uix.toolbar import MDToolbar
import requests
# Free currency-converter API endpoints for both conversion directions.
# NOTE(review): the API key is hard-coded in the URL — consider moving it
# out of source control.
url_hrktoeur = "https://free.currconv.com/api/v7/convert?q=HRK_EUR&compact=ultra&apiKey=75f83697d28865fde0f4"
url_eurtohrk = "https://free.currconv.com/api/v7/convert?q=EUR_HRK&compact=ultra&apiKey=75f83697d28865fde0f4"
# Seed the rates from the on-disk cache (line 0: HRK->EUR, line 1: EUR->HRK);
# build() later refreshes them from the API and rewrites the cache.
with open("cache.txt") as f:
    contents = f.readlines()
    print(contents)
hrktoeur = float(contents[0])
eurtohrk = float(contents[1])
class ConverterApp(MDApp):
    """KivyMD app converting between HRK and EUR using cached API rates."""
    def flip(self):
        """Toggle the conversion direction and reset the input/result widgets.

        ``state`` 0 means HRK->EUR, 1 means EUR->HRK.
        """
        if self.state == 0:
            self.state = 1
            self.toolbar.title = "EUR to HRK converter"
            self.input.hint_text = "Enter the value in EUR"
            self.input.text = ""
            self.label.text = ""
            self.converted.text = ""
        else:
            self.state = 0
            self.toolbar.title = "HRK to EUR converter"
            self.input.hint_text = "Enter the value in HRK"
            self.input.text = ""
            self.label.text = ""
            self.converted.text = ""
    def convert(self, args):
        """Convert the entered amount using the direction in ``state``."""
        if self.state == 0:
            val = round(float(self.input.text) * hrktoeur, 2)
            self.label.text = "is in EUR:"
            self.converted.text = str(val)
        if self.state == 1:
            val = round(float(self.input.text) * eurtohrk, 2)
            self.label.text = "is in HRK:"
            self.converted.text = str(val)
    def build(self):
        """Build the UI, refresh the exchange rates, and return the screen."""
        self.state = 0
        self.theme_cls.primary_palette = "Teal"
        screen = MDScreen()
        # top toolbar
        self.toolbar = MDToolbar(title="HRK to EUR converter")
        self.toolbar.pos_hint = {"top": 1}
        self.toolbar.right_action_items = [
            ["rotate-3d-variant", lambda x: self.flip()]]
        screen.add_widget(self.toolbar)
        # connection status label
        self.status_label = MDLabel(
            halign="right",
            theme_text_color="Hint",
            pos_hint={"center_x": 0.485, 'center_y': 0.05},
            text="Checking connection...",
            #font_size = 12,
            font_style="Caption"
        )
        screen.add_widget(self.status_label)
        # Try to refresh the rates from the API; on any failure keep the
        # cached values loaded at module import time.
        try:
            response = requests.get(url_hrktoeur)
            print(f"\nStatus code: {int(response.status_code)}")
            global hrktoeur, eurtohrk
            if response.status_code == requests.codes.ok:
                # NOTE(review): this second ``global`` statement is redundant —
                # the declaration above already covers the whole function.
                global hrktoeur, eurtohrk
                hrktoeur = response.json()["HRK_EUR"]
                eurtohrk = requests.get(url_eurtohrk).json()["EUR_HRK"]
                # Persist the fresh rates for the next offline start.
                file = open("cache.txt", "w")
                file.write(f"{hrktoeur}\n{eurtohrk}")
                file.close()
                self.status_label.text = f"Status {int(response.status_code)}: CONNECTED\n1 EUR = {eurtohrk} HRK\n1 HRK = {hrktoeur} EUR"
            else:
                self.status_label.theme_text_color = "Error"
                self.status_label.text = f"Status {int(response.status_code)}: DISCONNECTED\n1 EUR = {eurtohrk} HRK\n1 HRK = {hrktoeur} EUR"
        except Exception as e:
            self.status_label.theme_text_color = "Error"
            self.status_label.text = f"Status error: NO INTERNET\n1 EUR = {eurtohrk} HRK\n1 HRK = {hrktoeur} EUR"
        # logo
        screen.add_widget(Image
                          (source="icon.png",
                           pos_hint={"center_x": 0.5, "center_y": 0.7},
                           size_hint=(0.35, 0.35)
                           )
                          )
        # user input
        self.input = MDTextField(
            #font_size = 22,
            hint_text="Enter the value in HRK",
            helper_text="Please use . insted of , for cents",
            helper_text_mode="on_focus",
            halign="center",
            icon_right="calculator",
            multiline=False,
            on_text_validate=self.convert,
            pos_hint={"center_x": 0.5, "center_y": 0.5},
            size_hint=(0.8, 1.2),
            input_type="number",
            mode="rectangle"
        )
        screen.add_widget(self.input)
        # convert button
        screen.add_widget(MDFillRoundFlatButton(
            text="CONVERT",
            #font_size = 17,
            pos_hint={"center_x": 0.5, "center_y": 0.15},
            on_press=self.convert
        )
        )
        # more labels
        self.label = MDLabel(
            halign="center",
            pos_hint={"center_x": 0.5, "center_y": 0.35},
            theme_text_color="Secondary"
        )
        screen.add_widget(self.label)
        self.converted = MDLabel(
            halign="center",
            pos_hint={"center_x": 0.5, "center_y": 0.3},
            theme_text_color="Primary",
            font_style="H5"
        )
        screen.add_widget(self.converted)
        return screen
# Start the Kivy application only when executed as a script.
if __name__ == '__main__':
    ConverterApp().run()
| true |
794a95255a3457723e3aa93ef4e06a168fd8bb27 | Python | TheRaven5520/Die-Module | /Die.py | UTF-8 | 1,338 | 3.484375 | 3 | [
"MIT"
] | permissive | from datetime import datetime
import random
class Die:
    """A weighted die with arbitrary side labels.

    ``numSidesP`` gives the number of sides, ``sidesP`` optional labels for
    each side, and ``weightP`` optional (unnormalised) weights. The die is
    rolled once on construction, so ``top`` is always defined.
    """
    def __init__(self, numSidesP = 6, sidesP = None, weightP = None):
        # BUGFIX: the original used mutable default arguments ([]), so the
        # side labels generated for one Die leaked into every later Die that
        # relied on the defaults. ``None`` sentinels avoid the shared state.
        if sidesP is None:
            sidesP = []
        if not sidesP:
            for i in range(1, numSidesP + 1):
                sidesP.append(i)
        self.sides = sidesP
        if weightP is None or weightP == []:
            weightP = [1] * numSidesP
        # Normalise the weights into a probability mass function.
        self.weights = []
        for i in weightP:
            self.weights.append(i / sum(weightP))
        self.numSides = numSidesP
        self.roll()
    def __str__(self):
        string = "This die has " + str(self.numSides) + " sides,"
        string += "\n"
        for i in self.sides:
            string += str(i) + "\n"
        string += "and weights, respectively,"
        string += "\n"
        for i in self.weights:
            string += str(i) + "\n"
        return string
    def roll(self):
        """Roll the die and return (and remember) the face showing on top."""
        # BUGFIX: seeding with a datetime object raises TypeError on
        # Python 3.11+; seed with the POSIX timestamp to keep the original
        # reseed-from-the-clock intent.
        random.seed(datetime.now().timestamp())
        randRoll = random.random()  # in [0, 1)
        mySum = 0
        i = 0
        for mass in self.weights:
            mySum += mass
            if randRoll < mySum:
                self.top = self.sides[i]
                break
            i += 1
        else:
            # BUGFIX: floating-point rounding can leave the cumulative mass
            # fractionally below randRoll, which previously left ``top``
            # stale (or unset on the first roll); fall back to the last side.
            self.top = self.sides[-1]
        return self.top
    def getTop(self):
        """Return the face currently showing."""
        return self.top
    def setTop(self, value):
        """Force ``value`` face-up; values that are not a side are ignored."""
        if value in self.sides:
            self.top = value
| true |
98a4dd0ab3882176dd538cb667d4e7656dfbf8f8 | Python | dataAlgorithms/data | /python/fileIO_iterOverFixedSizedRecords.py | UTF-8 | 394 | 3.03125 | 3 | [] | no_license | In [3]: !more somefile.txt
111111
222222
333333
444444
555555
666666
In [4]: from functools import partial
In [5]: with open('somefile.txt', 'rb') as f:
...: records = iter(partial(f.read, 3), b'')
...: for r in records:
...: print(r)
...:
b'111'
b'111'
b'\r\n2'
b'222'
b'22\r'
b'\n33'
b'333'
b'3\r\n'
b'444'
b'444'
b'\r\n5'
b'555'
b'55\r'
b'\n66'
b'666'
b'6\r\n'
| true |
f33a272d71d4eb2c38ae3e97f714a17024a919c5 | Python | Kaiquenakao/Python | /Coleções Python/Exercicio10.py | UTF-8 | 344 | 4.1875 | 4 | [] | no_license | """
10. Faça um programa para ler a nota da prova de 15 alunos e armazene num vetor calcule
e imprima a média geral
"""
import statistics

notas = []
# BUGFIX: the exercise asks for 15 students, but the original looped over
# range(1, 5) and read only 4 grades.
for i in range(1, 16):
    nota = float(input(f'Aluno{i}:Insira a sua nota:'))
    # BUGFIX: accept the boundary grades 0 and 10 as well — the original
    # strict comparisons silently discarded them.
    if 0 <= nota <= 10:
        notas.append(nota)
# Guard against every grade having been rejected, which would make
# statistics.mean raise StatisticsError.
if notas:
    print(f'Média geral: {statistics.mean(notas)}')
else:
    print('Média geral: 0')
e88d74192c4e8dee4e868f88bc5945f596b67079 | Python | abhiksark/Machine-Learning | /machine learning/hotstar/solution.py | UTF-8 | 7,877 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 18:05:45 2017
@author: abhik
"""
import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
from external_functions import printing_Kfold_scores
from sklearn.svm import SVC
# Load the pre-split feature matrices plus the ID column for the submission.
test_data = pd.read_csv("test_data.csv")
train_data = pd.read_csv("train_data.csv")
ID = pd.read_csv("ID.csv")
inspect_test = test_data.head()
inspect_train = train_data.head()
# Drop the index column that pandas wrote when the CSVs were created.
test_data.drop(['Unnamed: 0'], inplace=True, axis=1)
train_data.drop(['Unnamed: 0'], inplace=True, axis=1)
ID.drop(['Unnamed: 0'], inplace=True, axis=1)
# Feature columns judged uninformative during manual inspection.
drop_array = ['Crime',
              'Kabaddi',
              'Boxing',
              'Mythology',
              'Reality',
              'Travel',
              'Hockey',
              'FormulaE',
              'Comedy',
              'Teen',
              'NA',
              'Horror',
              'Football',
              'Awards',
              'Science',
              'Tennis',
              'Thriller',
              'Wildlife',
              'Kids',
              'IndiaVsSa',
              'Table Tennis',
              'Volleyball',
              'Athletics',
              'Documentary',
              'Swimming',
              'Formula1',
              'Badminton',
              'Sport',
              '1',
              '3',
              '2',
              '5',
              '4',
              '7',
              '6']
#drop array from inspection
test_data.drop(drop_array, inplace=True, axis=1)
train_data.drop(drop_array, inplace=True, axis=1)
heading_test = list(test_data) #heading for data frame
heading_train = list(train_data)
# Sanity check: print test/train column names side by side (train has the
# extra leading 'segment' target column, hence the i+1 offset).
i=0
while i in range(len(heading_test)):
    print(heading_test[i],heading_train[i+1])
    i =i+1
# Undersample the majority class (segment == 0) to roughly 1.65x the size of
# the minority class (segment == 1) to reduce class imbalance.
number_records_one = len(train_data[train_data.segment == 1])
one_indices = np.array(train_data[train_data.segment == 1].index)
# Picking the indices of the normal classes
normal_indices = train_data[train_data.segment == 0].index
# Out of the indices we picked, randomly select "x" number (number_records_fraud)
random_normal_indices = np.random.choice(normal_indices, int(number_records_one * 1.65), replace = False)
random_normal_indices = np.array(random_normal_indices)
# Appending the 2 indices
under_sample_indices = np.concatenate([one_indices,random_normal_indices])
# Under sample dataset
under_sample_data = train_data.iloc[under_sample_indices,:]
X_undersample = under_sample_data.iloc[:, under_sample_data.columns != 'segment']
y_undersample = under_sample_data.iloc[:, under_sample_data.columns == 'segment']
# Showing ratio
print("Percentage of normal transactions: ", len(under_sample_data[under_sample_data.segment == 0])/float(len(under_sample_data)))
print("Percentage of one transactions: ", len(under_sample_data[under_sample_data.segment == 1])/float(len(under_sample_data)))
print("Total number of transactions in resampled data: ", len(under_sample_data))
################################################################################
#prediction part
# Split both the full and the undersampled datasets into train/test folds.
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# use sklearn.model_selection instead.
y = np.array(train_data['segment'])
train_data.drop(['segment'],inplace=True, axis=1)
X = np.array(train_data)
from sklearn.cross_validation import train_test_split
# Whole dataset
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.3, random_state = 0)
print("Number transactions train dataset: ", len(X_train))
print("Number transactions test dataset: ", len(X_test))
print("Total number of transactions: ", len(X_train)+len(X_test))
# Undersampled dataset
X_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = train_test_split(X_undersample
                                                                                                   ,y_undersample
                                                                                                   ,test_size = 0.3
                                                                                                   ,random_state = 0)
print("")
print("Number transactions train dataset: ", len(X_train_undersample))
print("Number transactions test dataset: ", len(X_test_undersample))
print("Total number of transactions: ", len(X_train_undersample)+len(X_test_undersample))
###############################################################################
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.metrics import confusion_matrix,precision_recall_curve,auc,roc_auc_score,roc_curve,recall_score,classification_report
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
def printing_Kfold_scores(x_train_data,y_train_data):
    """Cross-validate over a small hyper-parameter grid and return the value
    with the best mean recall.

    NOTE(review): this local definition shadows the ``printing_Kfold_scores``
    imported from ``external_functions`` at the top of the file. Despite the
    ``C_parameter`` naming (kept from a LogisticRegression version, see the
    commented-out line below), the value is actually used as the random
    forest's ``n_estimators``.
    """
    fold = KFold(len(y_train_data),5,shuffle=False)
    # Different C parameters
    c_param_range = [50,100]
    results_table = pd.DataFrame(index = range(len(c_param_range),2), columns = ['C_parameter','Mean recall score'])
    results_table['C_parameter'] = c_param_range
    # the k-fold will give 2 lists: train_indices = indices[0], test_indices = indices[1]
    j = 0
    for c_param in c_param_range:
        print('-------------------------------------------')
        print('C parameter: ', c_param)
        print('-------------------------------------------')
        print('')
        recall_accs = []
        for iteration, indices in enumerate(fold,start=1):
            # Call the logistic regression model with a certain C parameter
            #lr =LogisticRegression( C=c_param,penalty='l1',class_weight='balanced')
            lr = RandomForestClassifier(n_estimators = c_param, criterion = 'entropy', random_state = 0)
            # Use the training data to fit the model. In this case, we use the portion of the fold to train the model
            # with indices[0]. We then predict on the portion assigned as the 'test cross validation' with indices[1]
            lr.fit(x_train_data.iloc[indices[0],:],y_train_data.iloc[indices[0],:].values.ravel())
            # Predict values using the test indices in the training data
            y_pred_undersample = lr.predict(x_train_data.iloc[indices[1],:].values)
            # Calculate the recall score and append it to a list for recall scores representing the current c_parameter
            recall_acc = recall_score(y_train_data.iloc[indices[1],:].values,y_pred_undersample)
            recall_accs.append(recall_acc)
            print('Iteration ', iteration,': recall score = ', recall_acc)
        # The mean value of those recall scores is the metric we want to save and get hold of.
        results_table.loc[j,'Mean recall score'] = np.mean(recall_accs)
        j += 1
        print('')
        print('Mean recall score ', np.mean(recall_accs))
        print('')
    best_c = results_table.loc[results_table['Mean recall score'].idxmax()]['C_parameter']
    # Finally, we can check which C parameter is the best amongst the chosen.
    print('*********************************************************************************')
    print('Best model to choose from cross validation is with C parameter = ', best_c)
    print('*********************************************************************************')
    return best_c
###############################################################################
# Fit the final classifier, plot the ROC curve, and write the submission file.
best_c = printing_Kfold_scores(X_train_undersample,y_train_undersample)
from xgboost import XGBClassifier
lr= XGBClassifier()
#lr = LogisticRegression(C = 0.01, penalty = 'l1')
# NOTE(review): XGBClassifier does not expose ``decision_function`` (that is
# a LogisticRegression/SVC API, matching the commented-out line above) — this
# likely raises AttributeError; ``predict_proba(...)[:, 1]`` is the usual
# substitute. Verify before running.
y_pred_undersample_score = lr.fit(X_train_undersample,y_train_undersample.values.ravel()).decision_function(X_test_undersample.values)
fpr, tpr, thresholds = roc_curve(y_test_undersample.values.ravel(),y_pred_undersample_score)
roc_auc = auc(fpr,tpr)
# Plot ROC
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b',label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.0])
plt.ylim([-0.1,1.01])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
# Threshold the positive-class probability at 0.25 and build the submission.
X_submit = np.array(test_data)
y_submit = lr.predict_proba(X_submit)
y_submit[:,1] = y_submit[:,1] > 0.25
columns = ['segment']
sub = pd.DataFrame(data=y_submit[:,1], columns=columns)
sub['ID'] = ID
sub = sub[['ID','segment']]
sub.to_csv("sub_hot.csv", index=False)
| true |
13d462d614a0e198309ba1cda84c9113ac71dbba | Python | Ahiganbana/Datacrt | /src/fighter.py | UTF-8 | 4,964 | 2.828125 | 3 | [] | no_license | import numpy as np
import math
import random
from matplotlib import pyplot as plt
from firecontrolradar import FireControlRadar
from cplane import Plane
class Fighter(Plane):
    """
    Fighter aircraft.

    Supported manoeuvres:
    straight flight, turn, climb, dive, somersault (loop), climbing turn.

    Trajectory samples are appended to ``self.Q`` as 8-element lists;
    based on the update formulas below the layout appears to be
    [x, y, z, heading, pitch, speed, tangential-accel, normal-accel]
    -- TODO confirm against the Plane base class.
    """

    def __init__(self, id, init_data):
        # Initial state vector; index 2 is the starting altitude.
        self.init_data = init_data
        # Aircraft identifier.
        self.id = id
        # On-board fire-control radar.
        self.radar = FireControlRadar()
        # Recorded trajectory samples.
        self.Q = []
        # Rate of climb per altitude band (decreases as altitude rises).
        self.rate_of_climb = [305, 283, 100, 12]
        # Select [max speed, max acceleration, rate of climb] for the
        # starting altitude band.
        # BUG FIX: the original chain of independent `if` statements used
        # strict inequalities on both sides, so altitudes of exactly 1000
        # or 10000 (and any altitude <= 0) left plane_attr unset, causing
        # an AttributeError later.  The if/elif/else below covers every
        # altitude and agrees with the original on all previously-handled
        # values.
        altitude = self.init_data[2]
        if altitude < 1000:
            self.plane_attr = [1470, 30, self.rate_of_climb[0]]
        elif altitude < 10000:
            self.plane_attr = [1470, 30, self.rate_of_climb[1]]
        elif altitude < 17000:
            self.plane_attr = [1470, 30, self.rate_of_climb[2]]
        else:
            self.plane_attr = [1470, 30, self.rate_of_climb[3]]

    def turn_up(self):
        """
        Climbing-turn manoeuvre: a climb/dive segment followed by a turn
        whose radius is derived from the exit speed.
        """
        # creatT and trackdive are provided by the Plane base class
        # -- TODO confirm their contracts.
        t = self.creatT(self.init_data[2], 1) / 3
        # Duration of the first sub-segment, randomised within [t/4, t/2].
        t1 = random.uniform(t / 4, t / 2)
        a1 = random.uniform(5, 10)
        a2 = 0 - a1
        self.trackdive(self.init_data, t, a1, a2, t1)
        # Speed (index 5) of the last recorded trajectory sample.
        v = self.Q[len(self.Q) - 1][5]
        # Radius such that one full circle at speed v takes time t.
        radius = (v * t) / (2 * math.pi)
        print(radius)
        self.turn_trace(self.init_data, abs(radius), t)

    def turn_trace(self, p0, radius, t):
        """
        Turn after the climb (small radius, curving back towards the start).

        p0: state vector (see class docstring) -- mutated in place.
        radius: turn radius.
        t: manoeuvre duration in seconds; one sample every 0.01 s.
        """
        p1 = p0
        # Vertical rate: altitude is decreased at the band's climb rate.
        za = self.plane_attr[2]
        for i in range(int(t * 100)):
            # Snapshot the current state before advancing it.
            data = [0, 0, 0, 0, 0, 0, 0, 0]
            for j in range(len(p1)):
                data[j] = p1[j]
            # Angular speed for the requested radius.
            w = p1[5] / radius
            # Horizontal advance; 1/3.94 and 5.5 look like empirical scale
            # factors -- purpose unclear, TODO confirm.
            p1[0] = 1 / 3.94 * p1[5] * 1 / 100 * math.sin(p1[3]) * math.cos(p1[4]) + p1[0]
            p1[1] = 5.5 * p1[5] * 1 / 100 * math.cos(p1[3]) * math.cos(p1[4]) + p1[1]
            p1[2] = p1[2] - za * 1 / 100
            # Pitch update from normal acceleration.
            p1[4] = p1[4] + 1 / 100 * p1[7] / p1[5]
            # Heading update.
            p1[3] = (p1[3] + 1 / 100 * w)
            # Speed update from tangential acceleration.
            p1[5] = p1[5] + 1 / 100 * p1[6]
            # Constant deceleration along the track.
            p1[6] = -1
            # Normal acceleration; NOTE(review): for circular motion a_n is
            # usually v*w (= w*w*radius), but the original divides by the
            # radius -- kept as-is, confirm the intended formula.
            p1[7] = w * w / radius
            self.Q.append(data)

    def somersault(self, p0, t):
        """
        Somersault (loop) manoeuvre: accelerate straight, fly the vertical
        circle segment, then fly straight again.
        """
        p1 = p0
        amax = self.plane_attr[1]
        vmax = self.plane_attr[0]
        # Phase 1: straight-line acceleration (trackline from Plane).
        self.trackline(p0, t / 3, 1, amax, 3, vmax)
        # Phase 2: climb segment; radius from the current speed so one
        # full circle takes t1.
        t1 = self.creatT(p1[2], 1)
        v = self.Q[len(self.Q) - 1][5]
        print(v)
        radius = (v * t1) / (2 * math.pi)
        print(radius)
        self.vertical_rise(p1, t1, radius / 2)
        # Phase 3: straight-line flight.
        self.trackline(p0, t / 3, 1, amax, 3, vmax)

    def vertical_rise(self, p0, t, radius):
        """
        Vertical climb of the aircraft; one trajectory sample every 0.01 s.

        p0: state vector (see class docstring) -- mutated in place.
        t: segment duration in seconds.
        radius: loop radius used for the angular-speed calculation.
        """
        p1 = p0
        for i in range(int(t * 100)):
            # Snapshot the current state before advancing it.
            data = [0, 0, 0, 0, 0, 0, 0, 0]
            for j in range(len(p1)):
                data[j] = p1[j]
            # Climb rate per altitude band.  NOTE(review): these values
            # differ from self.rate_of_climb -- presumably intentional,
            # confirm.
            if p1[2] > 0 and p1[2] < 1000:
                za = 305
            elif p1[2] >= 1000 and p1[2] < 10000:
                za = 100
            elif p1[2] >= 10000 and p1[2] < 17000:
                # The original wrote `>= 1000` here; because of elif
                # ordering this branch was only ever reached for
                # [10000, 17000), so the corrected bound is
                # behaviour-identical and just clearer.
                za = 58
            else:
                za = 12
            w = p1[5] / radius
            # x coordinate
            p1[0] = p1[5] * 1 / 100 * math.sin(p1[3]) * math.cos(p1[4]) + p1[0]
            # y coordinate
            p1[1] = p1[5] * 1 / 100 * math.cos(p1[3]) * math.cos(p1[4]) + p1[1]
            # z coordinate: constant-acceleration climb term plus the
            # pitch component of the current speed.
            p1[2] = p1[2] + 1 / 2 * za * (1 / 100 * 1 / 100) + p1[5] * 1 / 100 * math.sin(p1[4])
            # Pitch update from normal acceleration.
            p1[4] = p1[4] + 1 / 100 * p1[7] / p1[5]
            # Speed update from tangential acceleration.
            p1[5] = p1[5] + 1 / 100 * p1[6]
            p1[6] = -1
            # Normal acceleration for circular motion (w^2 * r = v * w).
            p1[7] = w * w * radius
            self.Q.append(data)
| true |
7e63ab81ae42d4ba9bc246cb5d3fe87c3a9e052c | Python | billster2006/Module5 | /While loops/input_while.py | UTF-8 | 265 | 4.1875 | 4 | [] | no_license | # list to store guesses
# List that records every guess the user makes.
empty_list = []
# First user input.
guessed_number = int(input('Enter a number between 1 and 100.'))
# BUG FIX: the original condition `1 <= guessed_number >= 100` is a chained
# comparison meaning "guess >= 100" (not "out of range"), and the re-prompt
# discarded its result, so an out-of-range guess looped forever.  Re-prompt
# while the guess is outside [1, 100], recording every attempt.
while not (1 <= guessed_number <= 100):
    empty_list.append(guessed_number)
    guessed_number = int(input('Enter a number between 1 and 100.'))
empty_list.append(guessed_number)
print(empty_list)
| true |
1e310564a9e420b8efe8efbaa04d5130186ab9d5 | Python | fac3d/EveMarket | /market_file_import.py | UTF-8 | 1,623 | 2.890625 | 3 | [] | no_license | #trying to import all market items for sale in a station
import requests
import json
import pandas as pd
import numpy as np
# BUG FIX: the original lines `import product_total_sold from functions`
# were invalid Python syntax; `from <module> import <name>` is the
# correct form.
from functions import product_total_sold, product_total_added
from datetime import timedelta, date, datetime

# EVE region IDs (only Amarr is referenced in the printout below).
Amarr = '10000043'
Hek = '10000042'
Jita = '10000002'
Rens = '10000030'
Dodixie = '10000032'

file = 'Market_Orders.csv'
data = pd.read_csv(file, index_col=0)
print(datetime.today())
#print(data.keys())
count = 0
timeDiff = date.today() - timedelta(days=14)
# Accumulate matched items as plain rows; the DataFrame is built once at
# the end.
# BUG FIX: the original `pd.DataFrame[...]` indexed the class instead of
# calling it (TypeError), and `df1.append(df2)` appended to an
# always-empty frame so only the last row would ever have survived.
rows = []
for type_id, item_name in data.iterrows():
    # Skip items whose totals are unavailable or would divide by zero.
    # (Narrowed from a bare `except:` so Ctrl-C still works.)
    try:
        sold_items = product_total_sold(int(type_id))
        added_items = product_total_added(int(type_id))
        SVR = (sold_items / added_items) * 100
    except Exception:
        continue
    # Percentage margin between sell and buy price.
    margin = ((float(item_name['Sell Price']) - float(item_name['Buy Price'])) / float(item_name['Buy Price'])) * 100
    # Keep items that sell faster than they are listed, with enough
    # volume and at least a 10% margin.
    if SVR >= 100 and added_items >= 14 and margin > 10:
        count += 1
        rows.append([int(type_id), item_name['Item'], int(SVR)])
        print(str(int(type_id)) + ': ' + item_name['Item'] + ' Amarr Sales to Volume Ratio (%) =', int(SVR))
        print('Margin = %.2f' % margin, '%')
        print('Total Sold:', sold_items, 'Total Posted:', added_items)

df = pd.DataFrame(rows, columns=['TypeID', 'Item', 'SVR'])
print(df)
print(datetime.today())
print('')
print(count, ' Items')
print('')
print('End Items')
| true |
98e69163e49a26e50d1808f71cb4b10484f807ac | Python | nianien/algorithm | /src/main/python/leetcode/editor/cn/AddBinary.py | UTF-8 | 1,412 | 3.59375 | 4 | [] | no_license | # 67.add-binary
# 给你两个二进制字符串,返回它们的和(用二进制表示)。
#
# 输入为 非空 字符串且只包含数字 1 和 0。
#
#
#
# 示例 1:
#
# 输入: a = "11", b = "1"
# 输出: "100"
#
# 示例 2:
#
# 输入: a = "1010", b = "1011"
# 输出: "10101"
#
#
#
# 提示:
#
#
# 每个字符串仅由字符 '0' 或 '1' 组成。
# 1 <= a.length, b.length <= 10^4
# 字符串如果不是 "0" ,就都不含前导零。
#
# Related Topics 数学 字符串
# 👍 611 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
    def addBinary(self, a, b):
        """
        Return the sum of two binary strings as a binary string.

        :type a: str
        :type b: str
        :rtype: str
        """
        digits = []
        i, j, carry = len(a) - 1, len(b) - 1, 0
        # Walk both strings right-to-left, carrying into the next column.
        while i >= 0 or j >= 0 or carry:
            total = carry
            if i >= 0:
                total += int(a[i])
                i -= 1
            if j >= 0:
                total += int(b[j])
                j -= 1
            digits.append(str(total % 2))
            carry = total // 2
        # Digits were produced least-significant first.
        return "".join(reversed(digits))
# leetcode submit region end(Prohibit modification and deletion)
# test from here
if __name__ == '__main__':
    # Quick manual smoke checks.
    solver = Solution()
    for lhs, rhs in (("1111", "1111"), ("11", "11")):
        print(solver.addBinary(lhs, rhs))
| true |
ab7a3a0655ba6ad179a6f9f700cb64d7a75ff563 | Python | Safery/pyplots | /price.py | UTF-8 | 626 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import time
import requests
import numpy as np
import matplotlib.pyplot as plt
from sys import argv

script, infile = argv

k = 1
v = []  # prices parsed back out of the file
x = []  # page numbers that were fetched successfully
while True:
    req = requests.get("http://coinbase.com/api/v1/prices/historical?page=" + str(k))
    # BUG FIX: in Python 3 `req.content` is bytes, so '\n' + req.content
    # raised TypeError and `req.content == ""` could never be true
    # (infinite loop).  Use the decoded text body instead.
    body = req.text
    if req.status_code == 200:
        with open(infile, 'a') as f:
            f.write('\n' + body)
        x.append(int(k))
        print(body)
    else:
        print("sorry")
    k += 1
    # An empty page means we have consumed all history.
    if body == "":
        break

with open(infile, 'r') as g:
    for line in g:
        line = line.strip('\n')
        # BUG FIX: each append wrote a leading '\n', so the file contains
        # blank lines; float('') would raise ValueError.  Skip empties.
        if not line:
            continue
        # Assumes each line is "<timestamp>,<price>" with the price
        # starting at column 26 -- TODO confirm the API's line format.
        v.append(float(line[26:]))

plt.plot(x, v)
plt.show()
| true |
e9f8e4082d29e91fb7bdb23ee1f7f822bd4be6ed | Python | tankman89/pycharmprojects | /division.py | UTF-8 | 585 | 3.859375 | 4 | [] | no_license | # !/usr/bin/python3
# -*- coding:utf-8 -*-
# author:tank_man time:2018/4/7
print('给我两个数字,我将把他们两个相除!')
print("输入'退出'退出!")
# Prompt for pairs of numbers until the user types '退出' (quit).
while True:
    first_number = input('\n请输入第一个数字')
    if first_number == '退出':
        break
    second_number = input('\n请输入第二个数字')
    if second_number == '退出':
        print('\n回头见!')
        break
    try:
        answer = int(first_number) / int(second_number)
    except ZeroDivisionError:
        print('您不能除以0!')
    except ValueError:
        # BUG FIX: non-numeric input previously crashed with an unhandled
        # ValueError; report it and keep prompting instead.
        print('请输入有效的数字!')
    else:
        print(answer)
| true |