| blob_id (string, 40-40) | directory_id (string, 40-40) | path (string, 2-616) | content_id (string, 40-40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40-40) | revision_id (string, 40-40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e027a84960e501ac8790ee4f8a6f4d0ab94394ca
|
39c578b5d9eb5ce626f541d8a441533e7bafeb2d
|
/mlsurvey/sl/__init__.py
|
d0be503a16efc5bd3d1abe0fdcf612a0bebcefc5
|
[
"MIT"
] |
permissive
|
jlaumonier/mlsurvey
|
09a0fd0ca172f8213c3229e7623306983bd00c58
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
refs/heads/master
| 2023-08-14T18:45:03.838227
| 2021-10-15T18:47:43
| 2021-10-15T18:47:43
| 261,498,675
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
from . import datasets
from . import models
from . import visualize
from . import workflows
|
[
"laumonierjulien@gmail.com"
] |
laumonierjulien@gmail.com
|
8429ea38ff229320d3168c06b549a2e44701910d
|
d63c503df093f4a6f2e4f5fa796c4864a4418461
|
/efficient_merge.py
|
171610e831fbc392955291b3af2fdee8bf1b256c
|
[] |
no_license
|
99rishita/Geeksforgeeks
|
963e4c9d484cd615e7ffb7f640d712f15cb7ad3e
|
ece2da9e1a5f39a54de4af4ee13913e67b10745e
|
refs/heads/master
| 2022-12-29T04:28:11.004559
| 2020-10-02T18:24:39
| 2020-10-02T18:24:39
| 277,882,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
def find_gap(gap):
if gap<=1:
return 0
return (gap // 2) + (gap%2)
def merge_efficient(arr1, arr2):
n = len(arr1)
m = len(arr2)
gap = m+n
    # keep the halved gap; the original call discarded find_gap's return value
    gap = find_gap(gap)
while gap > 0:
#comparing the elements in first array
i = 0
while i + gap < n:
if arr1[i] > arr1[i+gap]:
arr1[i], arr1[i+gap] = arr1[i+gap], arr1[i]
i += 1
#comparing elements of both the arrays
j = gap - n if gap > n else 0
while i < n and j < m:
if arr1[i] > arr2[j]:
arr1[i], arr2[j] = arr2[j], arr1[i]
i += 1
j += 1
#comparing elements in second array
if j < m:
j = 0
while j + gap < m:
if arr2[j] > arr2[j + gap]:
arr2[j], arr2[j+gap] = arr2[j+gap], arr2[j]
j += 1
gap = find_gap(gap)
arr1 = [1,5,9,10,15,20]
arr2 = [2,3,8,13]
merge_efficient(arr1, arr2)
print("After Merging \nFirst Array:", end="")
for i in range(len(arr1)):
print(arr1[i] , " ", end="")
print("\nSecond Array: ", end="")
for i in range(len(arr2)):
print(arr2[i] , " ", end="")
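# Illustrative check (not part of the original snippet): the gap-based merge
# should leave arr1 and arr2 holding one sorted sequence in place.
assert arr1 == [1, 2, 3, 5, 8, 9]
assert arr2 == [10, 13, 15, 20]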
|
[
"pinnintirevati999@gmail.com"
] |
pinnintirevati999@gmail.com
|
ea5cfd69f4f9bc994ccc06e09877436971614b21
|
e2a545f54dbcbd26406e84ce1b3a2881e473ff0f
|
/libro/Problemas_resueltos/Capitulo4/listas.py
|
ae1214f3eb85a6a3c81490d289b51a789a64f77e
|
[] |
no_license
|
danieljobvaladezelguera/CYPDANIELJVE
|
736bbec0700d36722de24ccb2272281cfbfa54a4
|
ce32a71f6f559749a9e5eb9d95a6cc7099de213f
|
refs/heads/master
| 2021-06-28T21:38:52.079378
| 2021-01-25T22:04:35
| 2021-01-25T22:04:35
| 207,672,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
# Array of monthly rainfall
# Variables to declare
LLUVIAS_NORTE = [80, 60, 120, 100, 70, 150, 100, 47, 95, 70, 100, 130]
for indice in range(1, 12, 1):
    print(f" mes { indice +1 } en region norte={ LLUVIAS_NORTE[indice] } ")
print(LLUVIAS_NORTE[4])
sueldos = []
for indice in range(7):
    sueldos.append(int(input("Dame el sueldo: ")))
print(sueldos)
suma = 0
for indice in range(7):
    suma += sueldos[indice]
promedio = suma / 7
cont = 0  # the original used cont without initializing it first
for indice in range(7):
    if sueldos[indice] > promedio:
        cont = cont + 1
        print("Arriba:", sueldos[indice])
|
[
"valadezlender@gmail.com"
] |
valadezlender@gmail.com
|
198e1aa788b3acfd12ea1893817fbfa75a25b31a
|
c33e7174a79c4d98118bfd2cceff05f21f3838c9
|
/students/models.py
|
c3bdfb238742122f67801201c340c4e38d73bcb3
|
[] |
no_license
|
itepliuk/studentsdb
|
cb7d3a0885b7e5f1ca7cc690563554bac6f1b29c
|
87f9ac5c7f0c0e53b136498e4604367cb08c39f9
|
refs/heads/master
| 2020-03-08T06:20:07.222533
| 2019-02-24T20:27:30
| 2019-02-24T20:27:30
| 127,969,184
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,705
|
py
|
from django.db import models
from django.db.models import Q
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.core.validators import MaxValueValidator
from django.contrib.auth.models import User
from django.utils.text import slugify
# Create search student manager
class StudentManager(models.Manager):
def search(self, query=None):
qs = self.get_queryset()
if query is not None:
or_lookup = (
Q(last_name__icontains=query) |
Q(first_name__icontains=query) |
Q(middle_name__icontains=query) |
Q(notes__icontains=query) |
Q(ticket__iexact=query))
qs = qs.filter(or_lookup).distinct()
return qs
# Create your models here.
class Student(models.Model):
"""Student model"""
male = 'male'
female = 'female'
CHOICES = (
(male, 'Чоловіча'),
(female, 'Жіноча')
)
class Meta():
verbose_name = 'Студент'
verbose_name_plural = 'Студенти'
ordering = ['last_name']
first_name = models.CharField(
"Ім'я",
max_length=256,
blank=False
)
last_name = models.CharField(
"Прізвище",
max_length=256,
blank=False
)
middle_name = models.CharField(
"По-батькові",
max_length=256,
blank=True,
default=''
)
birthday = models.DateField(
"Дата народження",
blank=False,
null=True
)
photo = models.ImageField(
"Фото",
blank=True,
null=True
)
ticket = models.CharField(
"Білет",
max_length=256,
blank=False
)
notes = models.TextField(
"Додаткові нотатки",
blank=True,
null=True,
)
gender = models.CharField(
"Стать",
max_length=25,
blank=False,
choices=CHOICES,
default=male
)
student_group = models.ForeignKey('Group',
verbose_name='Група',
blank=False,
null=True,
on_delete=models.PROTECT
)
slug = models.SlugField(
max_length=256,
unique=True,
)
objects = StudentManager()
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
class Group(models.Model):
"""Group Model"""
class Meta():
verbose_name = 'Група'
verbose_name_plural = 'Групи'
ordering = ['title']
title = models.CharField(
'Назва',
max_length=256,
blank=False,
)
leader = models.OneToOneField('Student',
verbose_name='Староста',
blank=True,
null=True,
on_delete=models.SET_NULL
)
notes = models.TextField(
'Додаткові нотатки',
blank=True,
)
slug = models.SlugField(
max_length=256,
unique=True,
)
def __str__(self):
if self.leader:
return '{} ({} {})'.format(
self.title, self.leader.first_name, self.leader.last_name)
else:
return '{}'.format(self.title)
class Exam(models.Model):
class Meta():
verbose_name = 'Екзамен'
verbose_name_plural = 'Екзамени'
ordering = ['exam_date']
title = models.CharField(
'Назва',
max_length=256,
blank=False,
)
teacher = models.CharField(
'Викладач',
max_length=256,
blank=False,
)
exam_date = models.DateTimeField(
"Дата іспиту",
blank=False,
null=True
)
duration = models.CharField(
'Тривалість',
max_length=256,
blank=True,
)
exam_group = models.ForeignKey('Group',
verbose_name='Група',
blank=True,
null=True,
on_delete=models.CASCADE
)
def __str__(self):
if self.exam_group:
return '{} {} {}'.format(
self.title, self.teacher, self.exam_group.title)
else:
return '{} {}'.format(self.title, self.teacher)
class Rating(models.Model):
"""docstring for Rating"""
student = models.ForeignKey('Student',
verbose_name='Студент',
blank=True,
null=True,
on_delete=models.CASCADE
)
exam_rating = models.ForeignKey('Exam',
verbose_name='Екзамен',
blank=True,
null=True,
on_delete=models.CASCADE
)
mark = models.PositiveIntegerField(
'Оцінка',
default=0,
validators=[MaxValueValidator(100, 'Оцінка не може бути більше 100 балів')]
)
notes = models.TextField(
'Додаткові нотатки',
blank=True,
)
class Meta():
verbose_name = 'Оцінка'
verbose_name_plural = 'Оцінки'
def __str__(self):
return '{} {}'.format(self.student, self.mark)
@property
def ects(self):
if self.mark >= 90 and self.mark <= 100:
return 'A'
elif self.mark >= 80 and self.mark < 90:
return 'B'
elif self.mark >= 65 and self.mark < 80:
return 'C'
        elif self.mark >= 55 and self.mark < 65:
            return 'D'
elif self.mark >= 50 and self.mark < 55:
return 'E'
elif self.mark >= 1 and self.mark < 50:
return 'F'
else:
return 'Оцінка ще не виставлена'
def passfail(self):
if self.mark >= 50:
return True
else:
return False
class Issue(models.Model):
    """Issues are sent to the admin from the contact admin form"""
from_email = models.EmailField(
'Email адреса',
)
subject = models.CharField(
'Заголовок листа',
max_length=128,
)
message = models.TextField(
'Текст повідомлення',
max_length=2560,
)
created_date = models.DateTimeField(
'Дата створення заявки',
auto_now_add=True,
)
is_replied = models.BooleanField(
'Відправлено',
default=False,
)
class Meta():
verbose_name = 'Заявка'
verbose_name_plural = 'Заявки'
def __str__(self):
return 'Заявка № {}'.format(self.id)
class Answer(models.Model):
    """Answers are sent as a reply to Issues from the admin"""
user = models.ForeignKey(
User,
null=True,
on_delete=models.SET_NULL,
)
subject = models.CharField(
'Заголовок листа',
max_length=128,
)
message = models.TextField(
'Текст повідомлення',
max_length=2560,
)
answer_date = models.DateTimeField(
'Дата відповіді',
auto_now_add=True,
)
issue = models.OneToOneField(
'Issue',
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name='answer',
)
class Meta():
verbose_name = 'Відповідь'
verbose_name_plural = 'Відповіді'
def __str__(self):
return 'Відповідь на заявку № {}'.format(self.issue.id)
class MonthJournal(models.Model):
"""Students Monthly Journal"""
    student = models.ForeignKey('Student', verbose_name='Студент', blank=False,
                                unique_for_month='date',
                                on_delete=models.CASCADE)  # on_delete assumed; it is required in Django 2.0+
    # we only need year and month, so always set day to
# first day of the month
date = models.DateField(verbose_name='Дата', blank=False)
for day in range(1, 32):
locals()['present_day%d' % day] = models.BooleanField(default=False)
class Meta:
verbose_name = 'Місячний Журнал'
verbose_name_plural = 'Місячні Журнали'
def __str__(self):
return '{}: {}, {}'.format(self.student.last_name, self.date.month, self.date.year)
# Signals
# -----------------------------------------------------------------------------
@receiver(pre_save, sender=Group)
def pre_save_group_slug(sender, **kwargs):
instance = kwargs.get('instance')
if instance:
group = Group.objects.filter(pk=instance.id).first()
if not instance.slug or group and instance.title != group.title:
instance.slug = slugify(instance.title)
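# Illustrative usage of the custom manager above (not part of the original
# file): StudentManager.search ORs icontains lookups over the name fields and
# notes plus an exact, case-insensitive ticket match, e.g.
#   matches = Student.objects.search(query='Петренко')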
|
[
"itepliuk@gmail.com"
] |
itepliuk@gmail.com
|
db3312bd5d33bc08f5a0b1adf886af949c3ba7af
|
586383ed657389cc67ca6c822b3ebd7e91e4d5a9
|
/app_page_cap_img/migrations/0007_auto_20190605_1741.py
|
85e91d1605b03e2e5499b9ac24ff3598c929f434
|
[] |
no_license
|
idelfrides/app_capturepage_django
|
d510e824ca57e598ec7c8bcc2e9e7c7fa04099f6
|
6ad6d87e76deb6075195ee2117c0974a6b480b5f
|
refs/heads/master
| 2022-06-14T17:44:15.945803
| 2022-06-07T20:30:18
| 2022-06-07T20:30:18
| 225,614,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
# Generated by Django 2.2.2 on 2019-06-05 20:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_page_cap_img', '0006_auto_20190605_1740'),
]
operations = [
migrations.AlterField(
model_name='pagecapimage',
name='headline',
field=models.TextField(default='Coloque sua Headline aqui até 300 caracteres'),
),
]
|
[
"idelfridesjorgepapai@gmail.com"
] |
idelfridesjorgepapai@gmail.com
|
0d0a75c5b66582039794507e23f6de7d8f58d838
|
4d72f77f488d798aa9a4c21f231f91b8f3019009
|
/main.py
|
5231121a05946e92b654380b28612bf269d4adf5
|
[] |
no_license
|
H1dery/Phoenix_tools
|
1f0e192b18f50afd340d80215212e08229f854c3
|
4d5dda09f0a1d363ad0dc6c79db562e38c6a45d3
|
refs/heads/master
| 2020-04-23T01:43:53.006174
| 2019-02-15T07:50:01
| 2019-02-15T07:50:01
| 170,822,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,096
|
py
|
#coding=utf-8
## __author__ = "Fidcer" ##
import Phoenix_scan
import argparse
import re,sys
import socket
import write_html
import Web_Directory
def main():
PortList = [21, 22, 23, 25, 80, 135, 137, 139, 443, 445, 1433, 1502, 3306, 3389, 8080, 9015]
parser = argparse.ArgumentParser()
parser.add_argument('-H', dest='Host', help="Host like: 192.168.3.1 or http://localhost")
parser.add_argument('-p', dest='Ports', nargs='+', type=int, help="Port like: 80 443 21,Default Scan Ports 21, 22, 23, 25, 80, 135, 137, 139, 445, 443, 1433, 1502, 3306, 3389, 8080, 9015",default=PortList)
parser.add_argument('-T', dest='Threads',type=int,help="Thread number,Default:2",default=2)
args = parser.parse_args()
    if args.Host is None or args.Ports is None:
        parser.print_help()
        sys.exit(0)
    # accept both "http://host" and bare host/IP forms; the original indexed
    # [1], so a host without a scheme printed the help text and exited
    Host = args.Host.split('://')[-1]
Ports = args.Ports
ip_search = re.compile('^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$')
    if ip_search.match(Host):  # check whether the host is an IP address
for Port in Ports:
Phoenix_scan.nmapScan(Host,Port)
else:
try:
domain_ip = socket.gethostbyname(Host)
except:
print("please Enter the correct domain name.")
sys.exit(0)
for Port in Ports:
Phoenix_scan.nmapScan(domain_ip,Port)
#Ports_Version_List = Phenix_scan.Scan_Ports_Version
#print(Ports_Version_List)
# print(Phoenix_scan.Scan_Ports_Version)
Scan_Ports_Joins = ('\r\n<br>'.join(str(d) for d in Phoenix_scan.Scan_Ports_Version))
ScanPort_write = str(Scan_Ports_Joins)
write_html.template_scan_results(Host,ScanPort_write)
Web_Directory.scan_web_dirb(args.Host+'/',args.Threads)
#print()
Scan_Dirbs_Joins = ('\r\n<br>'.join(str(d) for d in Web_Directory.webdirb_list))
ScanDirbs = str(Scan_Dirbs_Joins)
write_html.template_web_dirb(Host,ScanDirbs)
if __name__ == '__main__':
main()
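# Example invocation (illustrative, based on the argparse help strings above):
#   python main.py -H http://example.com -p 80 443 -T 4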
|
[
"noreply@github.com"
] |
H1dery.noreply@github.com
|
d08e18378fecc88d481a841cdfef086e6ed3d832
|
141508442608aacea6087a35d00aeb85ce02474f
|
/upgrade.py
|
c9958f3e56242edb0df9c81f99de39715f79de9a
|
[
"MIT"
] |
permissive
|
eoakley/chs
|
7e82eabd15a97bf0800ac5b3ffc90a9651ca6c80
|
ecfe51bff1c674d397c24a466b9d79f72743706b
|
refs/heads/master
| 2020-06-30T16:21:06.683813
| 2019-08-09T12:59:44
| 2019-08-09T12:59:44
| 200,882,563
| 0
| 0
|
MIT
| 2019-08-06T15:54:54
| 2019-08-06T15:54:53
| null |
UTF-8
|
Python
| false
| false
| 1,751
|
py
|
#!/usr/bin/env python
import sys
import os
file_path = os.path.dirname(os.path.abspath(__file__))
file = os.path.join(file_path, 'chs/VERSION')
# Usage: python upgrade.py [major|minor|patch]
if len(sys.argv) > 1 and sys.argv[1] in ('major', 'minor', 'patch'):
    part = sys.argv[1]
    with open(file, 'r') as opened_file:
        [major, minor, patch] = opened_file.read().rstrip().split('.')
    old_version = '{}.{}.{}'.format(major, minor, patch)
    if part == 'major':
        new_version = '{}.{}.{}'.format(int(major) + 1, '0', '0')
    elif part == 'minor':
        new_version = '{}.{}.{}'.format(major, int(minor) + 1, '0')
    else:
        new_version = '{}.{}.{}'.format(major, minor, int(patch) + 1)
    with open(file, 'w') as opened_file:
        opened_file.write(new_version)
    print('\x1b[38;5;1m ↘ \x1b[38;5;231;1m' + old_version + '\x1b[49;0m\n\x1b[38;5;2m ↗ \x1b[38;5;231;1m' + new_version + '\x1b[49;0m')
|
[
"zuber.nicholas@gmail.com"
] |
zuber.nicholas@gmail.com
|
c7fa2edb51934c7a54bfd3d88041a82f6c413dc6
|
d161ff2c791e3a6a0996b6bad453e92b67b95c82
|
/setup.py
|
a2466a813edb26a9febe9305df69df3f3ce07d73
|
[] |
no_license
|
brett-smythe/steve_zissou
|
316262f941395ad42e54b8d9bdda0ac288892868
|
6223a2f30e3fe6ab023a06649078e809c00f1a73
|
refs/heads/master
| 2020-12-24T06:54:39.194933
| 2018-02-26T05:04:13
| 2018-02-26T05:04:13
| 59,321,394
| 0
| 0
| null | 2016-11-18T18:21:43
| 2016-05-20T19:36:03
|
Python
|
UTF-8
|
Python
| false
| false
| 658
|
py
|
"""Setuptools for steve-zissou service"""
from setuptools import setup, find_packages
reqs = []
with open('requirements.txt') as inf:
    for line in inf:
        line = line.strip()
        if line:  # skip blank lines so they don't end up in install_requires
            reqs.append(line)
setup(
name='steve-zissou',
version='0.1.0',
description='Web app for displaying data collected from various sources',
author='Brett Smythe',
author_email='smythebrett@gmail.com',
maintainer='Brett Smythe',
maintainer_email='smythebrett@gmail.com',
packages=find_packages(),
    install_requires=reqs,  # the original 'install_reqs' is not a setuptools keyword
entry_points={
'console_scripts': [
'steve-zissou=steve_zissou.app:test'
]
}
)
|
[
"brett@debian-laptop"
] |
brett@debian-laptop
|
058177b7d349b12b84e5d5301c13a103392fba19
|
7cc56fe7ea30d97151662a06221fde22607a96af
|
/code/models/item.py
|
e2eaf827479ebdb39af6402a1d9232cbe3d4a4b7
|
[
"MIT"
] |
permissive
|
sahinme/python-flaskSQLAlchemy-api
|
09f6d68ac13861e129d939795e4aecd29aeff208
|
b765845d715a76f5702c1e19bb946704df5796ec
|
refs/heads/master
| 2022-12-05T06:23:54.872370
| 2020-08-22T21:22:16
| 2020-08-22T21:22:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 918
|
py
|
from db import db
class ItemModel(db.Model):
__tablename__ = "items"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80))
price = db.Column(db.Float(precision=2))
store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
store = db.relationship('StoreModel')
def __init__(self, name, price, store_id):
self.name = name
self.price = price
self.store_id = store_id
def json(self):
return {'id': self.id, 'name': self.name, 'price': self.price, 'store_id': self.store_id}
@classmethod
def find_by_name(cls, name):
return cls.query.filter_by(name=name).first()
@classmethod
def find_all(cls):
return cls.query.all()
def save_to_db(self):
db.session.add(self)
db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
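# Illustrative usage inside a Flask app context (not part of the original file):
#   item = ItemModel('chair', 19.99, store_id=1)
#   item.save_to_db()
#   ItemModel.find_by_name('chair').json()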
|
[
"alisahindev@gmail.com"
] |
alisahindev@gmail.com
|
7c046af1dfc10062a55c859b7eed0cf62675b5fa
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/detection/ABINet_for_PyTorch/mmocr/datasets/pipelines/transform_wrappers.py
|
15dd0e9caa84478e16a5e4736304c51d81814722
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 4,022
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
import random
import mmcv
import numpy as np
import torchvision.transforms as torchvision_transforms
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import Compose
from PIL import Image
@PIPELINES.register_module()
class OneOfWrapper:
"""Randomly select and apply one of the transforms, each with the equal
chance.
Warning:
Different from albumentations, this wrapper only runs the selected
transform, but doesn't guarantee the transform can always be applied to
the input if the transform comes with a probability to run.
Args:
transforms (list[dict|callable]): Candidate transforms to be applied.
"""
def __init__(self, transforms):
assert isinstance(transforms, list) or isinstance(transforms, tuple)
assert len(transforms) > 0, 'Need at least one transform.'
self.transforms = []
for t in transforms:
if isinstance(t, dict):
self.transforms.append(build_from_cfg(t, PIPELINES))
elif callable(t):
self.transforms.append(t)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, results):
return random.choice(self.transforms)(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms})'
return repr_str
@PIPELINES.register_module()
class RandomWrapper:
"""Run a transform or a sequence of transforms with probability p.
Args:
transforms (list[dict|callable]): Transform(s) to be applied.
p (int|float): Probability of running transform(s).
"""
def __init__(self, transforms, p):
assert 0 <= p <= 1
self.transforms = Compose(transforms)
self.p = p
def __call__(self, results):
return results if np.random.uniform() > self.p else self.transforms(
results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transforms={self.transforms}, '
repr_str += f'p={self.p})'
return repr_str
@PIPELINES.register_module()
class TorchVisionWrapper:
"""A wrapper of torchvision transforms. It applies specific transform to
``img`` and updates ``img_shape`` accordingly.
Warning:
This transform only affects the image but not its associated
annotations, such as word bounding boxes and polygon masks. Therefore,
it may only be applicable to text recognition tasks.
Args:
op (str): The name of any transform class in
:func:`torchvision.transforms`.
**kwargs: Arguments that will be passed to initializer of torchvision
transform.
:Required Keys:
- | ``img`` (ndarray): The input image.
:Affected Keys:
:Modified:
- | ``img`` (ndarray): The modified image.
:Added:
- | ``img_shape`` (tuple(int)): Size of the modified image.
"""
    def __init__(self, op, **kwargs):
        # accept either a transform class name or the class itself; the
        # original asserted str first, which made the class branch unreachable
        if mmcv.is_str(op):
            obj_cls = getattr(torchvision_transforms, op)
        elif inspect.isclass(op):
            obj_cls = op
        else:
            raise TypeError(
                f'type must be a str or valid type, but got {type(op)}')
self.transform = obj_cls(**kwargs)
self.kwargs = kwargs
def __call__(self, results):
assert 'img' in results
# BGR -> RGB
img = results['img'][..., ::-1]
img = Image.fromarray(img)
img = self.transform(img)
img = np.asarray(img)
img = img[..., ::-1]
results['img'] = img
results['img_shape'] = img.shape
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(transform={self.transform})'
return repr_str
|
[
"chenhao388@huawei.com"
] |
chenhao388@huawei.com
|
31e7de61859d97826c355ca015623239c96f870b
|
c14de8b244c2c9404cbed969c31f65a328d83ba3
|
/examples/common/python/connectors/interfaces/worker_registry_interface.py
|
f02b70530836415a19681a673b174733300280ef
|
[
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-public-domain",
"Zlib"
] |
permissive
|
Bavaji9/avalon
|
5fd9b246a2f1a96838b96042dae278d5dd4ab250
|
70ee4f75c6a4cc41845692840e4cc74b8dd65652
|
refs/heads/master
| 2021-01-03T06:40:05.920729
| 2019-12-16T16:57:41
| 2019-12-30T11:21:15
| 239,964,073
| 0
| 0
|
Apache-2.0
| 2020-02-12T08:51:31
| 2020-02-12T08:42:06
|
C++
|
UTF-8
|
Python
| false
| false
| 6,521
|
py
|
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
class WorkerRegistryInterface(ABC):
"""
WorkerRegistryInterface is an abstract base class containing abstract APIs
    which need to be implemented by an actual blockchain connector
"""
def __init__(self):
super().__init__()
@abstractmethod
def worker_register(self, worker_id, worker_type, organization_id,
application_type_ids, details, id=None):
"""
Registering a New Worker
Inputs
1. worker_id is a worker id, e.g. an Ethereum address or
a value derived from the worker's DID.
        2. worker_type defines the type of Worker. Currently defined types are:
            1 - "TEE-SGX": an Intel SGX Trusted Execution Environment
            2 - "MPC": Multi-Party Compute
            3 - "ZK": Zero-Knowledge
3. organization_id is an optional parameter representing the
organization that hosts the Worker, e.g. a bank in the consortium or
anonymous entity.
4. application_type_ids is an optional parameter that defines
application types supported by the Worker.
5. details is detailed information about the worker in JSON format as
defined in
https://entethalliance.github.io/trusted-computing/spec.html
#common-data-for-all-worker-types
6. id is used for json rpc request
"""
pass
@abstractmethod
def worker_update(self, worker_id, details, id=None):
"""
Updating a Worker
Inputs
1. worker_id is a worker id, e.g. an Ethereum address or
a value derived from the worker's DID.
2. details is detailed information about the worker in JSON format
3. id is used for json rpc request
"""
pass
@abstractmethod
def worker_set_status(self, worker_id, status, id=None):
"""
Set the worker status identified by worker id
Inputs
1. worker_id is a worker id
2. status defines Worker status. The currently defined values are:
1 - indicates that the worker is active
2 - indicates that the worker is "off-line" (temporarily)
3 - indicates that the worker is decommissioned
4 - indicates that the worker is compromised
3. id is used for json rpc request
"""
pass
@abstractmethod
def worker_retrieve(self, worker_id, id=None):
"""
Retrieve worker by worker id
Inputs
1. worker_id is the id of the registry whose details are requested.
Outputs
The same as the input parameters to the corresponding call to
worker_register()
plus status as defined in worker_set_status.
2. id is used for json rpc request
"""
pass
@abstractmethod
def worker_lookup(self, worker_type, organization_id, application_type_id,
id=None):
"""
Initiating Worker lookup
This function retrieves a list of Worker ids that match the input
parameters.
The Worker must match all input parameters (AND mode) to be included
in the list.
If the list is too large to fit into a single response (the maximum
number of entries in a single response is implementation specific),
the smart contract should return the first batch of the results
and provide a lookupTag that can be used by the caller to
retrieve the next batch by calling worker_lookup_next.
All input parameters are optional and can be provided in any
combination to select Workers.
Inputs
1. worker_type is a characteristic of Workers for which you may wish
to search
2. organization_id is an id of an organization that can be used to
search for one or more Workers that belong to this organization
3. application_type_id is an application type that is supported by
the Worker
4. id is used for json rpc request
Outputs
1. total_count is a total number of entries matching a specified
lookup criteria. If this number is bigger than size of ids array,
the caller should use lookupTag to call workerLookUpNext to
retrieve the rest of the ids.
2. lookup_tag is an optional parameter. If it is returned, it means
that there are more matching Worker ids that can be retrieved by
calling function workerLookUpNext with this tag as an input parameter.
3. ids is an array of the Worker ids that match the input parameters.
"""
pass
@abstractmethod
def worker_lookup_next(self, worker_type, organization_id,
application_type_id, lookup_tag, id=None):
"""
Getting Additional Worker Lookup Results
Inputs
1. worker_type is a characteristic of Workers for which you may wish
to search.
2. organization_id is an organization to which a Worker belongs.
3. application_type_id is an application type that has to be supported
by the Worker.
4. lookup_tag is returned by a previous call to either this function
or to worker_lookup.
5. id is used for json rpc request
Outputs
1. total_count is a total number of entries matching this lookup
criteria. If this number is larger than the number of ids returned
so far, the caller should use lookupTag to call workerLookUpNext to
retrieve the rest of the ids.
        2. new_lookup_tag is an optional parameter. If it is returned, it
        means that there are more matching Worker ids that can be retrieved
        by calling this function again with this tag as an input parameter.
3. ids is an array of the Worker ids that match the input parameters.
"""
pass
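# A minimal sketch (not from the original file) of how a caller might page
# through results with a concrete WorkerRegistryInterface implementation;
# `registry` and the result field names are assumptions based on the
# docstrings above:
#   total, lookup_tag, ids = registry.worker_lookup(worker_type, org_id, app_id)
#   while lookup_tag is not None:
#       total, lookup_tag, more = registry.worker_lookup_next(
#           worker_type, org_id, app_id, lookup_tag)
#       ids.extend(more)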
|
[
"32893509+danintel@users.noreply.github.com"
] |
32893509+danintel@users.noreply.github.com
|
c07305618de87bdc9f232c58a41421805d9bbb91
|
085da9aa977db6553e4df099b87330a2f97f8e33
|
/2/Q6/main.py
|
c512b16f2e5d15a58435f22b093baed5991c0b00
|
[] |
no_license
|
AakashSingh01/Data-Analysis
|
a23ea75a8872b8e51408ac9db8f469a3e4b60d3c
|
8d4c4659c67d2c929e3ecd2357961b1050ccdd6b
|
refs/heads/master
| 2021-09-01T12:57:32.833596
| 2017-12-27T04:17:11
| 2017-12-27T04:17:11
| 110,474,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 574
|
py
|
def stock(t,share,names,y):
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
    # iterate over the actual number of rows instead of a hardcoded count
    ls = [share[i] for i in range(len(names)) if names[i] == t]
#plt.plot(ls)
#plt.show()
model = sm.OLS(y, ls).fit()
print("Predicted value for",t," is :",model.predict([250]))
import pandas as pd
import scipy.stats.mstats as sc
data = pd.read_csv('data/STOCKS.csv')
Columns= ['Close', 'Name']
a=list(set(data['Name']))
y=[i+1 for i in range (252)]
#for i in a :
# print(i,"curve : ")
|
[
"noreply@github.com"
] |
AakashSingh01.noreply@github.com
|
22e55005276482dd950f14ec8102feef648b751a
|
99329cfd13e1f9e0f7f04c692f8dabcc7cddb8b6
|
/manage.py
|
e02dabd32718b2adf7b5d1218d1d996383b41f2d
|
[] |
no_license
|
vasudhavarshney/Cart_API_With_DjangoRestApi
|
c24633c86cc529103e69beac9e155e4c00cfd481
|
7395c747fe4a7be653f6e07917a0a4296e82126b
|
refs/heads/master
| 2023-04-06T02:44:51.495293
| 2021-04-30T08:28:34
| 2021-04-30T08:28:34
| 363,075,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restapiwithmongodb.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"vasudhavarshney@gmail.com"
] |
vasudhavarshney@gmail.com
|
cf715fd5b9f678b25d6f85fc8b63ba5a8d66ed4b
|
952d50bcc73290edc37d934a5065c8f7e9415a32
|
/fibonacci sequence.py
|
34bc6cea4ac65e96c00929faf51e0c15091dff00
|
[] |
no_license
|
levanin/UCYEAR2
|
6929450e893919790a0a14431e9de72237b026fa
|
0a250b886d93c207ed3805f0b497404276cb2f38
|
refs/heads/master
| 2020-04-26T12:20:26.888922
| 2019-03-24T08:12:56
| 2019-03-24T08:12:56
| 173,546,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
def fibonacci(n):
    # the original used assignment (=) instead of equality (==) and
    # recursed on fibonacci(n) itself, never terminating
    if n == 0 or n == 1:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)
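# Quick check (illustrative): [fibonacci(i) for i in range(7)] == [0, 1, 1, 2, 3, 5, 8]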
|
[
"shailevanin@gmail.com"
] |
shailevanin@gmail.com
|
7de1429f92673379e835074b60420d575c7e77d4
|
ccf14a2b5bdc272be7f0e0622705feaa6f060b9b
|
/DefHandler.py
|
e4af0904ad7a9f370d07e1dbd986b346bea9568d
|
[] |
no_license
|
Magdz/JavaCompiler
|
423549ff77e62800fdcc75aff210cafdd8847b8e
|
a3407b62215c6269e4e11b3f7d958904e710133e
|
refs/heads/master
| 2021-04-15T14:23:45.060346
| 2018-05-15T19:44:46
| 2018-05-15T19:44:46
| 126,528,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 633
|
py
|
class DefHandler(object):
def __init__(self):
self.handlers = {
'SYMBOL': self.handle_symbol,
'ALT': self.handle_alt,
'PLUS': self.handle_plus,
'MINUS': self.handle_minus
}
def handle_symbol(self, token, stack, values):
stack.append(token.value)
def handle_alt(self, token, stack, values):
pass
def handle_plus(self, token, stack, values):
value = stack.pop()
values.append(value)
values.append(token.value)
def handle_minus(self, token, stack, values):
value2 = stack.pop()
value1 = stack.pop()
index = value1
while index <= value2:
values.append(index)
index = chr(ord(index) + 1)
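# Illustrative walkthrough (not part of the original file): handle_minus pops
# the range endpoints from the stack and expands the character range into values.
#   handler = DefHandler()
#   stack, values = ['a', 'e'], []
#   handler.handle_minus(None, stack, values)  # the token argument is unused
#   # values == ['a', 'b', 'c', 'd', 'e']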
|
[
"magdz_008@yahoo.com"
] |
magdz_008@yahoo.com
|
dd8a02085f20df07985fdbc86e783d9cd6b3e20e
|
75d9c79dfed10968b73936c2b970b4c4a5d69bb1
|
/misc/pygments-main/pygments/token.py
|
e5eadf0d7e1cd6320e65992a9cbc2b2099f307fa
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
korpling/ANNIS
|
62e23fbf6ca04049e34a8ef8e7240e41798af7e0
|
5fb9f87382e64b5adc4a3cb8beee1c4da26e1771
|
refs/heads/main
| 2023-08-27T22:56:46.215800
| 2023-07-26T12:19:16
| 2023-07-26T12:19:16
| 5,449,546
| 53
| 23
|
Apache-2.0
| 2023-07-26T12:09:57
| 2012-08-17T08:03:40
|
Java
|
UTF-8
|
Python
| false
| false
| 5,772
|
py
|
# -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
    Exists for backwards compatibility. Use ``ttype in other`` instead.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token
    Tokens that are already tokens are returned unchanged:
        >>> string_to_tokentype(String)
        Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Escape: 'esc',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Bin: 'mb',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
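# Illustrative examples (not part of the original file) of the dynamic token
# hierarchy defined above:
#   Token.Literal.String.Double in String  # True: subtype containment
#   string_to_tokentype('String.Double')   # Token.Literal.String.Double
#   STANDARD_TYPES[String.Double]          # 's2'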
|
[
"thomaskrause@posteo.de"
] |
thomaskrause@posteo.de
|
4fdabbd008585848fa8cd66d8bf62faf423427b2
|
e06ddcd701a05d7e38ab6feff423a2afef0eea73
|
/modules/rhymebot.py
|
9e5a1d255fb4e3104f4189d1fb8ece0d0fbced57
|
[] |
no_license
|
Spacerat/SkypeBot
|
6d9f1df89d1019bac718c58784c69447c5b38f45
|
2872f49c6b9251b3fbbe141d796d511def4e671a
|
refs/heads/master
| 2021-01-19T06:34:54.785326
| 2010-08-22T01:24:36
| 2010-08-22T01:24:36
| 598,773
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
import interface
import urllib2
import re
from xml.dom import minidom
from stringsafety import *
from random import randint
def Handle(interface,command,args,messagetype):
"""!rhyme word - Get some words that rhyme with word."""
if args=="": return
url = "http://www.zachblume.com/apis/rhyme.php?format=xml&word="+escapeurl(args)
request = urllib2.Request(url,None,{'Referer':'http://spacerat.meteornet.net'})
response = urllib2.urlopen(request)
words = []
for x in response.readlines():
words.append(FormatHTML(x))
if len(words)==2:
interface.Reply('No rhymes for you. Sorry :(')
return
say = ''
for i in range(0,4):
app=''
while True:
app = words[randint(0,len(words)-1)]
app=app[0:len(app)-1]
if not app in say:
break
say+=app+" "
if say: interface.Reply(say)
interface.ComHook("rhyme",Handle,name="RhymeBot",security=3)
|
[
"spacerat3004@gmail.com"
] |
spacerat3004@gmail.com
|
e322d3ae9651d7a3eabde842bfa9e617f4a4c320
|
4244b27c3592131a13c48ad63c9bfb6759fca00c
|
/The-British-and-American-Style-of-Spelling.py
|
6c6e0a37ceb526031dfd71f0e1760e922c4c24d9
|
[] |
no_license
|
ssantic/HackerRank-RegEx-Applications
|
d9376d00a29c80d219865cfff68d93a611d4abbc
|
a9832c52d05d94dcc3097ad8e5c6b533ce751cc0
|
refs/heads/master
| 2021-01-21T19:47:30.958261
| 2017-05-23T10:04:25
| 2017-05-23T10:04:25
| 92,158,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
"""Parsing words with British or American spelling."""
import re
N = int(raw_input())
sentences = str()
for _ in xrange(N):
sentences += raw_input()
sentences += ' '
T = int(raw_input())
tests = list()
for _ in xrange(T):
tests.append(raw_input())
results = list()
for test in tests:
    regex = test[:-2] + r"[sz]e(?!\w)"  # the original '[s|z]' also matched a literal '|'
results.append(len(re.findall(regex, sentences)))
for result in results:
print result
|
[
"srdjan.santic@gmail.com"
] |
srdjan.santic@gmail.com
|
5c25c75a91f879cf01c099cea8ee396ae6f10dcb
|
b6f1370556beaa5d6a6b1f9d2678e876a15ca014
|
/yelp.py
|
f506fb76c7c89dfee266719a9fdfa027d940522e
|
[] |
no_license
|
joseEnrique/test-API-IDLReasoner
|
07bc980f1a746d332ff758c2fadadeb6ec96528c
|
d2a33b6b43335d438b15c79be3a244ddc164bbfb
|
refs/heads/main
| 2023-07-11T20:58:06.551024
| 2021-08-20T22:32:43
| 2021-08-20T22:32:43
| 398,411,355
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
import json
import requests
import asyncio
import csv
import time
from timeit import default_timer
from concurrent.futures import ThreadPoolExecutor
START_TIME = default_timer()
def read_csv():
result = []
with open('test/yelp/invalid.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
                    # the original assigned a localhost URL here and immediately
                    # overwrote it with the live API endpoint
                    # url = "http://localhost:8000/v3/businesses/search?"
                    url = "https://api.yelp.com/v3/businesses/search?"
#header = row[9].replace("AUTHENTICATION_TOKEN_HERE;", "apikey")
parameters = row[11].replace(";", "&")
parameters = parameters.replace(":", "=")
request = url + parameters
data = {'url':request}
result.append(data)
return result
# https://60f496853cb0870017a8a294.mockapi.io/api/pages/1
def request_github(session, url):
start = time.time()
# url = "https://60f496853cb0870017a8a294.mockapi.io/api/pages/" + id
with session.get(url, headers={
#'Host': 'real-yelp-simple',
"Authorization": "Bearer apikey",
# "x-access-token": "apikey",
'accept': 'application/json'}) as response:
data = response.text
end = time.time()
elapsed_time = end - start
completed_at = "{:5.2f}s".format(elapsed_time)
body = json.dumps(response.json())
detected = "false"
if 'IdlReasoner' in body:
detected = "true"
print(completed_at+","+str(detected)+","+str(response.status_code)+","+url+","+"'"+body+"'")
else:
print(completed_at + "," + str(detected) + "," + str(
response.status_code) + "," + url + "," + "")
return data
async def start_async_process():
print("{0:<30} {1:>20} {2:>20}".format("Iccid", "Completed at", "Http Code"))
list_to_process = read_csv()
with ThreadPoolExecutor(max_workers=200) as executor:
with requests.Session() as session:
loop = asyncio.get_event_loop()
tasks = [
loop.run_in_executor(
executor,
request_github,
*(session, i)
)
for i in list_to_process
]
for response in await asyncio.gather(*tasks):
pass
print(response)
def start_sync_process():
list_to_process = read_csv()
count = 0
with requests.Session() as session:
for i in list_to_process:
#print(i)
request_github(session, i['url'])
pass
if __name__ == "__main__":
# loop = asyncio.get_event_loop(
# future = asyncio.ensure_future(start_async_process())
# loop.run_until_complete(future)
start_sync_process()
|
[
"joseenriqueruiznavarro@gmail.com"
] |
joseenriqueruiznavarro@gmail.com
|
63104e86ee47534ac82f693060c513067bf1c179
|
3398f6c384281a6a324aab85e3bc6e6267a35339
|
/app/ingredients_detection_v2.py
|
6fe540eddd12940c2eaa921660c29f274ef6670d
|
[] |
no_license
|
ahyz0569/STS
|
d0b36dfb3c193da091fbdc027b022b2199338dc6
|
605d9325bb9623cee3f8922072a9b6539ab67f95
|
refs/heads/master
| 2022-12-14T19:47:08.746421
| 2020-05-25T14:50:32
| 2020-05-25T14:50:32
| 241,295,016
| 2
| 3
| null | 2022-12-08T07:27:58
| 2020-02-18T07:07:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,629
|
py
|
import torch
from torchvision import transforms
from matplotlib import patches
import matplotlib.pyplot as plt
from detecto import core, utils, visualize
from skimage import io
def detect_ingredients(image, boxes, labels=None):
plt.rcParams.update({'font.size': 14})
fig, ax = plt.subplots(figsize=(20, 10))
fig.patch.set_visible(False)
ax.axis('off')
# If the image is already a tensor, convert it back to a PILImage and reverse normalize it
if isinstance(image, torch.Tensor):
        image = utils.reverse_normalize(image)  # reverse_normalize lives in detecto.utils
image = transforms.ToPILImage()(image)
ax.imshow(image)
# Show a single box or multiple if provided
if boxes.ndim == 1:
boxes = boxes.view(1, 4)
if labels is not None and not utils._is_iterable(labels):
labels = [labels]
# Plot each box
for i in range(boxes.shape[0]):
box = boxes[i]
width, height = (box[2] - box[0]).item(), (box[3] - box[1]).item()
initial_pos = (box[0].item(), box[1].item())
rect = patches.Rectangle(initial_pos, width, height, linewidth=2, edgecolor='cyan', facecolor='none')
if labels:
ax.text(box[0], box[1] - 10, '{}'.format(labels[i]), color='black')
ax.add_patch(rect)
fig.savefig('detection_result.jpg', dpi=100)
model_labels = ['chilli', 'egg', 'pork meat', 'potato', 'pa', 'onion']
model = core.Model.load('detection_weights.pth', model_labels)
image = io.imread('./data/test_image_02.jpg')
predictions = model.predict_top(image)
labels, boxes, scores = predictions
detection_class = labels
detect_ingredients(image, boxes, labels)
|
[
"ahyz0569@gmail.com"
] |
ahyz0569@gmail.com
|
6418bcc040b763bb71aa3a3e9a374d570301ee82
|
f082417069cefe75a0861843430f6acd4c1de449
|
/webapps/ve_project/src/ve/functions.py
|
cc703d3452d116e20bebe79dbbeaf2c47ddc13ec
|
[] |
no_license
|
dobati/veproject_webapps
|
8148cb7a6478ec2214b927cfeb5436b34fc8bd8c
|
6b9857a38fb1152238194900226ea6b963bc1734
|
refs/heads/master
| 2020-12-24T15:50:34.332181
| 2016-03-17T10:08:08
| 2016-03-17T10:08:08
| 28,665,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,074
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import random
import re
import Levenshtein as lev
import aspell
import string
import unicodedata
import distance
# Function help()
# ========================================================================================
def help_words(randomsent, randomsent_machinet):
"""
Takes as input translations as unicode strings and makes a list of unique,
lowercased and tokenized words without punctuation marks.
The order is randomized.
"""
global bothtranslations
bothtranslations = []
patter_punct = r'([^a-zA-Z0-9_ÀÁÈÉÍÓÚÜàáèéíñóúü]+)'
pattern_no_punct = r'([a-zA-Z0-9_ÀÁÈÉÍÓÚÜàáèéíñóúü]+)'
exclude = set(string.punctuation)
# split where not ES alphanumeric
randomchoice_translation = randomsent.encode('utf8').split()
refsp_without_inverted_marks = []
randomchoice_machinetranslation = randomsent_machinet.encode('utf8').split()
for word in randomchoice_translation:
for punc in exclude:
if not word.startswith(punc) and not word.endswith(punc): ##favor...píntame
word = word.lower().replace('¡','').replace('¿', '').replace(punc, ' ')
else:
word = word.lower().replace('¡','').replace('¿', '').replace(punc, '')
refsp_without_inverted_marks.append(word)
for word in refsp_without_inverted_marks:
m = re.match(pattern_no_punct, word)
if word.isalnum() or m:
bothtranslations.append(word)
for word in randomchoice_machinetranslation:
        # this is not strictly necessary here, but we can use it if we want to be 100% consistent
word = word.lower().replace('¡','').replace('¿', '')
m = re.match(pattern_no_punct, word)
if word.isalnum() or m:
bothtranslations.append(word)
#random.shuffle(bothtranslations)
bothtranslations = list(sorted(set(bothtranslations)))
bothtranslations = ' '.join(bothtranslations)
return bothtranslations
def spelling_checker(inputsentence, reft, mtdetok, mttok):
"""
Function for checking the spelling of each word in users sentence and
underlining it if spelled wrongly, using Aspell
"""
global saved_tr, highlight
# works only local:
#spelling = aspell.Speller('lang', 'es')
spelling = aspell.Speller(('local-data-dir','/home/dobati/usr/lib64/aspell-0.60'),)
saved_tr = inputsentence.encode('utf-8')
patter_punct = r'([^a-zA-Z0-9_ÀÁÈÉÍÓÚÜàáèéíñóúü]+)'
pattern_no_punct = r'([a-zA-Z0-9_ÀÁÈÉÍÓÚÜàáèéíñóúü]+)'
trans_no_punct = re.split(patter_punct, saved_tr) # get a list of token including whitespace and punct as token
#################################################################################
# words in translations should be marked as spelled correctly
words_in_translations = []
reft = reft.encode('utf8').split()
mtdetok = mtdetok.encode('utf8').split()
mttok = mttok.encode('utf8').split()
words_in_translations = list(set(reft + mtdetok + mttok))
#################################################################################
spelled_list = []
for word in trans_no_punct:
m = re.match(pattern_no_punct, word) # match all words with no punct
word1 = word.decode('utf8')
word1 = unicodedata.normalize('NFKD', word1).encode('ASCII', 'ignore') # replace diacritics to nearest ascii letter
# if word has no diacritics
if word == word1:
if m:
checked_spelling = spelling.check(word)
#########################################
### added and word not in words_in_translations:
if checked_spelling != 1 and word not in words_in_translations:
word = '<highlight>'+word+'</highlight>' #'underline the false pronounced word (save_it) in the translation'
spelled_list.append(word)
else:
spelled_list.append(word)
# include whitespace and punct
else:
spelled_list.append(word)
# if word has diacritics, check the word with no diacritics as diacritics not recognise in aspell
else:
if m:
checked_spelling = spelling.check(word1)
#########################################
### added "and word not in words_in_translations"
if checked_spelling != 1 and word not in words_in_translations:
word = '<highlight>'+word+'</highlight>' #'underline the false pronounced word (save_it) in the translation'
spelled_list.append(word)
else:
spelled_list.append(word)
# include whitespace and punct
else:
spelled_list.append(word)
saved_tr = ''.join(spelled_list)
return saved_tr
# Function compare_ref()
# ========================================================================================
def compare_ref(usertrans, targettrans):
"""
Takes the target translation and the user translation as inputs.
Based on their edit distance returns an evaluation.
@ targettrans: target translation (ideal translation of a text)
@ usertrans: translation provided by the user
"""
evaluation = {'very good': ['Superb translation!', 'Great work!', 'Perfect score!', 'High five!'], \
'good': ['Good translation!', 'Nice work!', 'Almost perfect!'], \
'fair': ['Not bad!', 'Almost there!'], \
'average': ['You can do better!', 'Shall we practice a little more?']
}
# encode sentences to UTF-8
ut = usertrans.encode('utf-8')
tt = targettrans.encode('utf-8')
# works only local:
#spelling = aspell.Speller('lang', 'es')#
spelling = aspell.Speller(('local-data-dir','/home/dobati/usr/lib64/aspell-0.60'),)
# remove punctuation
replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))
# added .replace('¿','').replace('¡','') because the string method does not recognize ¿¡
tt = tt.translate(replace_punctuation).lower().replace('¿','').replace('¡','').split()
ut = ut.translate(replace_punctuation).lower().replace('¿','').replace('¡','').split()
# if less than 5 words in both sentences
if len(tt) < 5 and len(ut) < 5:
word_is_es = 0
word_in_ref = 0
length_tt = len(tt)
length_ut = len(ut)
# check if w in user also in ref
for w in ut:
if w in tt:
word_in_ref += 1
# check if w in user is spanish
for w in ut:
w = w.decode('utf8')
w = unicodedata.normalize('NFKD', w).encode('ASCII', 'ignore')
if spelling.check(w) == 1:
word_is_es += 1
else:
continue
# get ratio spanish word and word in ref
ratio_is_es = word_is_es/length_ut
ratio_in_ref = word_in_ref/length_tt
        # get Levenshtein ratios over tokens and characters
lensum = len(tt)+len(ut)
ratio_lev_tok = (lensum - distance.levenshtein(tt, ut)) / lensum
tt = ' '.join(tt)
ut = ' '.join(ut)
ratio_lev_let = lev.ratio(tt,ut)
# get best ratio
best_lev_ratio = max(ratio_lev_tok, ratio_lev_let)
        # if the user sentence has fewer than 3 words, check that at least half the words are in the reference and all words are Spanish
if length_ut < 3:
if ratio_in_ref >= 0.5:
if ratio_is_es == 1:
if best_lev_ratio >= 0.6:
return random.choice(evaluation['very good'])
else:
return random.choice(evaluation['fair'])
else:
return random.choice(evaluation['average'])
else:
return random.choice(evaluation['average'])
        # if the user sentence has 3 or 4 words, check that at least 60% of the words are in the reference and 90% are Spanish
else:
if ratio_in_ref >= 0.6:
if ratio_is_es >= 0.9:
if best_lev_ratio >= 0.7:
return random.choice(evaluation['very good'])
elif best_lev_ratio >= 0.6:
return random.choice(evaluation['good'])
else:
return random.choice(evaluation['average'])
elif ratio_is_es >= 0.5:
if best_lev_ratio >= 0.9:
return random.choice(evaluation['good'])
elif best_lev_ratio >= 0.7:
return random.choice(evaluation['fair'])
else:
return random.choice(evaluation['average'])
else:
return random.choice(evaluation['average'])
else:
return random.choice(evaluation['average'])
    # if either sentence has 5 or more words, take the best Levenshtein ratio (tokens vs. characters)
else:
lensum = len(tt)+len(ut)
ratio_lev_tok = (lensum - distance.levenshtein(tt, ut)) / lensum
tt = ' '.join(tt)
ut = ' '.join(ut)
ratio_lev_let = lev.ratio(tt,ut)
ratio = max(ratio_lev_let, ratio_lev_tok)
if ratio >= 0.9:
return random.choice(evaluation['very good'])
elif ratio >= 0.75:
return random.choice(evaluation['good'])
elif ratio >= 0.6:
return random.choice(evaluation['fair'])
else:
return random.choice(evaluation['average'])
# Function compare_mt()
# ========================================================================================
def compare_mt(usertrans, referencetrans, machinetrans):
"""
Compare if user translation better or worst
than machine translation
"""
# deleted: 'You did as good as the machine translation!'
evaluation = {'better': ['Congratulations, you did better than the machine translation!', \
'Be proud, you were better than the machine translation!', \
'You are the best, even better than the machine translation!'], \
'same': [
'This is a tie between you and the machine translation!', \
'The machine translation was about as good as you!'], \
'worst': ["The machine translation beat you, let's try to do better!", \
"What a shame, you were defeated by the machine translation.", \
"Next time, you will beat the machine translation, but not this time!"]}
# encode sentences to UTF-8
ut = usertrans.encode('utf8')
tt = referencetrans.encode('utf8')
mt = machinetrans.encode('utf8')
# remove punctuation
replace_punctuation = string.maketrans(string.punctuation, ' '*len(string.punctuation))
#added .replace('¿','').replace('¡','') because the string method does not recognize ¿¡
ut = ut.translate(replace_punctuation).lower().replace('¿','').replace('¡','')
tt = tt.translate(replace_punctuation).lower().replace('¿','').replace('¡','')
mt = mt.translate(replace_punctuation).lower().replace('¿','').replace('¡','')
    # Levenshtein character ratio
lev_let_ut = lev.ratio(tt, ut)
lev_let_mt = lev.ratio(tt, mt)
    # Levenshtein token ratio
ut = ut.split()
tt = tt.split()
mt = mt.split()
lensum_user = len(ut)+len(tt)
lensum_machine = len(mt)+len(tt)
lev_tok_ut = (lensum_user - distance.levenshtein(tt, ut)) / lensum_user
lev_tok_mt = (lensum_machine - distance.levenshtein(tt, mt)) / lensum_machine
    # get the best Levenshtein ratio
ratio_ut = max(lev_let_ut, lev_tok_ut)
ratio_mt = max(lev_let_mt, lev_tok_mt)
########################################################
# added:
#
    # evaluate whether the user did better than, worse than, or about the same as the machine
if abs(ratio_ut - ratio_mt) < 0.07:
return random.choice(evaluation['same'])
else:
if ratio_ut > ratio_mt:
return random.choice(evaluation['better'])
else:
return random.choice(evaluation['worst'])
# TO DO: add some more specific evaluations and tie the two Feedbacks together
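# Worked example (illustrative, not from the original module) of the two
# ratios combined above, for tt = 'el gato es muy negro hoy'.split() and
# ut = 'el gato era muy negro hoy'.split() (6 tokens each, so the long-sentence
# branch applies):
#   token ratio: lensum = 6 + 6 = 12; one differing token gives (12 - 1) / 12 ≈ 0.92
#   character ratio: lev.ratio(' '.join(tt), ' '.join(ut)) ≈ 0.94
#   the max of the two, ≈ 0.94, clears the 0.9 threshold, so a 'very good'
#   message is returned.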
|
[
"dolores.batinic@uzh.ch"
] |
dolores.batinic@uzh.ch
|
67d7397c7e522f6c520bc6bad0edb954381f1e3a
|
773c69c39387af8c8e374ce307eb92b850045d15
|
/tools/processHelen.py
|
f1bc7f3e948e3b347f9170978e4419bb809a1151
|
[
"MIT"
] |
permissive
|
tayfunates/pix2pix-tensorflow
|
5c721e55a9f752f5ac7fa553bc1ec8ec29326536
|
c696156636899f7cec1c7c0ad520dcfe9aa5acbc
|
refs/heads/master
| 2020-03-18T01:22:18.915237
| 2018-06-03T11:54:28
| 2018-06-03T11:54:28
| 134,139,317
| 0
| 0
| null | 2018-05-20T10:04:31
| 2018-05-20T10:04:31
| null |
UTF-8
|
Python
| false
| false
| 9,363
|
py
|
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
import matplotlib
import scipy.misc as sm
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--label_images_dir", required=True, help="path to folder containing labels inside the face")
parser.add_argument("--output_dir_images", required=True, help="output path")
parser.add_argument("--output_dir_labels", required=True, help="output path")
parser.add_argument("--labels", required=True, help="output labels with comma separation. 00 and 01 are musts. e.g. 00,01,04,07")
parser.add_argument("--color_map", required=True, help="Color map png")
parser.add_argument("--workers", type=int, default=1, help="number of workers")
#Resizing operation parameters
parser.add_argument("--resize", action="store_true", help="decide whether or not to resize the input and the label images")
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
#Crop options
parser.add_argument("--crop", action="store_true", help="decide whether or not to crop the input and the label images")
#Label parameters
parser.add_argument("--label_cut_threshold", type=int, default=128, help="threshold for converting grayscale label images to binary ones")
a = parser.parse_args()
output_train_directory_images = os.path.join(a.output_dir_images, "train")
output_test_directory_images = os.path.join(a.output_dir_images, "test")
output_val_directory_images = os.path.join(a.output_dir_images, "val")
output_train_directory_labels = os.path.join(a.output_dir_labels, "train")
output_test_directory_labels = os.path.join(a.output_dir_labels, "test")
output_val_directory_labels = os.path.join(a.output_dir_labels, "val")
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def crop(src, cropReference):
rows = np.any(cropReference, axis=1)
cols = np.any(cropReference, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
    # rmax/cmax are the last masked indices; +1 keeps them, since slicing excludes the stop index
    return src[rmin:rmax + 1, cmin:cmax + 1, :]
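# Illustrative sketch (comment added, not in the original file): np.any reduces
# the mask along each axis and np.where picks the first/last True index. For a
# 4x4 mask that is nonzero only at rows 1-2 and cols 2-3:
#   rows -> [False, True, True, False], so rmin, rmax = 1, 2
#   cols -> [False, False, True, True], so cmin, cmax = 2, 3
# and the crop keeps src[1:3, 2:4, :], i.e. exactly the masked bounding box.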
def getLabelToColorDictionary():
colorDict = {}
cmap = matplotlib.colors.ListedColormap(sm.imread(a.color_map)[0].astype(np.float32) / 255.)
cmap = cmap(np.arange(cmap.N))
#Color settings according to https://github.com/classner/generating_people
colorDict['00'] = [255.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0]
colorDict['01'] = cmap[11][:3]
colorDict['02'] = cmap[11][:3]
colorDict['03'] = cmap[11][:3]
colorDict['04'] = cmap[20][:3]
colorDict['05'] = cmap[21][:3]
colorDict['06'] = cmap[19][:3]
colorDict['07'] = cmap[18][:3]
colorDict['08'] = cmap[18][:3]
colorDict['09'] = cmap[18][:3]
colorDict['10'] = [255.0 / 255.0, 0.0 / 255.0, 0.0 / 255.0]
return colorDict
def getLabelImages(label_folder):
ret = {}
labelIds = a.labels.split(',')
for lid in labelIds:
for label_path in im.find(label_folder):
if label_path.find('lbl'+lid) > 0: #if found the label
ret[lid] = label_path
break
return ret
def thresholdImage(img, thresh):
img[img >= thresh] = 1.0
img[img < thresh] = 0.0
return img
def getLabelImage(label_image_paths, color_dict):
res = None
thresh = a.label_cut_threshold / 255.0
    for label_id, label_img_path in label_image_paths.items():  # .items() keeps this valid on Python 3 as well
label_image = im.load(label_img_path)
        print(label_img_path)
        print(label_image.shape)
label_image = thresholdImage(label_image, thresh)
label_image = np.reshape(label_image, (label_image.shape[0], label_image.shape[1]))
if res is None:
            # zeros, not empty: pixels never touched by any label must stay black instead of holding garbage
            res = np.zeros((label_image.shape[0], label_image.shape[1], 3))
res[label_image > 0.5, :] = color_dict[label_id]
return res
def getCropReference(label_image_paths):
crop_reference = im.load(label_image_paths['01'])
thresh = a.label_cut_threshold / 255.0
crop_reference = thresholdImage(crop_reference, thresh)
return crop_reference
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir_labels):
os.makedirs(a.output_dir_labels)
if not os.path.exists(output_train_directory_labels):
os.makedirs(output_train_directory_labels)
if not os.path.exists(output_test_directory_labels):
os.makedirs(output_test_directory_labels)
if not os.path.exists(output_val_directory_labels):
os.makedirs(output_val_directory_labels)
processInputImages = a.resize or a.crop
if not os.path.exists(a.output_dir_images) and processInputImages:
os.makedirs(a.output_dir_images)
if not os.path.exists(output_train_directory_images) and processInputImages:
os.makedirs(output_train_directory_images)
if not os.path.exists(output_test_directory_images) and processInputImages:
os.makedirs(output_test_directory_images)
if not os.path.exists(output_val_directory_images) and processInputImages:
os.makedirs(output_val_directory_images)
#cropped images directory
splits = ['train', 'test', 'val']
src_paths = []
dst_paths_labels = []
dst_paths_images = []
skipped = 0
for split in splits:
split_folder = os.path.join(a.input_dir, split)
for src_path in im.find(split_folder):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path_label = os.path.join(a.output_dir_labels, split)
dst_path_label = os.path.join(dst_path_label, name + ".png")
dst_path_image = os.path.join(a.output_dir_images, split)
dst_path_image = os.path.join(dst_path_image, name + ".png")
if os.path.exists(dst_path_label) or os.path.exists(dst_path_image):
skipped += 1
else:
src_paths.append(src_path)
dst_paths_labels.append(dst_path_label)
dst_paths_images.append(dst_path_image)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path_label, dst_path_image in zip(src_paths, dst_paths_labels, dst_paths_images):
name, _ = os.path.splitext(os.path.basename(src_path))
                print('Name: ' + name)
label_folder = os.path.join(a.label_images_dir, name)
label_image_paths = getLabelImages(label_folder)
                print(label_image_paths)
color_dict = getLabelToColorDictionary()
label_img = getLabelImage(label_image_paths, color_dict)
if processInputImages:
processedImage = im.load(src_path)
if a.crop:
crop_reference = getCropReference(label_image_paths)
processedImage = crop(processedImage, crop_reference)
label_img = crop(label_img, crop_reference)
if a.resize:
processedImage = resize(processedImage)
label_img = resize(label_img)
im.save(processedImage, dst_path_image)
im.save(label_img, dst_path_label)
complete()
main()
|
[
"tayfun@caverna.cs.hacettepe.edu.tr"
] |
tayfun@caverna.cs.hacettepe.edu.tr
|
c8ca518523066602d66a33743dc6fd505d5b7567
|
1f295ce58be3f299f2b66e81b2df2281ff2c37a1
|
/project_draft.py
|
4c7b388fcd05121a89c4789f6bc2f754eecd8b34
|
[] |
no_license
|
LucasSabbatini/aind-p2-game-playing-agent
|
883867e873f784f1b75ef0f79f7d5acc7cd196db
|
0de3de81742f77deb078771500518537d9299a73
|
refs/heads/master
| 2021-08-23T21:28:45.558613
| 2017-12-06T16:40:00
| 2017-12-06T16:40:00
| 111,029,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,151
|
py
|
import random
import numpy as np


class SearchTimeout(Exception):
    """Raised when the search timer expires; referenced below but missing from the original file."""
    pass
def custom_score(game, player):
"""Calculate the heuristic value of a game state from the point of view
of the given player.
This should be the best heuristic function for your project submission.
Note: this function should be called from within a Player instance as
`self.score()` -- you should not need to call this function directly.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
player : object
A player instance in the current game (i.e., an object corresponding to
one of the player objects `game.__player_1__` or `game.__player_2__`.)
Returns
-------
float
The heuristic value of the current game state to the specified player.
"""
# TODO: finish this function!
if game.is_loser(player):
return float("-inf")
if game.is_winner(player):
return float("inf")
return float(len(game.get_legal_moves(player)))
class IsolationPlayer:
def __init__(self, search_depth=3, score_fn=custom_score, timeout=10.):
self.search_depth = search_depth
        self.score = score_fn  # must be named `score`: the search methods below call self.score(game, self)
self.time_left = None
self.TIMER_THRESHOLD = timeout
class MinimaxPlayer(IsolationPlayer):
"""
Game agent using only minimax method.
"""
def get_move(self, game, time_left):
"""Search for the best move from the available legal moves and return a
result before the time limit expires.
************** YOU DO NOT NEED TO MODIFY THIS FUNCTION *************
For fixed-depth search, this function simply wraps the call to the
minimax method, but this method provides a common interface for all
Isolation agents, and you will replace it in the AlphaBetaPlayer with
iterative deepening search.
Parameters
----------
game : `isolation.Board`
An instance of `isolation.Board` encoding the current state of the
game (e.g., player locations and blocked cells).
time_left : callable
A function that returns the number of milliseconds left in the
current turn. Returning with any less than 0 ms remaining forfeits
the game.
Returns
-------
(int, int)
Board coordinates corresponding to a legal move; may return
(-1, -1) if there are no available legal moves.
"""
self.time_left = time_left
# Initialize the best move so that this function returns something
# in case the search fails due to timeout
best_move = (-1, -1)
try:
# The try/except block will automatically catch the exception
# raised when the timer is about to expire.
return self.minimax(game, self.search_depth)
except SearchTimeout:
pass # Handle any actions required after timeout as needed
# Return the best move from the last completed search iteration
return best_move
def minimax(self, game, depth):
"""
This function will perform a depth-limited searh to find the best move.
It'll act like the minimax-decision funciton previously implemented, so
it'll call a max_value and a min_value methods, which will be implemented
within this class.
This method is the starting (root) node of a search tree, and what follows
is a min node.
Assumptions:
1. The minimax algorithm finds the path to the best game for max (it
searches the entire tree to find the answer).
refutation: This code will not search the whole tree for the best move,
since it is a depth limited search. It'll keep opening branches till it reaches
the defined depth, and than apply the evaluation function in that state and
return the value found.
2. Since this is a depth limited search, this will actually be a quiescent
search, which means will iteratively go down tree, till we find a depth
where the eval
refutation: Will not be a quiescent search. The depth is well defined, so
we don't have to find a quiescence depth.
Arguments:
- game: Board object representing the current game state
- depth: depth which our code should look for the answer
Returns:
- move: a tuple of the form (int, int) representing the position on the board
which the MinimaxPlayer should move
"""
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
        possible_actions = game.get_legal_moves()
        if not possible_actions:
            # no legal moves left, so forfeit with the conventional (-1, -1)
            return (-1, -1)
        values_for_actions = np.zeros(len(possible_actions))
        for i in range(len(possible_actions)):
            values_for_actions[i] = self.min_value(game.forecast_move(possible_actions[i]), depth - 1)
        return possible_actions[np.argmax(values_for_actions)]
def max_value(self, game, depth):
"""Max player in the minimax method. Look for the following move
that will maximize the expected evaluation
Parameters
----------
game : isolation.Board
        Board object representing a state of the game. It is a forecast state
following the last min action in the search tree
depth : int
remaining steps to reach maximum depth specified
Returns
-------
val : int
Utility value for current state
"""
# timer check
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
# checking if limit depth or terminal test
if depth == 0:
return self.score(game, self)
v = float("-inf")
for action in game.get_legal_moves():
v = max(v, self.min_value(game.forecast_move(action), depth-1))
return v
def min_value(self, game, depth):
"""Min player in the minimax method. Look for the following move that will
minimize the expected evaluation
Parameters
----------
game : isolation.Board
        Board object representing a state of the game. It is a forecast state
        following the last max action in the search tree
depth : int
remaining steps to reach maximum depth specified
Returns
-------
val : int
        Minimum expected value associated with possible actions
"""
        # timer check
if self.time_left() < self.TIMER_THRESHOLD:
raise SearchTimeout()
# checking if limit depth or terminal test
if depth == 0:
return self.score(game, self)
v = float("inf")
for action in game.get_legal_moves():
v = min(v, self.max_value(game.forecast_move(action), depth-1))
return v
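
# Minimal usage sketch (comment added, not in the original file). `Board` is
# the course's isolation.Board and the 150 ms budget is an assumption; get_move
# just needs any callable reporting the milliseconds remaining.
#
#     import time
#     from isolation import Board
#
#     player_1, player_2 = MinimaxPlayer(), MinimaxPlayer()
#     game = Board(player_1, player_2)
#     deadline = time.time() * 1000 + 150
#     best_move = player_1.get_move(game, lambda: deadline - time.time() * 1000)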
|
[
"lucassabbatini@gmail.com"
] |
lucassabbatini@gmail.com
|
b79590ac3e08d5f6a2e861b214934ecac1ae93c7
|
314fbb9641aee064730a1a12f3164ac512a6c27d
|
/Desktop/Project/todolist/lists/models.py
|
0560a18a391791cea070472f7d0608d080a5cd42
|
[] |
no_license
|
sarthakprajapati/todolist
|
7dfc3f94dc6e812b45b292fbf84946dd505f548f
|
ed716f0b405abb16dbe5cb73e8b03adbd465ef7a
|
refs/heads/master
| 2021-05-08T15:40:01.795570
| 2018-02-03T19:44:47
| 2018-02-03T19:44:47
| 120,122,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from django.db import models
# Create your models here.
class todolist(models.Model):
title = models.CharField(max_length=20)
description = models.CharField(max_length=200)
date = models.DateField(auto_now=False, auto_now_add=False)
active = models.BooleanField(default=True)
def __str__(self):
return self.title
    def __unicode__(self):
        return self.title
|
[
"sarthakprajapati@live.in"
] |
sarthakprajapati@live.in
|
615c5d23737fde98c2ed00eb9ee6f0c664ae518f
|
9b3293e0f94c717f88f14bf161785685688b7bd1
|
/flickr/data.py
|
f47293653a6a97106e20ba7148cf2c0c2a92cfe5
|
[
"MIT"
] |
permissive
|
Deepayan137/ml_ss19_cross-modal
|
715c4809a9718b4cf8a9221eea0eb1420bc9f937
|
10159be3fd97730d1b0d7e10aea371215e65aad6
|
refs/heads/master
| 2020-06-18T13:35:26.317897
| 2019-07-11T06:18:21
| 2019-07-11T06:18:21
| 196,319,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,103
|
py
|
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import os
import nltk
from PIL import Image
import numpy as np
import json as jsonmod
import pickle
import pdb
class FlickrDataset(data.Dataset):
"""
Dataset loader for Flickr30k and Flickr8k full datasets.
"""
def __init__(self, root, split, vocab, transform=None):
self.root = root
self.vocab = vocab
self.split = split
self.transform = transform
with open(os.path.join(root, '%s.txt'%split)) as f:
self.lines = f.readlines()
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
vocab = self.vocab
img_id = index//5
root = self.root
image_name = self.lines[index].split(' ')[0] + '.jpg'
caption = ' '.join(self.lines[index].split(' ')[1:])
# pdb.set_trace()
image = Image.open(os.path.join(root, 'Flicker8k_Dataset', image_name)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(
str(caption).lower())
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target, index, img_id
def __len__(self):
return len(self.lines)
def collate_fn(data):
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions, ids, _ = zip(*data)
# Merge images (convert tuple of 3D tensor to 4D tensor)
images = torch.stack(images, 0)
    # Merge captions (convert tuple of 1D tensors to a 2D tensor)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths, list(ids)
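
# Worked illustration (comment added, not in the original file): after the
# sort, captions of lengths [5, 3] produce lengths == [5, 3] and a 2 x 5
# LongTensor `targets` whose first row is the full 5-token caption and whose
# second row is the 3-token caption followed by two zero pads.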
class PrecompDataset(data.Dataset):
"""
Load precomputed captions and image features
Possible options: f8k, f30k, coco, 10crop
"""
def __init__(self, data_path, data_split, vocab):
self.vocab = vocab
loc = data_path + '/'
# Captions
self.captions = []
with open(loc+'f8k_%s_caps.txt' % data_split, 'rb') as f:
for line in f:
self.captions.append(line.strip())
# Image features
self.images = np.load(loc+'f8k_%s_ims.npy' % data_split)
self.length = len(self.captions)
# rkiros data has redundancy in images, we divide by 5, 10crop doesn't
if self.images.shape[0] != self.length:
self.im_div = 5
else:
self.im_div = 1
# the development set for coco is large and so validation would be slow
if data_split == 'dev':
self.length = 5000
def __getitem__(self, index):
# handle the image redundancy
img_id = index//self.im_div
image = torch.Tensor(self.images[img_id])
caption = self.captions[index]
vocab = self.vocab
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(
caption.lower().decode('utf-8'))
caption = []
# pdb.set_trace()
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target, index, img_id
def __len__(self):
return self.length
def get_precomp_loader(data_path, data_split, vocab, batch_size=100,
shuffle=True):
"""Returns torch.utils.data.DataLoader for custom coco dataset."""
dset = PrecompDataset(data_path, data_split, vocab)
data_loader = torch.utils.data.DataLoader(dataset=dset,
batch_size=batch_size,
shuffle=shuffle,
pin_memory=True,
collate_fn=collate_fn)
return data_loader
def get_transform(split_name):
normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
t_list = []
if split_name == 'train':
t_list = [transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip()]
elif split_name == 'dev':
t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
elif split_name == 'test':
t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
t_end = [transforms.ToTensor(), normalizer]
transform = transforms.Compose(t_list + t_end)
return transform
def get_loader_single(root, split, vocab, transform,
batch_size=128, shuffle=True,
collate_fn=collate_fn):
dataset = FlickrDataset(root=root,
split=split,
vocab=vocab,
transform=transform)
# Data loader
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=collate_fn)
return data_loader
def get_loaders(root, vocab, batch_size, precomp=False):
    if precomp:
train_loader = get_precomp_loader(root, 'train', vocab,
batch_size, False)
val_loader = get_precomp_loader(root, 'dev', vocab,
batch_size, False)
else:
transform = get_transform('train')
train_loader = get_loader_single(root, 'train',
vocab, transform,
batch_size=batch_size, shuffle=True,
collate_fn=collate_fn)
transform = get_transform('dev')
val_loader = get_loader_single(root, 'dev',
vocab, transform,
batch_size=batch_size, shuffle=False,
collate_fn=collate_fn)
transform = get_transform('test')
test_loader = get_loader_single(root, 'test',
vocab, transform,
batch_size=batch_size, shuffle=False,
collate_fn=collate_fn)
return train_loader, val_loader
# path = '/ssd_scratch/cvit/deep/Flickr-8K'
# with open('./vocab/%s_vocab.pkl' %'flickr', 'rb') as f:
# vocab = pickle.load(f)
# train, val = get_loaders(path, vocab, 128)
# for i, batch in enumerate(train):
# img, targ, lengths = batch
# pdb.set_trace()
# # data = FlickrDataset(path, 'test', vocab)
|
[
"deepayan137@gmail.com"
] |
deepayan137@gmail.com
|
0e6013ae1d59046d1b969eec8d8a3e912e94342c
|
0f276c28a4a66fa715368db2455af5c8caec2823
|
/todo/models.py
|
30bcccb32587940d53132bd564b8c511cc1f6563
|
[] |
no_license
|
ryor40172/todo
|
6354d2549ad331c0ac117568d6af3f43e95e3e2f
|
135d11f8c036661daaea96cf11319fdf0f4acf84
|
refs/heads/master
| 2022-07-03T16:31:03.620641
| 2020-05-13T08:15:15
| 2020-05-13T08:15:15
| 263,567,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
from django.db import models
# Create your models here.
PRIORITY = (('danger','high'),('info','normal'),('success','low'))
class TodoModels(models.Model):
title = models.CharField(max_length=100)
memo = models.TextField()
priority = models.CharField(
max_length = 50,
choices = PRIORITY
)
duedate = models.DateField()
def __str__(self):
return self.title
|
[
"r.y@yofuneracBookea.elecom"
] |
r.y@yofuneracBookea.elecom
|
445680537fd1407a06de0e00035762daa2c87efb
|
3ce0b93b6f7d94e82e1dfe4498b87680f8f0ed85
|
/1977.py
|
856c14edca7c46077d50683046bb133847f07977
|
[] |
no_license
|
stellaluminary/Baekjoon
|
8ff31fb3dc2d4ca55a4da34e4156c595adbead52
|
22ebacf6c80872bd98f610d93fd527b70c3d760e
|
refs/heads/master
| 2022-09-05T02:16:57.112602
| 2022-08-10T13:41:57
| 2022-08-10T13:41:57
| 211,986,841
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
"""
Method 2
"""
n = int(input())
m = int(input())
t = [i**2 for i in range(101) if n <= i**2 <= m]
print(f'{sum(t)}\n{t[0]}' if t else -1)
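# Worked example (comment added): n=60, m=100 gives t = [64, 81, 100], so this prints 245 then 64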
"""
Method 1
"""
n = int(input())
m = int(input())
t = [i**2 for i in range(1,101)]
s = []
for i in t:
if n <= i <= m:
s.append(i)
if len(s):
print(sum(s))
print(min(s))
else:
print(-1)
|
[
"43261434+stellaluminary@users.noreply.github.com"
] |
43261434+stellaluminary@users.noreply.github.com
|
1edfcedba76e88ee929715b12d2c8848b2dfbf9f
|
1f4ee7cfb3ecd5c712f4e59f16b4aa908b54dc49
|
/snippets.py
|
8154c41760b689b8c5b6767973e7df548773f5aa
|
[] |
no_license
|
mrush336/Self_Driving_Car_Course
|
32f4cf8e1fb26cf667416f7028e8d86ce8c3605d
|
f74f34964319c4ae5b8f45c1941eb52848665971
|
refs/heads/master
| 2022-12-21T07:49:52.195042
| 2020-10-05T22:31:36
| 2020-10-05T22:31:36
| 297,202,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
!git clone https://bitbucket.org/jadslim/german-traffic-signs
!ls german-traffic-signs
%matplotlib inline
|
[
"mrush336@gmail.com"
] |
mrush336@gmail.com
|
03c40469acfd2bccb77b80c41b3fef53b4f49765
|
44bb6546b50be63fdde19a717bb35e6d13963061
|
/oip-vpc/bin/rst2s5.py
|
e720d1a932dbe36b9fb9fabf6a8e0e258c3442fa
|
[] |
no_license
|
psdeepu26/python_test
|
cc2ac322503a73e1ba974198b9e282e7a5f1b5c6
|
ee74f619d8aaba4b4e6d5d48c7274635b0a791ba
|
refs/heads/master
| 2023-05-14T04:30:44.112433
| 2020-07-03T14:11:56
| 2020-07-03T14:11:56
| 276,915,441
| 0
| 0
| null | 2021-06-10T23:07:37
| 2020-07-03T14:12:09
|
Python
|
UTF-8
|
Python
| false
| false
| 676
|
py
|
#!/Users/spatrayuni/virutalenvs/python/oip-vpc/bin/python
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
|
[
"psdeepu26@gmail.com"
] |
psdeepu26@gmail.com
|
bad630554e36736b01efb8bc2574a0ffb943de1e
|
d45b4db35e5e8baef1aa71bb8ae55236e8e8de67
|
/transid2geneid.py
|
7046b81434910dcabb244921926ff25d3f634fdd
|
[] |
no_license
|
davek44/utility
|
a5af6bfff2cf576671dcdfa7bdfdac97a417b26a
|
5a2581078bf9dab78cc182f2917ecb671d04570c
|
refs/heads/master
| 2023-04-30T21:19:40.683342
| 2023-04-20T22:30:48
| 2023-04-20T22:30:48
| 7,212,829
| 18
| 11
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,307
|
py
|
#!/usr/bin/env python
from optparse import OptionParser
import gff
################################################################################
# transid2geneid.py
#
# Given a transcript id, produce a gene id to punch into the browser
################################################################################
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <trans id>'
parser = OptionParser(usage)
parser.add_option('-l', dest='lnc_file', default='/Users/dk/research/common/data/lncrna/lnc_catalog.gtf', help='lncRNA catalog file [Default: %default]')
(options,args) = parser.parse_args()
if len(args) != 1:
parser.error('Must provide transcript id')
else:
trans_id = args[0]
for line in open(options.lnc_file):
a = line.split('\t')
kv = gff.gtf_kv(a[8])
if kv['transcript_id'] == trans_id:
            print(kv['gene_id'])
break
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
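
# Example of the matching step (comment added; gff.gtf_kv is the author's own
# utility, so its exact return shape is an assumption): given an attributes
# field like
#   gene_id "LNC001"; transcript_id "LNC001.1";
# it presumably returns {'gene_id': 'LNC001', 'transcript_id': 'LNC001.1'},
# so running the script with LNC001.1 would print LNC001.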
|
[
"dkelley@fas.harvard.edu"
] |
dkelley@fas.harvard.edu
|
32faa30396f23e4011a0a610069b60514d8a5177
|
46a5163ef27278091fa5d237757d02e6d537a5a8
|
/vis_utils/plot_runs_direct.py
|
7ace96d944ef58bd23089038a2819f6ef0a0cdf8
|
[
"MIT"
] |
permissive
|
lknak/tigr
|
5630f02c24bcb9030bf924a315d5ad0069161ab0
|
614a6435c483a25cb8183c08184d140120053a4f
|
refs/heads/main
| 2023-05-28T02:31:02.966208
| 2021-06-07T10:26:42
| 2021-06-07T10:26:42
| 369,203,165
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,382
|
py
|
import sys, os, re, time
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as InterFun
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# Define folder path for csvs
FOLDER_PATH_RUNS = os.path.join('output', 'cheetah-multi-task', '2021_04_26_8_task')
FOLDER_PATH_FIG = os.path.join('log', 'figures')
CONCAT_RUNS = False
SMOOTHING = 0.1
# Setup:
# List of run names that should be plotted
RUNS_TO_PLOT = [
# 'MLP_5T',
# 'GRU_5T',
# 'CONV_5T',
# 'TRANSFORMER_5T',
# 'MLP_5T',
# 'MLP_5T_PCGRAD',
# 'MLP_10T',
# 'MLP_10T_PCGRAD',
# 'MLP_1T',
# 'MLP_2T',
# 'MLP_3T',
# 'MLP_4T',
# 'MLP_5T',
# 'MLP_10T',
# 'MLP_20T',
# 'MLP_5T_LD1',
# 'MLP_5T_LD2',
# 'MLP_5T_LD3',
# 'MLP_5T_LD4',
# 'MLP_AT0S',
# 'MLP_AT1S',
# 'MLP_AT5S',
# 'MLP_AT10S',
# 'MLP_AT25S',
# 'MLP_P_A0001_R01',
# 'MLP_P_A0001_R0',
# 'MLP_P_A001_R01',
# 'MLP_P_A01_R01',
# 'MLP_5_PRIOR_GMM',
# 'MLP_5_TRUE_GMM',
# 'MLP_5_COMB._ACTIV.',
# 'MLP_5_DIRECT_ACTIV.',
# 'AKZ0.001_BE0.01_GS0.01',
# 'AKZ0.001_BE0.01_GS0.1',
# 'AKZ0.001_BE0.1_GS0.01',
# 'AKZ0.01_BE0.1_GS0.01',
# 'AKZ0.01_BE0.1_GS0.1',
# 'AKZ0.1_BE0.01_GS0.1',
# 'AKZ0.1_BE0.1_GS0.01',
# 'AKZ0.1_BE0.1_GS0.1'
# 'SM_NONE',
# 'SM_LINEAR',
'8_TASK_GRU_64'
]
# Setup:
# DICT = {Title: regex, ...}
RUN_REGEX_DICT = {
'MLP_1T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=1_ls=2_prior_gmm',
'MLP_2T': '2021_02_27_20_07_39_prior_gmm_mlp_2',
'MLP_3T': '2021_02_27_20_07_25_prior_gmm_mlp_3',
'MLP_4T': '2021_02_27_20_07_12_prior_gmm_mlp_4',
'MLP_5T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_10T': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=10_ls=2_prior_gmm',
'MLP_20T': '2021_02_24_16_35_15_prior_gmm_mlp_20',
'MLP_5T_LD1': '2021_02_27_20_05_41_prior_gmm_mlp_5_ld1',
'MLP_5T_LD2': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_5T_LD3': '2021_02_27_20_05_51_prior_gmm_mlp_5_ld3',
'MLP_5T_LD4': '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=4_prior_gmm',
'MLP_AT0S': '2021_02_25_17_05_02_prior_gmm_mlp_5',
'MLP_AT1S': '2021_03_02_07_22_39_prior_gmm_mlp_at1',
'MLP_AT5S': '2021_03_01_18_12_38_prior_gmm_mlp_at5',
'MLP_AT10S': '2021_03_01_18_13_10_prior_gmm_mlp_at10',
'MLP_AT25S': '2021_03_02_07_23_06_prior_gmm_mlp_at25',
'MLP_P_A0001_R01': '2021_02_25_17_05_02_prior_gmm_mlp_5',
'MLP_P_A0001_R0': '2021_03_01_03_25_34_prior_gmm_a_0001_r_0',
'MLP_P_A001_R01': '2021_03_01_03_25_53_prior_gmm_a_001_r_01',
'MLP_P_A01_R01': '2021_03_01_03_26_12_prior_gmm_a_01_r_01',
'MLP_5_PRIOR_GMM' : '.*cheetah_multi_task_io=prior_gmm_et=mlp_ts=5_ls=2_prior_gmm',
'MLP_5_TRUE_GMM' : '.*cheetah_multi_task_io=true_gmm_et=mlp_ts=5_ls=2_true_gmm',
'MLP_5_COMB._ACTIV.' : '.*cheetah_multi_task_io=comb_et=mlp_ts=5_ls=2_activation_combination',
'MLP_5_DIRECT_ACTIV.' : '.*cheetah_multi_task_io=direct_et=mlp_ts=5_ls=2_direct_activation',
'GRU_5T': '.*cheetah_multi_task_io=prior_et=gru_ts=5_ls=2_prior_gmm',
'GRU_10T': '2021_02_25_17_05_58_prior_gmm_gru_10',
'CONV_5T': '.*cheetah_multi_task_io=prior_gmm_et=conv_ts=5_ls=2_prior_gmm',
'CONV_10T': '2021_02_25_17_05_23_prior_gmm_conv_10',
'TRANSFORMER_5T': '.*cheetah_multi_task_io=prior_et=transformer_ts=5_ls=2_prior_gmm',
'TRANSFORMER_10T': '2021_02_26_15_39_57_prior_gmm_transformer_10',
'MLP_5T_PCGRAD': '2021_03_01_03_15_43_prior_gmm_mlp_5_pcgrad',
'MLP_10T_PCGRAD': '2021_02_26_16_42_03_prior_gmm_mlp_10_pcgrad',
#'TIBIAMRL': 'PLACEHOLDER',
'AKZ0.001_BE0.01_GS0.01': '.*cheetah_multi_task_akz~0.001_be~0.01_gs~0.01_prior_gmm',
'AKZ0.001_BE0.01_GS0.1': '.*cheetah_multi_task_akz~0.001_be~0.01_gs~0.1_prior_gmm',
'AKZ0.001_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.001_be~0.1_gs~0.01_prior_gmm',
'AKZ0.01_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.01_be~0.1_gs~0.01_prior_gmm',
'AKZ0.01_BE0.1_GS0.1': '.*cheetah_multi_task_akz~0.01_be~0.1_gs~0.1_prior_gmm',
'AKZ0.1_BE0.01_GS0.1': '.*cheetah_multi_task_akz~0.1_be~0.01_gs~0.1_prior_gmm',
'AKZ0.1_BE0.1_GS0.01': '.*cheetah_multi_task_akz~0.1_be~0.1_gs~0.01_prior_gmm',
'AKZ0.1_BE0.1_GS0.1': '.*cheetah_multi_task_akz~0.1_be~0.1_gs~0.1_prior_gmm',
'GRU_T10': '.*cheetah_multi_task_et~gru_ts~10_prior_gmm',
'TRANSFORMER_T1': '.*cheetah_multi_task_et~transformer_ts~1_prior_gmm',
'TRANSFORMER_T5': '.*cheetah_multi_task_et~transformer_ts~5_prior_gmm',
'T_MULTIPLICATION': '.*cheetah_multi_task_tc~multiplication_prior_gmm',
'SM_NONE': '.*cheetah_multi_task_td~None_sm~None_prior_gmm',
'SM_LINEAR': '.*cheetah_multi_task_td~None_sm~linear_prior_gmm',
'TD_NONE_SMNONE': '.*cheetah_multi_task_td~None_sm~None_prior_gmm',
'TD_NONE_SMLINEAR': '.*cheetah_multi_task_td~None_sm~linear_prior_gmm',
'TD_WORST_SMNONE': '.*cheetah_multi_task_td~worst_sm~None_prior_gmm',
'8_TASK_GRU_64': '.*cheetah_multi_task_ts~64_true_gmm',
}
# Setup:
# DICT = {run name: [(Title, tag), ...], ...}
RUN_TAGS_DICT = {
'default': [
('Evaluation Test ND Average Reward', 'evaluation/nd_test/average_reward'),
('Evaluation Test ND Max Reward', 'evaluation/nd_test/max_reward'),
('Evaluation Test ND Min Reward', 'evaluation/nd_test/min_reward'),
('Evaluation Test ND Std Reward', 'evaluation/nd_test/std_reward'),
('Evaluation Test ND Success Rate', 'evaluation/nd_test/success_rate'),
('Evaluation Test Average Reward', 'evaluation/test/average_reward'),
('Evaluation Test Max Reward', 'evaluation/test/max_reward'),
('Evaluation Test Min Reward', 'evaluation/test/min_reward'),
('Evaluation Test Std Reward', 'evaluation/test/std_reward'),
('Evaluation Test Success Rate', 'evaluation/test/success_rate'),
('Evaluation Training Average Reward', 'evaluation/train/average_reward'),
('Evaluation Training Max Reward', 'evaluation/train/max_reward'),
('Evaluation Training Min Reward', 'evaluation/train/min_reward'),
('Evaluation Training Std Reward', 'evaluation/train/std_reward'),
('Evaluation Training Success Rate', 'evaluation/train/success_rate'),
('Policy Training Alpha Loss', 'rl/alpha'),
('Policy Training Policy Loss', 'rl/policy_loss'),
('Policy Training QF1 Loss', 'rl/qf1_loss'),
('Policy Training QF2 Loss', 'rl/qf2_loss'),
('Task Inference Training Mixture Model Combined Loss', 'training/ti_mixture_loss'),
('Task Inference Training Mixture Model Elbo Loss', 'training/ti_mixture_elbo_loss'),
('Task Inference Training Mixture Model State Loss', 'training/ti_mixture_state_losses'),
('Task Inference Training Mixture Model Reward Loss', 'training/ti_mixture_reward_losses'),
('Task Inference Training Mixture Model Regularization Loss', 'training/ti_mixture_regularization_loss'),
('Task Inference Training Mixture Model Class Activation Accuracy', 'training/ti_classification_acc'),
('Task Inference Training Mixture Model Clustering Loss', 'training/ti_mixture_clustering_losses')
],
}
def main(run_name=None, interpolation_type='scipy', smooth=True, format_='pdf', plot_std=True, save_=True,
summary_pref='', fit_plt=False):
global RUN_REGEX_DICT
global FOLDER_PATH_RUNS
global RUNS_TO_PLOT
if run_name is not None:
run_name = run_name if run_name[-1] != '/' else run_name[:-1]
head, tail = os.path.split(run_name)
if len(head) > 0:
FOLDER_PATH_RUNS = head
RUN_REGEX_DICT = {
'TIBIAMRL': tail,
}
else:
RUN_REGEX_DICT = {
'TIBIAMRL': run_name,
}
RUNS_TO_PLOT = ['TIBIAMRL']
# Prepare data
data_dict = {}
# Get all folders in folder
folders = sorted([d for d in os.listdir(FOLDER_PATH_RUNS) if os.path.isdir(os.path.join(FOLDER_PATH_RUNS, d))])
for run_name in RUNS_TO_PLOT:
for folder in folders:
if re.match(RUN_REGEX_DICT[run_name], folder) is not None:
(dirpath, subfolders, subfiles) = next(os.walk(os.path.join(FOLDER_PATH_RUNS, folder, 'tensorboard')))
#(dirpath, _, subsubfiles) = next(os.walk(os.path.join(dirpath, subfolders[0])))
# Add tf events from first subfolder
print(f'Reading in events of {[file for file in subfiles if "events.out" in file][0]} [{folder}]')
acc = EventAccumulator(os.path.join(dirpath, [file for file in subfiles if 'events.out' in file][0])).Reload()
# Gather all info for given tags
for title, tag in RUN_TAGS_DICT[run_name if run_name in RUN_TAGS_DICT.keys() else 'default']:
try:
list_of_events = acc.Scalars(summary_pref + tag)
except Exception as e:
print(f'\tAcquiring data for tag "{summary_pref + tag}" went wrong! ({e})')
continue
_, steps, values = list(zip(*map(lambda x: x._asdict().values(), list_of_events)))
df = pd.DataFrame(data=np.array([np.array(steps), np.array(values)]).T, columns=['Step', 'Value'])
df.drop_duplicates(subset='Step', keep='last', inplace=True)
# Add dfs to data_dict
if title in data_dict.keys():
if not CONCAT_RUNS:
if run_name in data_dict[title].keys():
data_dict[title][run_name].append(df)
else:
data_dict[title][run_name] = [df]
else:
last_step = data_dict[title][run_name][0]['Step'].to_numpy()[-1]
df['Step'] += last_step
data_dict[title][run_name][0] = data_dict[title][run_name][0].append(df)
else:
data_dict[title] = {run_name: [df]}
print(f'Using {["own", "InterpolatedUnivariateSpline (scipy)"][int(interpolation_type == "scipy")]} interpolation method to patch missing data in some plots')
# Find min length for plotting only valid data and transform pd frames in numpy arrays
for title in data_dict.keys():
# Find corresponding values and interpolate
for run_name in list(data_dict[title].keys()):
# Only interpolate in case we have multiple runs that need to be averaged
min_steps = data_dict[title][run_name][0]['Step'].to_numpy()
if len(data_dict[title][run_name]) > 1:
temp_l = np.array([df['Step'].to_numpy()[-1] for df in data_dict[title][run_name]])
min_steps = data_dict[title][run_name][temp_l.argmin()]['Step'].to_numpy()
if interpolation_type == 'scipy':
for ind, df in enumerate(data_dict[title][run_name]):
interpolation_function = InterFun(df['Step'].to_numpy(), df['Value'].to_numpy())
data_dict[title][run_name][ind] = interpolation_function(min_steps)
elif interpolation_type == 'own':
for ind, df in enumerate(data_dict[title][run_name]):
steps, values = df['Step'].to_numpy(), df['Value'].to_numpy()
                    bigger_array = np.zeros_like(min_steps, dtype=float)  # np.float was removed from NumPy; builtin float is equivalent
for arr_ind, step in enumerate(min_steps):
bigger_array[arr_ind] = values[np.where(steps >= step)[0][0]] if np.sum(steps >= step) > 0 else values[-1]
data_dict[title][run_name][ind] = bigger_array
else:
data_dict[title][run_name][0] = data_dict[title][run_name][0]['Value'].to_numpy()
data_dict[title][run_name + '_steps'] = min_steps
# Start plotting
print(f'Plotting ...')
# Use Latex text
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# Make folder in case not yet existing
file_name = "_".join([RUN_REGEX_DICT[run_name] for run_name in RUNS_TO_PLOT])
fig_folder = os.path.join(FOLDER_PATH_FIG, f'{time.strftime("%Y-%m-%d-%H_%M_%S")}_{file_name if len(RUNS_TO_PLOT) < 2 else "comparison"}_smoothing{SMOOTHING}')
if not os.path.isdir(fig_folder) and save_:
os.mkdir(fig_folder)
for title in data_dict.keys():
plot_title = ('Comparison ' if len(data_dict[title]) > 2 else '') + title
plt.ioff()
plt.title(plot_title)
max_mean, min_mean = -np.inf, np.inf
for run_name in data_dict[title].keys():
if '_steps' in run_name:
continue
data_arr = np.array(data_dict[title][run_name])
steps = data_dict[title][run_name + '_steps']
mean = data_arr.mean(axis=0) if not smooth else smooth_values(data_arr.mean(axis=0))
std = np.sqrt(data_arr.var(axis=0))
plt.plot(steps, mean)
if plot_std: plt.fill_between(steps, mean + std, mean - std, alpha=0.3)
max_mean = mean.max() if max_mean < mean.max() else max_mean
min_mean = mean.min() if min_mean > mean.min() else min_mean
if fit_plt: plt.ylim([min_mean, max_mean])
plt.legend([f'{el}_[{len(data_dict[title][el])}]' for el in data_dict[title].keys() if '_steps' not in el],
bbox_to_anchor=(1, 1), loc='upper left')
plt.xlabel('Steps')
plt.ylabel(title)
# Always show 0
# y_min, y_max = plt.gca().get_ylim()
# if y_min > 0 and not fit_plt:
# plt.ylim([0, y_max])
# Save or show
if save_:
plt.savefig(os.path.join(fig_folder, plot_title + '.' + format_), format=format_, dpi=100,
bbox_inches='tight')
else:
plt.show()
plt.close()
def smooth_values(scalars, weight=None): # Scalars as np.array, weight between 0 and 1
if weight is None: weight = SMOOTHING
last = scalars[0] # First value in the plot (first timestep)
smoothed = np.zeros_like(scalars)
for idx, point in enumerate(scalars):
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed[idx] = smoothed_val # Save it
last = smoothed_val # Anchor the last smoothed value
return np.array(smoothed)
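
# Toy check (comment added): smooth_values(np.array([0., 1., 1.]), weight=0.5)
# returns [0.0, 0.5, 0.75] -- each point is pulled halfway toward the running
# average, the usual TensorBoard-style exponential smoothing.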
if __name__ == '__main__':
if len(sys.argv) > 0:
main(*sys.argv[1:])
else:
main()
|
[
"brainstoorm@web.de"
] |
brainstoorm@web.de
|
13e5580aff5ed3f900413d87447a30a3ad35e622
|
e4f9c74094b5d2263768640e15d36265e905a133
|
/catalogue_folder_level.py
|
fd89b2190ece65cbe1739249ffe9d9993e626e6b
|
[] |
no_license
|
rothwellstuart/nlp-command-line-tool
|
a4549af4d8bdcd764a20bfbac4c4d9faa7388a27
|
4821391484dbb9fc9896a1c9c1dd5ac385f5f8d2
|
refs/heads/master
| 2020-03-29T02:19:23.249589
| 2018-09-19T10:05:53
| 2018-09-19T10:05:53
| 149,430,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,004
|
py
|
# # Catalogue_folder_level
#
# Create a high level classification of files by file type, WITHIN A FOLDER
#
# Imports
# def catalogue_folder_level(selected_dir):
import os, sys, magic, time, hashlib, csv, shutil, operator
from os import listdir, environ
from os.path import isfile, join
import pandas as pd

# Root folder to catalogue, taken from the command line: the function wrapper
# above is commented out, so its selected_dir parameter was otherwise undefined
selected_dir = sys.argv[1]

# Initialise variables
allfiles = []
# Main loop
# for file in driver_list:
for subdir, dirs, files in os.walk(selected_dir):
dict_filecount=dict()
dict_filesizes=dict()
dict_mimecount=dict()
dict_mimesizes=dict()
dict_extcount=dict()
dict_extsizes=dict()
dict_filecount_sorted = []
dict_filesizes_sorted = []
dict_mimecount_sorted = []
dict_mimesizes_sorted = []
dict_extcount_sorted = []
dict_extsizes_sorted = []
folder_count = 0
folder_size = 0
# Get name of folder immediately above the files
named_folder = subdir.rsplit('/',1)[-1]
relpath = os.path.relpath(subdir, selected_dir)
if relpath == '.':
relpath = ""
print("Processing sub-folder: ", named_folder, ", at: ", relpath)
# Cycle through files
for file in files:
# Count of files
folder_count += 1
# Get filetype and file size
filesize = os.path.getsize(join(subdir, file))
fileext = os.path.splitext(join(subdir, file))[1].upper()
mimetype = magic.from_file(join(subdir, file), mime=True)
# Add in size
folder_size += filesize
# CLASSIFICATION of files
if 'encrypted' in mimetype:
fileclass = 'ENCRYPTED'
elif 'zip' in mimetype:
fileclass = 'COMPRESSED'
elif 'word' in mimetype:
fileclass = 'WORD'
elif 'pdf' in mimetype:
fileclass='PDF'
elif ('excel' in mimetype) or ('spreadsheet' in mimetype):
fileclass = 'EXCEL'
elif 'office' in mimetype and fileext == '.VSD':
fileclass = 'VISIO'
elif 'office' in mimetype and fileext == '.XLS':
fileclass = 'EXCEL'
elif 'powerpoint' in mimetype:
fileclass = 'POWERPOINT'
elif 'image' in mimetype:
fileclass = 'IMAGE'
elif 'message' in mimetype:
fileclass = 'EMAIL'
elif 'text' in mimetype or 'octet-stream' in mimetype or 'application' in mimetype:
if fileext == '.HTM' or fileext == '.HTML':
fileclass = 'HTML'
elif fileext == '.EML' or fileext == '.RTF' or fileext == '.MSG':
fileclass = 'EMAIL'
elif fileext == '.TXT':
fileclass = 'TEXT'
elif fileext == '.RAW' or fileext == '.GIF' or fileext == '.JPG' or fileext == '.PNG' or '.TIF' in fileext or fileext == '.WMF':
fileclass = 'IMAGE'
elif fileext == '.DAT' or fileext == '.CSV':
fileclass = 'FLATFILE'
elif '.DOC' in fileext:
fileclass = 'WORD'
elif fileext == '.PDF':
fileclass = 'PDF'
elif '.XLS' in fileext:
fileclass = 'EXCEL'
elif '.PPT' in fileext:
fileclass = 'POWERPOINT'
elif fileext == '.MBOX':
fileclass = 'MAILBOX'
elif fileext == '.XML':
fileclass = 'XML'
elif fileext == '.ZIP':
fileclass = 'COMPRESSED'
elif '.001' in fileext or fileext == '.JS' or fileext == '.AU_' or fileext == '.COM_' or fileext == '.CSS' or \
fileext == '.JOBOPTIONS' or fileext == '.LOCAL_' or fileext == '.DOT' or fileext == '.DS_STORE' or \
fileext == '.EMF' or fileext == '.MDB' or fileext == '.ODTTF' or fileext == '.PART' or fileext == '.WPD':
fileclass = 'MISC'
else:
fileclass = 'MISC'
elif mimetype == 'application/xml':
fileclass = 'XML'
else:
### octet-stream
### inode/x-empty
fileclass='UNKNOWN'
# Add to dictionaries
if fileclass in dict_filecount:
dict_filecount[fileclass] += 1
dict_filesizes[fileclass] += filesize
else:
dict_filecount[fileclass] = 1
dict_filesizes[fileclass] = filesize
if mimetype in dict_mimecount:
dict_mimecount[mimetype] += 1
dict_mimesizes[mimetype] += filesize
else:
dict_mimecount[mimetype] = 1
dict_mimesizes[mimetype] = filesize
if fileext in dict_extcount:
dict_extcount[fileext] += 1
dict_extsizes[fileext] += filesize
else:
dict_extcount[fileext] = 1
dict_extsizes[fileext] = filesize
###### End loop of files within subdir
# Sort dictionaries by the values
dict_filecount_sorted = sorted(dict_filecount.items(), key=operator.itemgetter(1), reverse=True)
dict_filesizes_sorted = sorted(dict_filesizes.items(), key=operator.itemgetter(1), reverse=True)
dict_mimecount_sorted = sorted(dict_mimecount.items(), key=operator.itemgetter(1), reverse=True)
dict_mimesizes_sorted = sorted(dict_mimesizes.items(), key=operator.itemgetter(1), reverse=True)
dict_extcount_sorted = sorted(dict_extcount.items(), key=operator.itemgetter(1), reverse=True)
dict_extsizes_sorted = sorted(dict_extsizes.items(), key=operator.itemgetter(1), reverse=True)
# Check contents of dictionaries
# print('Filecounts by filetype: ', dict_filecount_sorted)
# print('Filesizes by filetype', dict_filesizes_sorted)
# print('Filecounts by mimetype: ', dict_mimecount_sorted)
# print('Filesizes by mimetype', dict_mimesizes_sorted)
# print('Filecounts by extension: ', dict_extcount_sorted)
# print('Filesizes by extension', dict_extsizes_sorted)
# Append to output - one row for every subdir
row=[]
row.append(named_folder)
row.append(subdir)
row.append(relpath)
row.append(folder_count)
row.append(folder_size)
row.append(str(dict_filecount_sorted))
row.append(str(dict_filesizes_sorted))
row.append(str(dict_mimecount_sorted))
row.append(str(dict_mimesizes_sorted))
row.append(str(dict_extcount_sorted))
row.append(str(dict_extsizes_sorted))
allfiles.append(row)
### End of subdirs loop
# Convert to DataFrame
allfiles_df = pd.DataFrame(allfiles)
# Rename columns
allfiles_df.columns = ['named_folder', 'subdir', 'relpath', 'filecount', 'filesize', 'filecount_by_type', 'filesize_by_type', 'filecount_by_mimetype','filesize_by_mimetype', 'filecount_by_ext', 'filesize_by_ext']
allfiles_df.sort_values('filesize', ascending=False, inplace=True)
# Output to csv
allfiles_df.to_csv('output/00_catalogue_folder_level.csv', index=False)
# Print output to screen
print("Folder summary view run:")
print(allfiles_df[['subdir', 'filecount', 'filesize']])
print("\nSee output/00_catalogue_folder_level.csv for full detailed summary.\n")
|
[
"rothwellstuart@hotmail.com"
] |
rothwellstuart@hotmail.com
|
38ca3a02b5ddb39e04540b8ee04a6af0828c4cbd
|
842a047102c81e78c7c9276bb77519218b6c3967
|
/app/django-backend/app/spendings/views.py
|
4aa7551f3a877c371e165801c3beba89fb3bf7f3
|
[] |
no_license
|
dpinedaj/FinanceApp
|
18017455d962e0db6acd05f4547c4d76dbe50745
|
12f5e84f4758d59b8916472661ff2411ced40ecc
|
refs/heads/master
| 2023-05-08T06:29:02.896984
| 2021-05-03T01:39:22
| 2021-05-03T01:39:22
| 300,404,921
| 1
| 0
| null | 2021-05-03T01:39:23
| 2020-10-01T19:45:53
|
Python
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
from rest_framework import viewsets
from spendings.models import Spends, SpendTypes
from spendings.serializer import SpendsSerializer, SpendTypesSerializer
# Create your views here.
class SpendTypesView(viewsets.ModelViewSet):
queryset = SpendTypes.objects.all()
serializer_class = SpendTypesSerializer
class SpendsView(viewsets.ModelViewSet):
queryset = Spends.objects.all()
serializer_class = SpendsSerializer
|
[
"dpinedaj@unal.edu.co"
] |
dpinedaj@unal.edu.co
|
b364344e9455d0d80b99465d14a2c0d8abf05236
|
54b09a85d579d2a0d296a825196f2515da64fec1
|
/BOJ2884.py
|
f655909e11caaeff6516f8e4ff9b49d9dcc256af
|
[] |
no_license
|
winan305/Algorithm-with-Python3
|
2c0f51e03b7207eb7b644cecc44aef489e3e6ee2
|
233b0f5687f4d7b1ec7ec4772771503fa85c27ee
|
refs/heads/master
| 2021-01-22T20:49:09.488640
| 2018-05-16T03:52:27
| 2018-05-16T03:52:27
| 100,776,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 179
|
py
|
# https://www.acmicpc.net/problem/2884
# Alarm Clock
# Implementation
H, M = map(int, input().split())
M = M - 45
if M < 0 :
M += 60
H -= 1
if H < 0 : H += 24
print(H, M)
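# Worked examples (comments added): input "10 10" prints "9 25"; "0 30" wraps across midnight to "23 45"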
|
[
"winan305"
] |
winan305
|
eaa84d083daf2838f8db871cfe6ed73b20709602
|
00c48bd685a3dda5731a9f9ba7b87589048ed71d
|
/TEAM_HIT_BY_PITCH/teams_hit_by_pitches.py
|
26b0c66d1059ac8d36c095e00f30a7e3b58a3bfe
|
[] |
no_license
|
ebwinters/BaseballAnalysis
|
0da56faae45b551be840df7ed6779c515e45fcd4
|
9d0ed4aab54cba8848d85595ec4d77c319c6f02f
|
refs/heads/master
| 2020-03-07T19:23:06.777335
| 2018-04-10T17:56:09
| 2018-04-10T17:56:09
| 127,668,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#load in data
teams = "~/Desktop/DataAnalysis_Udemy/BaseballAnalysis/baseballdatabank-master/core/Teams.csv"
teams_df = pd.read_csv(teams)
odds_by_team_id = {}
def get_odds_hit_by_pitch(team_id):
#only get teams with team_id, and drop any columns with no data (probably from early years of baseball)
df = teams_df.loc[teams_df['teamID'] == team_id].dropna()
df = df.groupby(by='teamID', as_index=False)['teamID', 'HBP', 'AB'].sum()
#add to dictionary to use later in plotting
odds_by_team_id[team_id] = float(df['HBP']/df['AB'])
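
# Worked example (comment added): if a franchise's rows sum to HBP = 1500 and
# AB = 300000, the stored value is 1500 / 300000 = 0.005, i.e. its batters are
# hit in roughly 0.5% of at-bats.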
team_id_list = [
'ARI',
'ATL',
'BAL',
'BOS',
'CHA',
'CHN',
'CIN',
'CLE',
'COL',
'DET',
'HOU',
'KCA',
'LAA',
'LAN',
'MIA',
'MIL',
'MIN',
'NYA',
'NYN',
'OAK',
'PHI',
'PIT',
'SDN',
'SEA',
'SFN',
'SLN',
'TBA',
'TEX',
'TOR',
'WAS'
]
for team_id in team_id_list:
get_odds_hit_by_pitch(team_id)
barchart = sns.barplot(x=list(odds_by_team_id.keys()), y=list(odds_by_team_id.values()), palette='deep')
barchart.set(xlabel='Team', ylabel='% Chance hit by pitch')
barchart.tick_params(labelsize=5)
plt.show()  # plt.show() takes no artist argument; the bar chart is already on the current figure
|
[
"ewinters@terpmail.umd.edu"
] |
ewinters@terpmail.umd.edu
|
ae48474220c3b8e0a410957103a20113cddbb24a
|
69a1a36a322cfc393ad40423d782ebe6f7153304
|
/analytics/migrations/0002_auto_20190103_0249.py
|
07ca1f41e2447340ecbb4343879e6a4cc4e45f49
|
[] |
no_license
|
mmaleka/beam-force-calculator
|
1690098575daae1a2e6df2b18734530b9aeb2476
|
1e953373270b7288f3a9d0d34971b1c8f68e5eff
|
refs/heads/master
| 2020-04-14T13:49:27.063524
| 2019-01-05T12:11:42
| 2019-01-05T12:11:42
| 163,877,466
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
# Generated by Django 2.1.1 on 2019-01-03 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analytics', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SolutionBeamCount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_address', models.CharField(blank=True, max_length=220, null=True)),
('user', models.CharField(db_index=True, max_length=150)),
('time_stamp', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('views_count', models.IntegerField(default=0)),
],
options={
'ordering': ['-time_stamp'],
},
),
migrations.RemoveField(
model_name='registercount',
name='address',
),
migrations.RemoveField(
model_name='solvebeamcount',
name='address',
),
]
|
[
"Mpho.Maleka@rheinmetall-denelmunition.com"
] |
Mpho.Maleka@rheinmetall-denelmunition.com
|
3c2625961aa16d15246b5e222e8ed2673f9004c5
|
f08d0b5d0ce94292493111be42eaf6db051c8eb3
|
/view/CardEncoder.py
|
a6c0f5955440aa1bf542bf4cf57eb5961033a426
|
[] |
no_license
|
draxlus/CMPT-370_SoftwareDevProject
|
999ac7ddd470b40d2df8f338a51f2a661b747922
|
f2205456ba5ff3d1cb7d4d65cd65becfabcf8c2c
|
refs/heads/main
| 2023-04-19T07:52:45.986842
| 2021-05-06T19:18:43
| 2021-05-06T19:18:43
| 365,009,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 124
|
py
|
import json
from json import JSONEncoder
class CardEncoder(JSONEncoder):
    def default(self, o):
return o.__dict__
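
# Hypothetical usage sketch (added; Card is a stand-in for whatever model
# object the views serialize with this encoder):
#
#     class Card:
#         def __init__(self, rank, suit):
#             self.rank, self.suit = rank, suit
#
#     json.dumps(Card('A', 'spades'), cls=CardEncoder)
#     # -> '{"rank": "A", "suit": "spades"}'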
|
[
"siddhantagrawal777@gmail.com"
] |
siddhantagrawal777@gmail.com
|
6b059ae9f2f5382b3bb9cbb8c0e8698cebbcb437
|
a86e67ac95a331e9652c82f8d30e0a3a3968a3ba
|
/omsaalert/config/email.py
|
5f9c023f03b1b0d141d70ddac9deb598972f3205
|
[] |
no_license
|
dsoprea/omsa-alert
|
9a1511e2d7e3bf90335bfe72bde3ba3bd8103439
|
23aaa939182e1c0e070b430768d9d4b5cf1be4d4
|
refs/heads/master
| 2021-05-09T13:33:47.916314
| 2019-12-21T17:47:57
| 2019-12-21T17:47:57
| 119,039,198
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 115
|
py
|
DEFAULT_SUBJECT = "OMSA Reported a Problem"
FROM_EMAIL_ADDRESS = "omsaalert@localhost"
SMTP_HOSTNAME = 'localhost'
|
[
"doprea@magicleap.com"
] |
doprea@magicleap.com
|
af46dad9e4c7157da0632a065f8f382db3a588b7
|
607257a034f4d0ce2916c68d9995ee9d2eec20f0
|
/Controller/make_db.py
|
c79fd9e486d77956cab1eef0429a7f1752a66618
|
[] |
no_license
|
sean-ocall/phosphorylation
|
c3d086b6afb152e1b0a73c330ac63b1c90dea7da
|
e9e3482fb73fc45a60dfaf26bf067ce73975e0d1
|
refs/heads/master
| 2021-09-15T10:52:45.043572
| 2018-05-31T04:38:36
| 2018-05-31T04:38:36
| 107,934,589
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,867
|
py
|
import sqlite3
conn = sqlite3.connect('../Model/phospho-db.sqlite')
c = conn.cursor()
table_name = 'phosphositetb'
# 0 1 2 3 4 5 6
t_fields = ['residue', 'position', 'uniprotid','genename','function', 'foldchange', 'AA_sequence']
t_field_types = ['TEXT', 'INTEGER', 'TEXT', 'TEXT', 'TEXT', 'FLOAT','TEXT']
c.execute(""" CREATE TABLE IF NOT EXISTS {tn} (
{fn1} {ft1},
{fn2} {ft2},
{fn3} {ft3},
{fn4} {ft4},
{fn5} {ft5},
{fn6} {ft6},
{fn7} {ft7}
); """.format(tn=table_name,
fn1=t_fields[0],
fn2=t_fields[1],
fn3=t_fields[2],
fn4=t_fields[3],
fn5=t_fields[4],
fn6=t_fields[5],
fn7=t_fields[6],
ft1=t_field_types[0],
ft2=t_field_types[1],
ft3=t_field_types[2],
ft4=t_field_types[3],
ft5=t_field_types[4],
ft6=t_field_types[5],
ft7=t_field_types[6]))
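# With the fields above, the formatted statement reads (comment added):
#   CREATE TABLE IF NOT EXISTS phosphositetb (
#       residue TEXT, position INTEGER, uniprotid TEXT, genename TEXT,
#       function TEXT, foldchange FLOAT, AA_sequence TEXT
#   );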
conn.commit()
conn.close()
|
[
"sean.ocall@gmail.com"
] |
sean.ocall@gmail.com
|
392a95a13678b978b2bf26cfa31a3ae43fdcdd15
|
5e517912d4666fc3a2f012fa1f2a7e829f18ad6c
|
/Exercícios/Conversão-moeda.py
|
fbb865e76af27c941ed51d4093851d19e65f4df6
|
[] |
no_license
|
Marcelo-Carlos/Python
|
817a8342191e57abb676dafef8c8798c0364c959
|
d3e3c2f96bb9cfad530c67ab65f6e4713f9ca3d1
|
refs/heads/master
| 2022-09-11T21:20:06.976305
| 2020-05-31T21:33:17
| 2020-05-31T21:33:17
| 268,367,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 118
|
py
|
real = float(input('How much do you have? R$: '))
dolar = real / 5.27  # fixed BRL -> USD rate of 5.27
print('You can buy US$: {:.2f}'.format(dolar))
|
[
"Marcelo Carlos"
] |
Marcelo Carlos
|
5c2a0ecf03bd9fc2fff4b6d350ed3171d1b1c3d3
|
209a7a4023a9a79693ec1f6e8045646496d1ea71
|
/COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/_testing.py
|
0af5339179bf326f08e63c419731ff513a646c25
|
[
"MIT"
] |
permissive
|
anzhao920/MicrosoftProject15_Invictus
|
5e2347015411bbffbdf0ceb059df854661fb240c
|
15f44eebb09561acbbe7b6730dfadf141e4c166d
|
refs/heads/main
| 2023-04-16T13:24:39.332492
| 2021-04-27T00:47:13
| 2021-04-27T00:47:13
| 361,913,170
| 0
| 0
|
MIT
| 2021-04-26T22:41:56
| 2021-04-26T22:41:55
| null |
UTF-8
|
Python
| false
| false
| 99,239
|
py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
from pathlib import Path
import random
import re
from shutil import rmtree
import string
import tempfile
from typing import IO, Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import safe_sort, take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
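# Illustrative usage sketch (the ``_example_*`` helper below is hypothetical,
# added for exposition and not part of the original module): round-trip a
# small frame through pickle and assert it survives unchanged.
def _example_round_trip_pickle():
    df = DataFrame({"a": [1, 2, 3]})
    result = round_trip_pickle(df)  # writes to a temp path, reads it back
    assert_frame_equal(df, result)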
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
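# Illustrative sketch (hypothetical helper; assumes pytest is importable,
# since round_trip_pathlib calls pytest.importorskip internally): round-trip
# a frame through CSV via pathlib.Path, keeping the original index in
# column 0 so the frames compare equal afterwards.
def _example_round_trip_pathlib():
    df = DataFrame({"a": [1.0, 2.0]})
    result = round_trip_pathlib(df.to_csv, lambda p: pd.read_csv(p, index_col=0))
    assert_frame_equal(df, result)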
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
# pandas\_testing.py:243: error: Incompatible types in assignment
# (expression has type "IO[Any]", variable has type "BinaryIO")
f = gzip.open(path, "rb") # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:245: error: Incompatible types in assignment
# (expression has type "BZ2File", variable has type "BinaryIO")
f = bz2.BZ2File(path, "rb") # type: ignore[assignment]
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
# pandas\_testing.py:252: error: Incompatible types in assignment
# (expression has type "IO[bytes]", variable has type "BinaryIO")
f = zip_file.open(zip_names.pop()) # type: ignore[assignment]
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
# pandas\_testing.py:288: error: Incompatible types in assignment
# (expression has type "Type[GzipFile]", variable has type
# "Type[ZipFile]")
compress_method = gzip.GzipFile # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:290: error: Incompatible types in assignment
# (expression has type "Type[BZ2File]", variable has type
# "Type[ZipFile]")
compress_method = bz2.BZ2File # type: ignore[assignment]
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
# pandas\_testing.py:302: error: Incompatible types in assignment
# (expression has type "Tuple[Any]", variable has type "Tuple[Any,
# Any]")
args = (data,) # type: ignore[assignment]
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
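# Illustrative sketch (hypothetical helper) tying write_to_compressed and
# decompress_file together: gzip a small bytes payload to a temporary path
# and read it back.
def _example_compression_round_trip():
    with ensure_clean("__example__.gz") as path:
        write_to_compressed("gzip", path, b"payload")
        with decompress_file(path, compression="gzip") as fh:
            assert fh.read() == b"payload"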
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
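# Illustrative sketch (hypothetical helper): assert_almost_equal dispatches
# on type, so scalars, Series and frames all funnel through a single entry
# point with shared rtol/atol tolerances.
def _example_assert_almost_equal():
    assert_almost_equal(1.000001, 1.000002, rtol=1e-3)  # scalar branch
    assert_almost_equal(Series([0.1, 0.2]), Series([0.1, 0.2]))  # Series branch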
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of random alphanumeric strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
    Generate one random alphanumeric string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike: bool = False, **kwargs: Any):
"""
    Get a temporary file path and agree to remove it on close.
    This implementation does not use tempfile.mkstemp to avoid having a file handle.
    If the code using the returned path wants to delete the file itself, Windows
    requires that no program has a file handle to it.
Parameters
----------
filename : str (optional)
suffix of the created file.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords are passed to open().
"""
folder = Path(tempfile.gettempdir())
if filename is None:
filename = ""
filename = (
"".join(random.choices(string.ascii_letters + string.digits, k=30)) + filename
)
path = folder / filename
path.touch()
handle_or_str: Union[str, IO] = str(path)
if return_filelike:
kwargs.setdefault("mode", "w+b")
handle_or_str = open(path, **kwargs)
try:
yield handle_or_str
finally:
if not isinstance(handle_or_str, str):
handle_or_str.close()
if path.is_file():
path.unlink()
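# Illustrative sketch (hypothetical helper): the yielded path exists inside
# the block and is removed afterwards, even if the body raises.
def _example_ensure_clean():
    with ensure_clean("__demo__.csv") as path:
        DataFrame({"a": [1]}).to_csv(path)
        assert os.path.exists(path)
    assert not os.path.exists(path)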
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and agree to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
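# Illustrative sketch (hypothetical helper): environment mutations made
# inside the block do not leak out (assuming the variable was not set
# beforehand).
def _example_safe_environment():
    with ensure_safe_environment_variables():
        os.environ["_PANDAS_TESTING_DEMO_"] = "1"
    assert "_PANDAS_TESTING_DEMO_" not in os.environ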
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# allow string-like to have different inferred_types
            if left.inferred_type in ("string",):
                assert right.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = Index(safe_sort(left))
right = Index(safe_sort(right))
    # MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
    Check attributes are equal. Both objects must have the attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
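# Illustrative sketch (hypothetical helper): compare a single named
# attribute on two objects; here both Series share the name "x".
def _example_assert_attr_equal():
    assert_attr_equal("name", Series([1], name="x"), Series([2], name="x"))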
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
    assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
    Check that the left and right numpy arrays are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
        Compare datetime-like values that are comparable even when dtypes differ.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype_and_needs_i8_conversion(
left.dtype, right.dtype
) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
        Compare datetime-like values that are comparable even when dtypes differ.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - the same labels must be associated with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
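# Illustrative sketch (hypothetical helper): assert_equal lets test helpers
# compare whatever object they were handed without picking a specific
# assert_*_equal themselves.
def _example_assert_equal_dispatch():
    assert_equal(Index([1, 2]), Index([1, 2]))  # dispatches to assert_index_equal
    assert_equal(np.array([1.0]), np.array([1.0]))  # dispatches to the numpy check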
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
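# Illustrative sketch (hypothetical helper): the same expected values
# wrapped in different boxes; note the DataFrame case is transposed to a
# single row by default (transpose=True).
def _example_box_expected():
    assert isinstance(box_expected([1, 2, 3], pd.Series), pd.Series)
    assert box_expected([1, 2, 3], pd.DataFrame).shape == (1, 3)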
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # the SparseIndex objects compared equal; nothing further to check
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
"""
    Checks that we have the combination of an ExtensionArray dtype and
    a dtype that should be converted to int64
Returns
-------
bool
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    The resulting frame has the following columns:
        * name : object dtype with string names
        * id : int dtype with Poisson(1000)-distributed ids
        * x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
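# Illustrative sketch (hypothetical helper): the default frame is _N x _K
# (30 x 4) with random string row labels and columns "A" through "D".
def _example_makeDataFrame():
    df = makeDataFrame()
    assert df.shape == (_N, _K)
    assert list(df.columns) == list("ABCD")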
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True, will use default
        names; if False, will use no names; if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}.get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
        def keyfunc(x):
            # ``re`` is imported at module level; extract the numeric group
            # ids embedded in labels like "#_l0_g12" for numeric sorting
            numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
            return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# pandas\_testing.py:2148: error: Need type annotation for 'cnt'
cnt = Counter() # type: ignore[var-annotated]
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
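# Illustrative sketch (hypothetical helper): a 4-entry, 2-level MultiIndex
# where each first-level label is duplicated twice (ndupe_l=[2]).
def _example_makeCustomIndex():
    idx = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert isinstance(idx, MultiIndex)
    assert len(idx) == 4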
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which returns the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
        if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "density" [misc]
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "random_state" [misc]
i, j = _create_missing_idx( # type: ignore[misc]
*df.shape, density=density, random_state=random_state
)
df.values[i, j] = np.nan
return df
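# Added sketch: makeMissingDataframe returns the standard makeDataFrame()
# frame (defined earlier in this module) with roughly (1 - density) of its
# cells replaced by NaN.
def _example_make_missing_dataframe():
    df = makeMissingDataframe(density=0.8, random_state=42)
    frac_missing = df.isna().to_numpy().mean()
    assert 0.1 < frac_missing < 0.3  # about 20% missing, up to rounding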
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
# pandas\_testing.py:2331: error: Incompatible types in assignment
# (expression has type "List[<nothing>]", variable has type
# "Tuple[Any, ...]")
args = [] # type: ignore[assignment]
return dec(f)
else:
return dec
return wrapper
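# Added illustration: a decorator written with optional_args can be applied
# bare or with keyword arguments, exactly as the docstring above describes.
# Wrapped in a function so nothing runs at import time.
def _example_optional_args():
    @optional_args
    def tag(f, label="default"):
        @wraps(f)
        def inner(*args, **kwargs):
            return label, f(*args, **kwargs)

        return inner

    @tag
    def one():
        return 1

    @tag(label="custom")
    def two():
        return 2

    assert one() == ("default", 1)
    assert two() == ("custom", 2)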
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
    Try to connect to the given url. Returns True if it succeeds, False if an
    IOError is raised.
Parameters
----------
    url : basestring
        The URL to try to connect to
    error_classes : tuple, optional
        Exception classes that count as a failed connection; defaults to the
        standard network errors from ``_get_default_network_errors``.
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
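# Added usage sketch: can_connect is a plain boolean probe, e.g. to guard a
# network-dependent code path:
#
#   if can_connect("https://www.example.com"):
#       ...  # fetch the remote resource
#   else:
#       ...  # fall back to cached data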
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
# pandas\_testing.py:2521: error: "Exception" has no attribute
# "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
matched_message = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if not expected_warning:
continue
expected_warning = cast(Type[Warning], expected_warning)
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[3][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
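# Added illustration: registering a temporary "pipes" dialect and parsing
# with it; the dialect is unregistered again when the block exits.
def _example_with_csv_dialect():
    import csv
    from io import StringIO

    with with_csv_dialect("pipes", delimiter="|"):
        rows = list(csv.reader(StringIO("a|b|c"), dialect="pipes"))
    assert rows == [["a", "b", "c"]]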
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
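# Added sketch: temporarily force the plain (non-numexpr) evaluation path,
# e.g. to compare results against the accelerated path in a test. Assumes the
# makeDataFrame helper defined earlier in this module.
def _example_use_numexpr():
    df = makeDataFrame()
    with use_numexpr(False):
        result = df + df  # evaluated without numexpr
    return result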
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
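# Added illustration: the decorated function is invoked once per thread, so a
# shared list ends up with num_threads entries (list.append is atomic under
# the GIL, which keeps this sketch thread-safe).
def _example_test_parallel():
    hits = []

    @test_parallel(num_threads=4)
    def bump():
        hits.append(1)

    bump()
    assert len(hits) == 4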
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
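# Added sketch: building a skipna-aware reducer from a plain reducer.
def _example_skipna_wrapper():
    wrapped = _make_skipna_wrapper(np.sum)
    s = Series([1.0, np.nan, 2.0])
    assert wrapped(s) == 3.0  # the NaN is dropped before reducing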
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
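# Added doctest-style note: on POSIX systems os.linesep is "\n", so
# convert_rows_list_to_csv_str(["a,b", "1,2"]) returns "a,b\n1,2\n".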
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
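# Added illustration: both plain and reversed dunder names resolve to
# callables; the reversed form swaps its operands, as the except branch
# above arranges.
def _example_get_op_from_name():
    add = get_op_from_name("__add__")
    radd = get_op_from_name("__radd__")
    assert add(2, 3) == 5
    assert radd(2, 3) == 5  # radd(x, y) computes y + x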
|
[
"ana.kapros@yahoo.ro"
] |
ana.kapros@yahoo.ro
|
be66006b3dbd21221d545dfca9aca95cd6b65bc0
|
1cf2bce33acd6db5e77e0f1fa2460cebc0addf2a
|
/Aula 25.09/while_true.py
|
05ff114ed225963e817f41c8f06ac2aac49cbb46
|
[] |
no_license
|
natibaggi/faculdade-python
|
77a341a00ad5713812c977030ce39cea9c8e999a
|
95172ff1d6bc48f76ab299890cbddb31ab3c1efa
|
refs/heads/master
| 2023-02-04T16:39:08.652180
| 2020-12-22T21:41:14
| 2020-12-22T21:41:14
| 323,738,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
numero = input("Digite um número inteiro: ");
tamanho = len(numero);
resultado = [];
while True:
print(tamanho, end="")
tamanho -= 1;
if tamanho == 0:
break
#nao terminei
|
[
"natibaggi@gmail.com"
] |
natibaggi@gmail.com
|
4a337e652f79aa204a424d063e472a09f6d4b0f5
|
4876e2a240735a99cbded77d879cd2ba71edf7c6
|
/blog/migrations/0001_initial.py
|
e10fb819982179f744949e3bccf7b23e06da3575
|
[] |
no_license
|
denilenko/my-first-blog
|
5de7734959f0ff58fdd3daf12fe9f095c5bdaba2
|
fb9645287d45d461dfe2b03111fe6150c10bc254
|
refs/heads/master
| 2021-08-28T16:19:47.335294
| 2017-12-12T18:16:25
| 2017-12-12T18:16:25
| 113,341,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-06 15:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"denilenko@gmail.com"
] |
denilenko@gmail.com
|
5e6ea92d7567fc266542f3007289f93977e7a2e2
|
c140ad38b1463024e289ceb0d5d6d44a45c91724
|
/test/test_slurm_pmi2.py
|
a06325c6180f3a53cdb57d646e58232fe3b28adb
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/hpc-container-maker
|
3a333526decbd18352ef8d1fb3bec0033be221e8
|
60fd2a51c171258a6b3f93c2523101cb7018ba1b
|
refs/heads/master
| 2023-08-21T13:32:27.132476
| 2023-06-12T21:12:40
| 2023-06-12T21:12:40
| 126,385,168
| 419
| 88
|
Apache-2.0
| 2023-09-11T18:33:26
| 2018-03-22T19:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,245
|
py
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods, bad-continuation
"""Test cases for the slurm_pmi2 module"""
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
from helpers import centos, docker, ubuntu, x86_64
from hpccm.building_blocks.slurm_pmi2 import slurm_pmi2
class Test_slurm_pmi2(unittest.TestCase):
def setUp(self):
"""Disable logging output messages"""
logging.disable(logging.ERROR)
@x86_64
@ubuntu
@docker
def test_defaults_ubuntu(self):
"""Default slurm_pmi2 building block"""
p = slurm_pmi2()
self.assertEqual(str(p),
r'''# SLURM PMI2 version 21.08.8
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bzip2 \
file \
make \
perl \
tar \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://download.schedmd.com/slurm/slurm-21.08.8.tar.bz2 && \
mkdir -p /var/tmp && tar -x -f /var/tmp/slurm-21.08.8.tar.bz2 -C /var/tmp -j && \
cd /var/tmp/slurm-21.08.8 && ./configure --prefix=/usr/local/slurm-pmi2 && \
cd /var/tmp/slurm-21.08.8 && \
make -C contribs/pmi2 install && \
rm -rf /var/tmp/slurm-21.08.8 /var/tmp/slurm-21.08.8.tar.bz2''')
@x86_64
@ubuntu
@docker
def test_ldconfig(self):
"""ldconfig option"""
p = slurm_pmi2(ldconfig=True, version='20.02.7')
self.assertEqual(str(p),
r'''# SLURM PMI2 version 20.02.7
RUN apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bzip2 \
file \
make \
perl \
tar \
wget && \
rm -rf /var/lib/apt/lists/*
RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://download.schedmd.com/slurm/slurm-20.02.7.tar.bz2 && \
mkdir -p /var/tmp && tar -x -f /var/tmp/slurm-20.02.7.tar.bz2 -C /var/tmp -j && \
cd /var/tmp/slurm-20.02.7 && ./configure --prefix=/usr/local/slurm-pmi2 && \
cd /var/tmp/slurm-20.02.7 && \
make -C contribs/pmi2 install && \
echo "/usr/local/slurm-pmi2/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig && \
rm -rf /var/tmp/slurm-20.02.7 /var/tmp/slurm-20.02.7.tar.bz2''')
@x86_64
@ubuntu
@docker
def test_runtime(self):
"""Runtime"""
p = slurm_pmi2()
r = p.runtime()
self.assertEqual(r,
r'''# SLURM PMI2
COPY --from=0 /usr/local/slurm-pmi2 /usr/local/slurm-pmi2''')
|
[
"noreply@github.com"
] |
NVIDIA.noreply@github.com
|
20f177d5aac4b209c6517aba1be8ed5c99b59e68
|
70a89bdfcccd48fa1d9862d559f3caeeea0a668e
|
/tests/integration/boxscore/test_nfl_boxscore.py
|
7d73417817c18d43d2e5620b270b7890953df91a
|
[
"MIT"
] |
permissive
|
JosephDErwin/sportsreference
|
2e5e456e0f316594f972e5be30b919ed185676f3
|
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
|
refs/heads/master
| 2022-11-14T10:25:26.510132
| 2018-12-15T23:54:59
| 2018-12-15T23:54:59
| 278,135,130
| 0
| 0
|
MIT
| 2020-07-08T16:05:25
| 2020-07-08T16:05:24
| null |
UTF-8
|
Python
| false
| false
| 26,216
|
py
|
import mock
import os
import pandas as pd
from datetime import datetime
from flexmock import flexmock
from sportsreference import utils
from sportsreference.constants import AWAY
from sportsreference.nfl.constants import BOXSCORE_URL, BOXSCORES_URL
from sportsreference.nfl.boxscore import Boxscore, Boxscores
MONTH = 10
YEAR = 2017
BOXSCORE = '201802040nwe'
def read_file(filename):
filepath = os.path.join(os.path.dirname(__file__), 'nfl', filename)
return open('%s' % filepath, 'r').read()
def mock_pyquery(url):
class MockPQ:
def __init__(self, html_contents):
self.status_code = 200
self.html_contents = html_contents
self.text = html_contents
if url == BOXSCORES_URL % (YEAR, 7):
return MockPQ(read_file('boxscores-7-2017.html'))
if url == BOXSCORES_URL % (YEAR, 8):
return MockPQ(read_file('boxscores-8-2017.html'))
boxscore = read_file('%s.html' % BOXSCORE)
return MockPQ(boxscore)
class MockDateTime:
def __init__(self, year, month):
self.year = year
self.month = month
class TestNFLBoxscore:
@mock.patch('requests.get', side_effect=mock_pyquery)
def setup_method(self, *args, **kwargs):
self.results = {
'date': 'Sunday Feb 4, 2018',
'time': '6:30pm',
'stadium': 'U.S. Bank Stadium',
'attendance': 67612,
'duration': '3:46',
'winner': AWAY,
'winning_name': 'Philadelphia Eagles',
'winning_abbr': 'PHI',
'losing_name': 'New England Patriots',
'losing_abbr': 'NWE',
'away_points': 41,
'away_first_downs': 25,
'away_rush_attempts': 27,
'away_rush_yards': 164,
'away_rush_touchdowns': 1,
'away_pass_completions': 29,
'away_pass_attempts': 44,
'away_pass_yards': 374,
'away_pass_touchdowns': 4,
'away_interceptions': 1,
'away_times_sacked': 0,
'away_yards_lost_from_sacks': 0,
'away_net_pass_yards': 374,
'away_total_yards': 538,
'away_fumbles': 0,
'away_fumbles_lost': 0,
'away_turnovers': 1,
'away_penalties': 6,
'away_yards_from_penalties': 35,
'away_third_down_conversions': 10,
'away_third_down_attempts': 16,
'away_fourth_down_conversions': 2,
'away_fourth_down_attempts': 2,
'away_time_of_possession': '34:04',
'home_points': 33,
'home_first_downs': 29,
'home_rush_attempts': 22,
'home_rush_yards': 113,
'home_rush_touchdowns': 1,
'home_pass_completions': 28,
'home_pass_attempts': 49,
'home_pass_yards': 505,
'home_pass_touchdowns': 3,
'home_interceptions': 0,
'home_times_sacked': 1,
'home_yards_lost_from_sacks': 5,
'home_net_pass_yards': 500,
'home_total_yards': 613,
'home_fumbles': 1,
'home_fumbles_lost': 1,
'home_turnovers': 1,
'home_penalties': 1,
'home_yards_from_penalties': 5,
'home_third_down_conversions': 5,
'home_third_down_attempts': 10,
'home_fourth_down_conversions': 1,
'home_fourth_down_attempts': 2,
'home_time_of_possession': '25:56',
}
flexmock(utils) \
.should_receive('_todays_date') \
.and_return(MockDateTime(YEAR, MONTH))
self.boxscore = Boxscore(BOXSCORE)
def test_nfl_boxscore_returns_requested_boxscore(self):
for attribute, value in self.results.items():
assert getattr(self.boxscore, attribute) == value
def test_invalid_url_yields_empty_class(self):
flexmock(Boxscore) \
.should_receive('_retrieve_html_page') \
.and_return(None)
boxscore = Boxscore(BOXSCORE)
for key, value in boxscore.__dict__.items():
if key == '_uri':
continue
assert value is None
def test_nfl_boxscore_dataframe_returns_dataframe_of_all_values(self):
df = pd.DataFrame([self.results], index=[BOXSCORE])
# Pandas doesn't natively allow comparisons of DataFrames.
# Concatenating the two DataFrames (the one generated during the test
# and the expected one above) and dropping duplicate rows leaves only
# the rows that are unique between the two frames. This allows a quick
# check of the DataFrame to see if it is empty - if so, all rows are
# duplicates, and they are equal.
frames = [df, self.boxscore.dataframe]
df1 = pd.concat(frames).drop_duplicates(keep=False)
assert df1.empty
class TestNFLBoxscores:
def setup_method(self):
self.expected = {
'7-2017': [
{'boxscore': '201710190rai',
'away_name': 'Kansas City Chiefs',
'away_abbr': 'kan',
'away_score': 30,
'home_name': 'Oakland Raiders',
'home_abbr': 'rai',
'home_score': 31,
'winning_name': 'Oakland Raiders',
'winning_abbr': 'rai',
'losing_name': 'Kansas City Chiefs',
'losing_abbr': 'kan'},
{'boxscore': '201710220chi',
'away_name': 'Carolina Panthers',
'away_abbr': 'car',
'away_score': 3,
'home_name': 'Chicago Bears',
'home_abbr': 'chi',
'home_score': 17,
'winning_name': 'Chicago Bears',
'winning_abbr': 'chi',
'losing_name': 'Carolina Panthers',
'losing_abbr': 'car'},
{'boxscore': '201710220buf',
'away_name': 'Tampa Bay Buccaneers',
'away_abbr': 'tam',
'away_score': 27,
'home_name': 'Buffalo Bills',
'home_abbr': 'buf',
'home_score': 30,
'winning_name': 'Buffalo Bills',
'winning_abbr': 'buf',
'losing_name': 'Tampa Bay Buccaneers',
'losing_abbr': 'tam'},
{'boxscore': '201710220ram',
'away_name': 'Arizona Cardinals',
'away_abbr': 'crd',
'away_score': 0,
'home_name': 'Los Angeles Rams',
'home_abbr': 'ram',
'home_score': 33,
'winning_name': 'Los Angeles Rams',
'winning_abbr': 'ram',
'losing_name': 'Arizona Cardinals',
'losing_abbr': 'crd'},
{'boxscore': '201710220min',
'away_name': 'Baltimore Ravens',
'away_abbr': 'rav',
'away_score': 16,
'home_name': 'Minnesota Vikings',
'home_abbr': 'min',
'home_score': 24,
'winning_name': 'Minnesota Vikings',
'winning_abbr': 'min',
'losing_name': 'Baltimore Ravens',
'losing_abbr': 'rav'},
{'boxscore': '201710220mia',
'away_name': 'New York Jets',
'away_abbr': 'nyj',
'away_score': 28,
'home_name': 'Miami Dolphins',
'home_abbr': 'mia',
'home_score': 31,
'winning_name': 'Miami Dolphins',
'winning_abbr': 'mia',
'losing_name': 'New York Jets',
'losing_abbr': 'nyj'},
{'boxscore': '201710220gnb',
'away_name': 'New Orleans Saints',
'away_abbr': 'nor',
'away_score': 26,
'home_name': 'Green Bay Packers',
'home_abbr': 'gnb',
'home_score': 17,
'winning_name': 'New Orleans Saints',
'winning_abbr': 'nor',
'losing_name': 'Green Bay Packers',
'losing_abbr': 'gnb'},
{'boxscore': '201710220clt',
'away_name': 'Jacksonville Jaguars',
'away_abbr': 'jax',
'away_score': 27,
'home_name': 'Indianapolis Colts',
'home_abbr': 'clt',
'home_score': 0,
'winning_name': 'Jacksonville Jaguars',
'winning_abbr': 'jax',
'losing_name': 'Indianapolis Colts',
'losing_abbr': 'clt'},
{'boxscore': '201710220cle',
'away_name': 'Tennessee Titans',
'away_abbr': 'oti',
'away_score': 12,
'home_name': 'Cleveland Browns',
'home_abbr': 'cle',
'home_score': 9,
'winning_name': 'Tennessee Titans',
'winning_abbr': 'oti',
'losing_name': 'Cleveland Browns',
'losing_abbr': 'cle'},
{'boxscore': '201710220sfo',
'away_name': 'Dallas Cowboys',
'away_abbr': 'dal',
'away_score': 40,
'home_name': 'San Francisco 49ers',
'home_abbr': 'sfo',
'home_score': 10,
'winning_name': 'Dallas Cowboys',
'winning_abbr': 'dal',
'losing_name': 'San Francisco 49ers',
'losing_abbr': 'sfo'},
{'boxscore': '201710220sdg',
'away_name': 'Denver Broncos',
'away_abbr': 'den',
'away_score': 0,
'home_name': 'Los Angeles Chargers',
'home_abbr': 'sdg',
'home_score': 21,
'winning_name': 'Los Angeles Chargers',
'winning_abbr': 'sdg',
'losing_name': 'Denver Broncos',
'losing_abbr': 'den'},
{'boxscore': '201710220pit',
'away_name': 'Cincinnati Bengals',
'away_abbr': 'cin',
'away_score': 14,
'home_name': 'Pittsburgh Steelers',
'home_abbr': 'pit',
'home_score': 29,
'winning_name': 'Pittsburgh Steelers',
'winning_abbr': 'pit',
'losing_name': 'Cincinnati Bengals',
'losing_abbr': 'cin'},
{'boxscore': '201710220nyg',
'away_name': 'Seattle Seahawks',
'away_abbr': 'sea',
'away_score': 24,
'home_name': 'New York Giants',
'home_abbr': 'nyg',
'home_score': 7,
'winning_name': 'Seattle Seahawks',
'winning_abbr': 'sea',
'losing_name': 'New York Giants',
'losing_abbr': 'nyg'},
{'boxscore': '201710220nwe',
'away_name': 'Atlanta Falcons',
'away_abbr': 'atl',
'away_score': 7,
'home_name': 'New England Patriots',
'home_abbr': 'nwe',
'home_score': 23,
'winning_name': 'New England Patriots',
'winning_abbr': 'nwe',
'losing_name': 'Atlanta Falcons',
'losing_abbr': 'atl'},
{'boxscore': '201710230phi',
'away_name': 'Washington Redskins',
'away_abbr': 'was',
'away_score': 24,
'home_name': 'Philadelphia Eagles',
'home_abbr': 'phi',
'home_score': 34,
'winning_name': 'Philadelphia Eagles',
'winning_abbr': 'phi',
'losing_name': 'Washington Redskins',
'losing_abbr': 'was'}
]
}
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search(self, *args, **kwargs):
result = Boxscores(7, 2017).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_invalid_end(self, *args, **kwargs):
result = Boxscores(7, 2017, 5).games
assert result == self.expected
@mock.patch('requests.get', side_effect=mock_pyquery)
def test_boxscores_search_multiple_weeks(self, *args, **kwargs):
expected = {
'7-2017': [
{'boxscore': '201710190rai',
'away_name': 'Kansas City Chiefs',
'away_abbr': 'kan',
'away_score': 30,
'home_name': 'Oakland Raiders',
'home_abbr': 'rai',
'home_score': 31,
'winning_name': 'Oakland Raiders',
'winning_abbr': 'rai',
'losing_name': 'Kansas City Chiefs',
'losing_abbr': 'kan'},
{'boxscore': '201710220chi',
'away_name': 'Carolina Panthers',
'away_abbr': 'car',
'away_score': 3,
'home_name': 'Chicago Bears',
'home_abbr': 'chi',
'home_score': 17,
'winning_name': 'Chicago Bears',
'winning_abbr': 'chi',
'losing_name': 'Carolina Panthers',
'losing_abbr': 'car'},
{'boxscore': '201710220buf',
'away_name': 'Tampa Bay Buccaneers',
'away_abbr': 'tam',
'away_score': 27,
'home_name': 'Buffalo Bills',
'home_abbr': 'buf',
'home_score': 30,
'winning_name': 'Buffalo Bills',
'winning_abbr': 'buf',
'losing_name': 'Tampa Bay Buccaneers',
'losing_abbr': 'tam'},
{'boxscore': '201710220ram',
'away_name': 'Arizona Cardinals',
'away_abbr': 'crd',
'away_score': 0,
'home_name': 'Los Angeles Rams',
'home_abbr': 'ram',
'home_score': 33,
'winning_name': 'Los Angeles Rams',
'winning_abbr': 'ram',
'losing_name': 'Arizona Cardinals',
'losing_abbr': 'crd'},
{'boxscore': '201710220min',
'away_name': 'Baltimore Ravens',
'away_abbr': 'rav',
'away_score': 16,
'home_name': 'Minnesota Vikings',
'home_abbr': 'min',
'home_score': 24,
'winning_name': 'Minnesota Vikings',
'winning_abbr': 'min',
'losing_name': 'Baltimore Ravens',
'losing_abbr': 'rav'},
{'boxscore': '201710220mia',
'away_name': 'New York Jets',
'away_abbr': 'nyj',
'away_score': 28,
'home_name': 'Miami Dolphins',
'home_abbr': 'mia',
'home_score': 31,
'winning_name': 'Miami Dolphins',
'winning_abbr': 'mia',
'losing_name': 'New York Jets',
'losing_abbr': 'nyj'},
{'boxscore': '201710220gnb',
'away_name': 'New Orleans Saints',
'away_abbr': 'nor',
'away_score': 26,
'home_name': 'Green Bay Packers',
'home_abbr': 'gnb',
'home_score': 17,
'winning_name': 'New Orleans Saints',
'winning_abbr': 'nor',
'losing_name': 'Green Bay Packers',
'losing_abbr': 'gnb'},
{'boxscore': '201710220clt',
'away_name': 'Jacksonville Jaguars',
'away_abbr': 'jax',
'away_score': 27,
'home_name': 'Indianapolis Colts',
'home_abbr': 'clt',
'home_score': 0,
'winning_name': 'Jacksonville Jaguars',
'winning_abbr': 'jax',
'losing_name': 'Indianapolis Colts',
'losing_abbr': 'clt'},
{'boxscore': '201710220cle',
'away_name': 'Tennessee Titans',
'away_abbr': 'oti',
'away_score': 12,
'home_name': 'Cleveland Browns',
'home_abbr': 'cle',
'home_score': 9,
'winning_name': 'Tennessee Titans',
'winning_abbr': 'oti',
'losing_name': 'Cleveland Browns',
'losing_abbr': 'cle'},
{'boxscore': '201710220sfo',
'away_name': 'Dallas Cowboys',
'away_abbr': 'dal',
'away_score': 40,
'home_name': 'San Francisco 49ers',
'home_abbr': 'sfo',
'home_score': 10,
'winning_name': 'Dallas Cowboys',
'winning_abbr': 'dal',
'losing_name': 'San Francisco 49ers',
'losing_abbr': 'sfo'},
{'boxscore': '201710220sdg',
'away_name': 'Denver Broncos',
'away_abbr': 'den',
'away_score': 0,
'home_name': 'Los Angeles Chargers',
'home_abbr': 'sdg',
'home_score': 21,
'winning_name': 'Los Angeles Chargers',
'winning_abbr': 'sdg',
'losing_name': 'Denver Broncos',
'losing_abbr': 'den'},
{'boxscore': '201710220pit',
'away_name': 'Cincinnati Bengals',
'away_abbr': 'cin',
'away_score': 14,
'home_name': 'Pittsburgh Steelers',
'home_abbr': 'pit',
'home_score': 29,
'winning_name': 'Pittsburgh Steelers',
'winning_abbr': 'pit',
'losing_name': 'Cincinnati Bengals',
'losing_abbr': 'cin'},
{'boxscore': '201710220nyg',
'away_name': 'Seattle Seahawks',
'away_abbr': 'sea',
'away_score': 24,
'home_name': 'New York Giants',
'home_abbr': 'nyg',
'home_score': 7,
'winning_name': 'Seattle Seahawks',
'winning_abbr': 'sea',
'losing_name': 'New York Giants',
'losing_abbr': 'nyg'},
{'boxscore': '201710220nwe',
'away_name': 'Atlanta Falcons',
'away_abbr': 'atl',
'away_score': 7,
'home_name': 'New England Patriots',
'home_abbr': 'nwe',
'home_score': 23,
'winning_name': 'New England Patriots',
'winning_abbr': 'nwe',
'losing_name': 'Atlanta Falcons',
'losing_abbr': 'atl'},
{'boxscore': '201710230phi',
'away_name': 'Washington Redskins',
'away_abbr': 'was',
'away_score': 24,
'home_name': 'Philadelphia Eagles',
'home_abbr': 'phi',
'home_score': 34,
'winning_name': 'Philadelphia Eagles',
'winning_abbr': 'phi',
'losing_name': 'Washington Redskins',
'losing_abbr': 'was'}
],
'8-2017': [
{'boxscore': '201710260rav',
'away_name': 'Miami Dolphins',
'away_abbr': 'mia',
'away_score': 0,
'home_name': 'Baltimore Ravens',
'home_abbr': 'rav',
'home_score': 40,
'winning_name': 'Baltimore Ravens',
'winning_abbr': 'rav',
'losing_name': 'Miami Dolphins',
'losing_abbr': 'mia'},
{'boxscore': '201710290cle',
'away_name': 'Minnesota Vikings',
'away_abbr': 'min',
'away_score': 33,
'home_name': 'Cleveland Browns',
'home_abbr': 'cle',
'home_score': 16,
'winning_name': 'Minnesota Vikings',
'winning_abbr': 'min',
'losing_name': 'Cleveland Browns',
'losing_abbr': 'cle'},
{'boxscore': '201710290buf',
'away_name': 'Oakland Raiders',
'away_abbr': 'rai',
'away_score': 14,
'home_name': 'Buffalo Bills',
'home_abbr': 'buf',
'home_score': 34,
'winning_name': 'Buffalo Bills',
'winning_abbr': 'buf',
'losing_name': 'Oakland Raiders',
'losing_abbr': 'rai'},
{'boxscore': '201710290tam',
'away_name': 'Carolina Panthers',
'away_abbr': 'car',
'away_score': 17,
'home_name': 'Tampa Bay Buccaneers',
'home_abbr': 'tam',
'home_score': 3,
'winning_name': 'Carolina Panthers',
'winning_abbr': 'car',
'losing_name': 'Tampa Bay Buccaneers',
'losing_abbr': 'tam'},
{'boxscore': '201710290phi',
'away_name': 'San Francisco 49ers',
'away_abbr': 'sfo',
'away_score': 10,
'home_name': 'Philadelphia Eagles',
'home_abbr': 'phi',
'home_score': 33,
'winning_name': 'Philadelphia Eagles',
'winning_abbr': 'phi',
'losing_name': 'San Francisco 49ers',
'losing_abbr': 'sfo'},
{'boxscore': '201710290nyj',
'away_name': 'Atlanta Falcons',
'away_abbr': 'atl',
'away_score': 25,
'home_name': 'New York Jets',
'home_abbr': 'nyj',
'home_score': 20,
'winning_name': 'Atlanta Falcons',
'winning_abbr': 'atl',
'losing_name': 'New York Jets',
'losing_abbr': 'nyj'},
{'boxscore': '201710290nwe',
'away_name': 'Los Angeles Chargers',
'away_abbr': 'sdg',
'away_score': 13,
'home_name': 'New England Patriots',
'home_abbr': 'nwe',
'home_score': 21,
'winning_name': 'New England Patriots',
'winning_abbr': 'nwe',
'losing_name': 'Los Angeles Chargers',
'losing_abbr': 'sdg'},
{'boxscore': '201710290nor',
'away_name': 'Chicago Bears',
'away_abbr': 'chi',
'away_score': 12,
'home_name': 'New Orleans Saints',
'home_abbr': 'nor',
'home_score': 20,
'winning_name': 'New Orleans Saints',
'winning_abbr': 'nor',
'losing_name': 'Chicago Bears',
'losing_abbr': 'chi'},
{'boxscore': '201710290cin',
'away_name': 'Indianapolis Colts',
'away_abbr': 'clt',
'away_score': 23,
'home_name': 'Cincinnati Bengals',
'home_abbr': 'cin',
'home_score': 24,
'winning_name': 'Cincinnati Bengals',
'winning_abbr': 'cin',
'losing_name': 'Indianapolis Colts',
'losing_abbr': 'clt'},
{'boxscore': '201710290sea',
'away_name': 'Houston Texans',
'away_abbr': 'htx',
'away_score': 38,
'home_name': 'Seattle Seahawks',
'home_abbr': 'sea',
'home_score': 41,
'winning_name': 'Seattle Seahawks',
'winning_abbr': 'sea',
'losing_name': 'Houston Texans',
'losing_abbr': 'htx'},
{'boxscore': '201710290was',
'away_name': 'Dallas Cowboys',
'away_abbr': 'dal',
'away_score': 33,
'home_name': 'Washington Redskins',
'home_abbr': 'was',
'home_score': 19,
'winning_name': 'Dallas Cowboys',
'winning_abbr': 'dal',
'losing_name': 'Washington Redskins',
'losing_abbr': 'was'},
{'boxscore': '201710290det',
'away_name': 'Pittsburgh Steelers',
'away_abbr': 'pit',
'away_score': 20,
'home_name': 'Detroit Lions',
'home_abbr': 'det',
'home_score': 15,
'winning_name': 'Pittsburgh Steelers',
'winning_abbr': 'pit',
'losing_name': 'Detroit Lions',
'losing_abbr': 'det'},
{'boxscore': '201710300kan',
'away_name': 'Denver Broncos',
'away_abbr': 'den',
'away_score': 19,
'home_name': 'Kansas City Chiefs',
'home_abbr': 'kan',
'home_score': 29,
'winning_name': 'Kansas City Chiefs',
'winning_abbr': 'kan',
'losing_name': 'Denver Broncos',
'losing_abbr': 'den'}
]
}
result = Boxscores(7, 2017, 8).games
assert result == expected
|
[
"robert.d.clark@hpe.com"
] |
robert.d.clark@hpe.com
|
ac94fd5eecb7bf3e1c65e43777aab51fbfe786b9
|
3ba0c680f17c921c8826c0c5b8157e0e9e1bceb9
|
/pokemon_entities/migrations/0005_pokemon_description.py
|
c442a830a3b5988f804f7ac6d062b89f6c2c8e5c
|
[] |
no_license
|
n1k0din/pokemon-map
|
302680b21ec1c7df3121da13876162c61d7928d7
|
c391e737d8faf25e596f585a3a72a35038106664
|
refs/heads/master
| 2023-05-07T10:52:50.354599
| 2021-05-15T13:36:45
| 2021-05-15T13:36:45
| 367,273,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# Generated by Django 3.1.11 on 2021-05-14 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokemon_entities', '0004_auto_20210514_1057'),
]
operations = [
migrations.AddField(
model_name='pokemon',
name='description',
field=models.TextField(blank=True, null=True),
),
]
|
[
"nik726@gmail.com"
] |
nik726@gmail.com
|
9993e1cf4b160b12b6a2d804a2c3d24dece21224
|
18fa1b0a78d82ecdeecb3e1955030ba9b2cbe776
|
/blog/urls.py
|
e99883553251e05998eb647b166e3964a2c65598
|
[] |
no_license
|
reemrantisi/Blog
|
9192736b7e2abcd5ba21c64df8a2b0e01f3578b6
|
e88662a20f90e8b277eaee13ae8427df92346a80
|
refs/heads/master
| 2023-01-13T21:57:23.352399
| 2020-11-08T09:15:19
| 2020-11-08T09:15:19
| 311,018,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
py
|
from . import views
from django.urls import path
urlpatterns = [
path('', views.PostList.as_view(), name='home'),
#path('<slug:title>/', views.post_detail, name='post_detail'),
path('<str:username>', views.home, name='user_posts'),
]
|
[
"reem@gmail.com"
] |
reem@gmail.com
|
2f3a07dd4dc6968861691feb4d8eec611112ac26
|
f71d77aaec526cf71ff03b5e8203917de50f0a91
|
/Novelsssss/start.py
|
de014a720bdd03556c732e739b8c612aed8e7408
|
[] |
no_license
|
zhangbailong945/pyqt5test
|
f9e272fb00e53528a045ac374cfb1a188d4d5d48
|
8032d2b44dbe2dcd4d01b802041b2b29265c8409
|
refs/heads/master
| 2020-04-06T13:47:31.328968
| 2019-07-23T07:31:04
| 2019-07-23T07:31:04
| 157,514,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
import sys
from PyQt5.QtWidgets import QApplication,QWidget,QVBoxLayout
from Libraries.Views.Ui_FramelessWindow import FramelessWindow
if __name__=='__main__':
app=QApplication(sys.argv)
w=FramelessWindow()
w.resize(950,400)
w.move(20,200)
w.show()
sys.exit(app.exec_())
|
[
"1207549344@qq.com"
] |
1207549344@qq.com
|
6063be56d0792ceb5dd279fab6f4e16f812946d9
|
2e74c7339c63385172629eaa84680a85a4731ee9
|
/como/como/dws/urolithiasis/scale_symp_urolithiasis.py
|
d46755f1010c3712ebfc43f6d49433e3669012a8
|
[] |
no_license
|
zhusui/ihme-modeling
|
04545182d0359adacd22984cb11c584c86e889c2
|
dfd2fe2a23bd4a0799b49881cb9785f5c0512db3
|
refs/heads/master
| 2021-01-20T12:30:52.254363
| 2016-10-11T00:33:36
| 2016-10-11T00:33:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,030
|
py
|
import numpy as np
import pandas as pd
import MySQLdb
import sys
sys.path.append('/home/j/WORK/04_epi/02_models/01_code/02_severity/01_code/prod')
import gbd_utils
gbd = gbd_utils.GbdConventions()
# Read dw file
standard_dws = pd.read_csv("/home/j/WORK/04_epi/03_outputs/01_code/02_dw/02_standard/dw.csv")
gen_med_dw = standard_dws[standard_dws.healthstate=="generic_medication"]
# Get % symptomatic urolithiasis to apply to generic_medication
symp_urolith_prop = pd.read_csv("/home/j/WORK/04_epi/01_database/02_data/urinary_urolithiasis/04_models/gbd2013/chronic_urolithiasis_DW_iso3_distribution.csv")
symp_urolith_prop = symp_urolith_prop[symp_urolith_prop.year.isin([1990,1995,2000,2005,2010,2013])].reset_index(drop=True)
# Draw generation function
def beta_draws(row):
row = pd.DataFrame([row])
sd = abs((row.upper - row.lower)/(2*1.96))
mean = row.proportion_dw
sample_size = mean*(1-mean)/sd**2
alpha = mean*sample_size
beta = (1-mean)*sample_size
draws = pd.Series(np.random.beta(alpha, beta, size=1000))
return draws
# Generate proportion draws
prop_draws = symp_urolith_prop.apply(beta_draws, axis=1)
# Multiply DW draws by proportions
weighted_dws = pd.DataFrame(gen_med_dw.filter(like='draw').as_matrix() * prop_draws.as_matrix())
weighted_dws.columns = ['draw'+str(i) for i in range(1000)]
# Format output and write to file
symp_urolith_prop = symp_urolith_prop.join(weighted_dws)
symp_urolith_prop = symp_urolith_prop.merge(gbd.get_locations()[['location_id','local_id']])
symp_urolith_prop['healthstate'] = "urolith_symp"
symp_urolith_prop['healthstate_id'] = 822
symp_urolith_prop = symp_urolith_prop[['local_id','year','healthstate_id','healthstate']+['draw'+str(i) for i in range(1000)]]
symp_urolith_prop.rename(columns={'local_id':'iso3'}, inplace=True)
symp_urolith_prop.to_csv("/home/j/WORK/04_epi/03_outputs/01_code/02_dw/03_custom/urolith_symp_dws.csv", index=False)
symp_urolith_prop.to_csv("/clustertmp/WORK/04_epi/03_outputs/01_code/02_dw/03_custom/urolith_symp_dws.csv", index=False)
|
[
"nsidles@uw.edu"
] |
nsidles@uw.edu
|
9cefb9fab54b2c1b00b9ef78bbbf42b5aabce9dd
|
d4cae0ad3b7dd457e9eeef1714f99c79d3e4f72c
|
/day25/testting/tcp_client.py
|
365a23e15c5d67cc9b44f31a7bc6db5b7b118be7
|
[] |
no_license
|
Fixdq/python-learn
|
698e823bdba2b705bf04dd81cef0abbb5ab5c0ff
|
4c2b2bb75c62321ecbae0e50834c1f10b65f0e7c
|
refs/heads/master
| 2020-03-08T15:56:37.796636
| 2018-06-09T14:42:16
| 2018-06-09T14:42:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : Fixdq
# @File : tcp_client.py
# @Software: PyCharm
import json
from socket import *
import struct
ip_port = ('127.155.101.25', 8090)
client = socket(AF_INET, SOCK_STREAM)
client.connect_ex(ip_port)
while True:
cmd = input('>>>>>:').strip()
client.send(cmd.encode('utf-8'))
    # receive the outer header (fixed length: 4 bytes)
head_head = client.recv(4)
    # unpack the length of the custom header
head_len = int(struct.unpack('i',head_head)[0])
    # receive the custom header
head = client.recv(head_len)
    # read the real payload length out of the custom header
body_len = json.loads(head.decode('utf-8'))['len']
    # receive the actual payload
data = client.recv(body_len)
print(data.decode('utf-8'))
client.close()
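# Added sketch of the matching server-side framing (illustrative, not part of
# this file): pack a 4-byte struct header holding the length of a JSON header,
# which in turn carries the body length under "len":
#
#   body = response.encode('utf-8')
#   head = json.dumps({'len': len(body)}).encode('utf-8')
#   conn.send(struct.pack('i', len(head)))
#   conn.send(head)
#   conn.send(body)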
|
[
"fixd.quan@aliyun.com"
] |
fixd.quan@aliyun.com
|
ba73f0b8bb22bfb6637144bb8ad5c9b9ac380524
|
9d45131eb90eaec3388b53f8e030c5093f794c9f
|
/com/bridgelabz/quantitymeasurement/Converter.py
|
59775beb5b0fd25752aeea86039d4781d707a034
|
[] |
no_license
|
birajit95/Quantity_Measurement_TDD
|
f5bbbf16936e5ff4cc287627d8e1ea3583f3ab44
|
1df67748fc7549740eaeed1a6f6e282a47d48d81
|
refs/heads/master
| 2023-01-31T14:12:14.550820
| 2020-12-12T05:22:54
| 2020-12-12T05:22:54
| 319,701,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
from com.bridgelabz.quantitymeasurement.Unit import Length, Volume, Weight, Temperature
class Converter:
BaseUnitDict = {
type(Length.Inch): Length.Inch,
type(Volume.Ml): Volume.Ml,
type(Weight.Gram): Weight.Gram,
type(Temperature.C): Temperature.C
}
@staticmethod
def convert(value1Unit, value2Unit, value1, value2):
if isinstance(value1Unit, Temperature):
value1 = value1Unit.value * (value1 if value1Unit is Temperature.C else value1 - 32)
value2 = value2Unit.value * (value2 if value2Unit is Temperature.C else value2 - 32)
return value1, value2
return value1Unit.value * value1, value2Unit.value * value2
|
[
"birajit95@gmail.com"
] |
birajit95@gmail.com
|
dd8e0689de0a7ce7483d0c413046514e35f5f54a
|
ace409e56a2a31bc30878f84b28427f0af283bb1
|
/polls/tests.py
|
fc03c76b26226c9642d6a1cea337a9eb30483ea8
|
[] |
no_license
|
mikeyshean/django-test
|
9867bc47301507486f032e667cd765491c68c348
|
287df0336bba7195ba423f818e24b66d86d824de
|
refs/heads/master
| 2021-01-10T06:35:56.137001
| 2015-10-23T21:26:58
| 2015-10-23T21:26:58
| 44,765,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,143
|
py
|
from django.test import TestCase
import datetime
from django.utils import timezone
from polls.models import Question
from django.core.urlresolvers import reverse
def create_question(question_text, days, choice):
time = timezone.now() + datetime.timedelta(days=days)
question = Question.objects.create(question_text=question_text,
pub_date=time)
if choice:
question.choices.create(choice_text="Hello", votes=0)
return question
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exist, display message.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_a_past_question(self):
"""
Display questions with pub_date in the past.
"""
create_question(question_text="Past", days=-30, choice=True)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a future pub_date should not be displayed.
"""
create_question(question_text="Future", days=30, choice=True)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.",
status_code=200)
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_future_question_and_past_question(self):
"""
Only display past question.
"""
create_question(question_text="Future", days=30, choice=True)
create_question(question_text="Past", days=-30, choice=True)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past>']
)
def test_index_view_with_two_past_questions(self):
"""
Displays multiple questions.
"""
create_question(question_text="Past1", days=-30, choice=True)
create_question(question_text="Past2", days=-31, choice=True)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past1>', '<Question: Past2>']
)
def test_index_view_with_question_without_choices(self):
"""
Does not display questions without choices
"""
create_question(question_text="Good question", days=-5, choice=False)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
[],
)
class QuestiondIndexDetailTests(TestCase):
def test_detail_view_with_a_future_question(self):
"""
Return 404 for a detail view of a question with a future
pub_date
"""
future_question = create_question(question_text="Future", days=5, choice=True)
response = self.client.get(reverse('polls:detail',
args=(future_question.id,)))
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_question(self):
"""
Should return detail view of question with past pub_date
"""
past_question = create_question(question_text="Past", days=-5, choice=True)
response = self.client.get(reverse('polls:detail',
args=(past_question.id,)))
self.assertContains(response, past_question.question_text, status_code=200)
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions with
pub_date in the future
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertEqual(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions with
pub_date older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertEqual(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions with
pub_date within last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertEqual(recent_question.was_published_recently(), True)
|
[
"mdshean2@gmail.com"
] |
mdshean2@gmail.com
|
8b1ce587404581722d8d20cb341ad064b3756a91
|
305913537e02c6fdd577f3537415fd6990980219
|
/_xml_to_json.py
|
8e99d6cf45042a0bf555da70513e1c82f6e4c05f
|
[] |
no_license
|
taeyang916/yolov1
|
00e845ce34224cb7fe3bb92fec29dcfb6a4fd736
|
9ef7bb8aaa899440b8a8696f756f0695da52e1f8
|
refs/heads/main
| 2023-06-04T04:28:08.879085
| 2021-06-20T13:40:05
| 2021-06-20T13:40:05
| 372,874,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
import json
from itertools import chain
import xmltodict
import os
from xml.etree.ElementTree import parse
# path
dataset_path = '/home/vim/Desktop/tykim/workspace/VOC2012'
IMAGE_FOLDER = 'JPEGImages'
ANNOTATIONS_FOLDER = "Annotations"
json_list = []
ann_root, ann_dir, ann_files = next(os.walk(os.path.join(dataset_path, ANNOTATIONS_FOLDER)))
for xml_file in ann_files:
xml_ = open(os.path.join(ann_root, xml_file), "r")
xmlString = xmltodict.parse(xml_.read())
parsed_xml = xmlString["annotation"]
json_list.append(parsed_xml)
# for (i, xml_file) in enumerate(ann_files):
# xml_ = open(f"/home/vim/Desktop/tykim/workspace/VOC2012/json/{xml_file}.json", "r")
# xmlString = xmltodict.parse(xml_.read())
# parsed_xml = xmlString["annotation"]
# globals()['json_'+str(i)] = parsed_xml
# for (i, dt) in enumerate(ann_files):
# json_list.append(globals()['json_'+str(i)])
print(json_list[0])
parsed_json_list = [{"annotations": json_list}]
print(parsed_json_list[0])
# slack = list(chain.from_iterable(json_list["annotation"]))
# print(slack)
# with open("/home/vim/Desktop/tykim/workspace/VOC2012/json/annotations.json", 'w') as ann:
# json.dump(json_list, ann, indent=4)
|
[
"taeyang916@naver.com"
] |
taeyang916@naver.com
|
a96550ddf975b264a333377b874bb94f4677be26
|
d134b17d6b1fbcd6d240d01319377b8f2c5863cb
|
/python/hosts_file/update_hosts_file.py
|
4059123028974d04a71dca6cfb8a2d53151df76f
|
[
"MIT"
] |
permissive
|
lijie2000/devops_public
|
1de9fa87c6e767790a732a4a74e5e91d7df1f7cb
|
3efb981d75cf86602457e209ebec1e7f11c29bb9
|
refs/heads/master
| 2021-01-21T18:34:21.049233
| 2017-05-21T04:14:44
| 2017-05-21T04:14:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : update_hosts_file.py
## Author : Denny <denny@dennyzhang.com>
## Created : <2017-05-03>
## Updated: Time-stamp: <2017-05-11 14:13:42>
## Description :
## Load an extra hosts binding into /etc/hosts
## Sample:
## python ./examine_hosts_file.py --extra_hosts_file /tmp/hosts
##-------------------------------------------------------------------
import os, sys
import shutil
import argparse
import socket, datetime
import logging
log_file = "/var/log/%s.log" % (os.path.basename(__file__).rstrip('\.py'))
logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
def load_hostsfile_to_dict(host_file):
host_dict = {}
with open(host_file,'r') as f:
for row in f:
row = row.strip()
if row.startswith('#') or row == '':
continue
entry_l = row.split()
if '::' in entry_l[0]:
continue
ip = entry_l[0]
if len(entry_l) == 2:
hostname = entry_l[1]
host_dict[hostname] = ip
else:
for hostname in entry_l[1:]:
host_dict[hostname] = ip
return host_dict
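# Illustrative sketch of the returned mapping: a hosts line such as
#   "10.0.0.5 web1 web1.example.com"
# yields {"web1": "10.0.0.5", "web1.example.com": "10.0.0.5"}; comments,
# blank lines and IPv6 ("::") entries are skipped.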
###############################################################
if __name__ == '__main__':
# get parameters from users
parser = argparse.ArgumentParser()
parser.add_argument('--extra_hosts_file', required=False, default="", \
help="Load extra hosts into /etc/hosts", type=str)
parser.add_argument('--skip_current_hostname', required=False, dest='skip_current_hostname', \
action='store_true', default=False, \
help="Skip the binding for current hostname, if it's specified in --extra_hosts_file")
l = parser.parse_args()
extra_hosts_file = l.extra_hosts_file
skip_current_hostname = l.skip_current_hostname
current_hosts_dict = load_hostsfile_to_dict("/etc/hosts")
extra_hosts_dict = load_hostsfile_to_dict(extra_hosts_file)
has_changed = False
has_backup = False
current_hostname = socket.gethostname()
for hostname in extra_hosts_dict:
if skip_current_hostname is True and hostname == current_hostname:
continue
if hostname not in current_hosts_dict:
            if has_backup is False:
                host_backup_file = "/etc/hosts.%s" % \
                    (datetime.datetime.utcnow().strftime("%Y-%m-%d_%H%M%S"))
                logging.info("Backup /etc/hosts to %s" % (host_backup_file))
                # Actually perform the backup that is logged above
                shutil.copyfile("/etc/hosts", host_backup_file)
                has_backup = True
            with open("/etc/hosts", "a") as hosts_f:
                hosts_f.write("%s %s\n" % (extra_hosts_dict[hostname], hostname))
            logging.info("Append /etc/hosts: (%s:%s)" % (hostname, extra_hosts_dict[hostname]))
            has_changed = True
else:
if current_hosts_dict[hostname] != extra_hosts_dict[hostname]:
logging.error("ERROR /etc/hosts is conflict with %s for entry of hostname(%s)" % \
(extra_hosts_file, hostname))
sys.exit(1)
if has_changed is True:
logging.info("OK: /etc/hosts is good after some updates.")
else:
logging.info("OK: /etc/hosts is gook with no changes.")
## File : update_hosts_file.py ends
|
[
"denny@dennyzhang.com"
] |
denny@dennyzhang.com
|
e84be353b6b25278129e6289b26839157449bfd4
|
1e10e0cd035418098bdb5c667ffca0fb9fc3e1af
|
/pages/migrations/0003_auto_20210701_0931.py
|
09fe6eec193db2b1c0cc97d9303f61993b78a55a
|
[] |
no_license
|
rahuljain08/Ecommerce-Website
|
98c4ef2c3517a243ad3e1aba41502309ab48df7b
|
a6978a0a15abeac5aad46dfb0255d7be561fd5c8
|
refs/heads/master
| 2023-06-24T23:51:45.265591
| 2021-07-22T05:59:58
| 2021-07-22T05:59:58
| 388,085,670
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
# Generated by Django 3.1.4 on 2021-07-01 04:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_cart_user'),
]
operations = [
migrations.AlterField(
model_name='order',
name='placed_on',
field=models.DateField(auto_now=True),
),
]
|
[
"rahuljain8102@gmail.com"
] |
rahuljain8102@gmail.com
|
097bc0c868a18aad30eac69b86879e4f315ca032
|
7117cab1e015e1f0298aad64535116e57413f13a
|
/tutorial/datascience/seabornematrix.py
|
5ba9b9567a70c98bcd3eb0d35e1a90440f0a9417
|
[] |
no_license
|
udeshpa/python
|
a1d2a0779749abaad6d93ffd58aac6c3f6de035f
|
d7150bd9d1030f7a02a9b364858fe7565ebfdb24
|
refs/heads/main
| 2023-03-20T19:32:52.842280
| 2021-03-13T23:36:52
| 2021-03-13T23:36:52
| 347,502,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
import seaborn as sns
import matplotlib.pyplot as plt
tips = sns.load_dataset('tips')
flights = sns.load_dataset('flights')
print(tips.head())
print(flights.head())
tc= tips.corr()
print(tc)
sns.heatmap(tc, annot=True, cmap='coolwarm')
plt.show()
pt = flights.pivot_table(index='month', columns='year', values='passengers')
print(pt)
sns.heatmap(pt, linecolor='white', linewidths=1)
plt.show()
sns.clustermap(pt, cmap='coolwarm', standard_scale=1)
plt.show()
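# Note: the pivot table has months as rows, years as columns and passenger
# counts as cell values, so the heatmap and clustermap above visualize
# seasonal and year-over-year structure in the flights data.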
|
[
"udeshpa@gmail.com"
] |
udeshpa@gmail.com
|
212a4c3bbf7f97c5a14636a3ebba1e1888715850
|
90dfb4a7b1fdd8f431305e26d261c1d1df277b19
|
/src/RequestHandlerWSGIServerTraceCall.py
|
d52ad5e85665666c2327299f3e19ac1c2be5f57e
|
[
"MIT"
] |
permissive
|
AoiKuiyuyou/AoikBottleStudy
|
9f589abb6e18ca713dbe6907d1f958d9129756d2
|
7892e9019027f0785998958e9453feffe51e7371
|
refs/heads/master
| 2020-07-03T16:41:17.953117
| 2016-11-24T03:01:20
| 2016-11-24T03:01:20
| 74,246,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,243
|
py
|
# coding: utf-8
from __future__ import absolute_import
# Standard imports
import sys
import logging
# External imports
import aoiktracecall.config
import aoiktracecall.logging
import aoiktracecall.trace
# Traced modules should be imported after `trace_calls_in_specs` is called.
# Set configs
aoiktracecall.config.set_configs({
# Whether use wrapper class.
#
# Wrapper class is more adaptive to various types of callables but will
# break if the code that was using the original function requires a real
# function, instead of a callable. Known cases include PyQt slot functions.
#
'WRAP_USING_WRAPPER_CLASS': True,
# Whether wrap base class attributes in a subclass.
#
# If enabled, wrapper attributes will be added to a subclass even if the
# wrapped original attributes are defined in a base class.
#
# This helps in the case that base class attributes are implemented in C
# extensions thus can not be traced directly.
#
'WRAP_BASE_CLASS_ATTRIBUTES': True,
# Whether highlight title shows `self` argument's class instead of called
# function's defining class.
#
# This helps reveal the real type of the `self` argument on which the
# function is called.
#
'HIGHLIGHT_TITLE_SHOW_SELF_CLASS': True,
# Highlight title line character count max
'HIGHLIGHT_TITLE_LINE_CHAR_COUNT_MAX': 265,
# Whether show function's file path and line number in pre-call hook
'SHOW_FUNC_FILE_PATH_LINENO_PRE_CALL': True,
# Whether show function's file path and line number in post-call hook
'SHOW_FUNC_FILE_PATH_LINENO_POST_CALL': False,
# Whether wrapper function should debug info dict's URIs
'WRAPPER_FUNC_DEBUG_INFO_DICT_URIS': False,
# Whether printing handler should debug arguments inspect info
'PRINTING_HANDLER_DEBUG_ARGS_INSPECT_INFO': False,
# Whether printing handler should debug info dict.
#
# Notice info dict contains called function's arguments and printing these
# arguments may cause errors.
#
'PRINTING_HANDLER_DEBUG_INFO_DICT': False,
# Whether printing handler should debug info dict, excluding arguments.
#
# Use this if `PRINTING_HANDLER_DEBUG_INFO_DICT` causes errors.
#
'PRINTING_HANDLER_DEBUG_INFO_DICT_SAFE': False,
})
# Add debug logger handler
aoiktracecall.logging.get_debug_logger().addHandler(logging.NullHandler())
# Add info logger handler
aoiktracecall.logging.get_info_logger().addHandler(
logging.StreamHandler(sys.stdout)
)
# Add error logger handler
aoiktracecall.logging.get_error_logger().addHandler(
logging.StreamHandler(sys.stderr)
)
# Constant for `highlight`
HL = 'highlight'
# Create trace specs.
#
# The order of the specs determines the matching precedence, with one exception
# that URI patterns consisting of only alphanumerics, underscores, and dots are
# considered as exact URI matching, and will have higher precedence over all
# regular expression matchings. The rationale is that a spec with exact URI
# matching is more specific therefore should not be shadowed by any spec with
# regular expression matching that has appeared early.
#
trace_specs = [
# ----- aoiktracecall -----
('aoiktracecall([.].+)?', False),
# ----- * -----
# Tracing `__setattr__` will reveal instances' attribute assignments.
# Notice Python 2 old-style classes have no `__setattr__` attribute.
('.+[.]__setattr__', True),
# Not trace most of double-underscore functions.
# Tracing double-underscore functions is likely to break code, e.g. tracing
# `__str__` or `__repr__` may cause infinite recursion.
('.+[.]__(?!init|call)[^.]+__', False),
# ----- socket._socketobject (Python 2), socket.socket (Python 3) -----
# Notice in Python 2, class `socket._socketobject`'s instance methods
# - recv
# - recvfrom
# - recv_into
# - recvfrom_into
# - send
# - sendto
# are dynamically generated in `_socketobject.__init__`. The approach of
# wrapping class attributes is unable to trace these methods.
('socket[.](_socketobject|socket)[.]__init__', HL),
('socket[.](_socketobject|socket)[.]bind', HL),
('socket[.](_socketobject|socket)[.]listen', HL),
('socket[.](_socketobject|socket)[.]connect', HL),
('socket[.](_socketobject|socket)[.]accept', HL),
('socket[.](_socketobject|socket)[.]setblocking', HL),
('socket[.](_socketobject|socket)[.]makefile', HL),
('socket[.](_socketobject|socket)[.]recv.*', HL),
('socket[.](_socketobject|socket)[.]send.*', HL),
('socket[.](_socketobject|socket)[.]shutdown', HL),
('socket[.](_socketobject|socket)[.]close', HL),
# ----- socket._fileobject (Python 2), socket.SocketIO (Python 3) -----
('socket[.](SocketIO|_fileobject)[.]__init__', HL),
('socket[.](SocketIO|_fileobject)[.]read.*', HL),
('socket[.](SocketIO|_fileobject)[.]write.*', HL),
('socket[.](SocketIO|_fileobject)[.]flush', HL),
('socket[.](SocketIO|_fileobject)[.]close', HL),
('socket[.](SocketIO|_fileobject)[.].+', True),
# ----- socket -----
('socket._intenum_converter', False),
('socket[.].+[.]_decref_socketios', False),
('socket[.].+[.]fileno', False),
# Ignore to avoid error in `__repr__` in Python 3
('socket[.].+[.]getpeername', False),
# Ignore to avoid error in `__repr__` in Python 3
('socket[.].+[.]getsockname', False),
('socket[.].+[.]gettimeout', False),
('socket([.].+)?', True),
# ----- select (Python 2) -----
('select.select', HL),
('select([.].+)?', True),
# ----- selectors (Python 3) -----
('selectors.SelectSelector.__init__', HL),
('selectors.SelectSelector.register', HL),
('selectors.SelectSelector.select', HL),
('selectors([.].+)?', True),
# ----- SocketServer (Python 2), socketserver (Python 3) -----
('SocketServer._eintr_retry', False),
('(socketserver|SocketServer)[.]BaseServer[.]__init__', HL),
('(socketserver|SocketServer)[.]TCPServer[.]__init__', HL),
('(socketserver|SocketServer)[.]ThreadingMixIn[.]process_request', HL),
(
'(socketserver|SocketServer)[.]ThreadingMixIn[.]'
'process_request_thread', HL
),
# Ignore to avoid error:
# ```
# 'WSGIServer' object has no attribute '_BaseServer__is_shut_down'
# ```
('(socketserver|SocketServer)[.]ThreadingMixIn[.].+', False),
('(socketserver|SocketServer)[.]BaseRequestHandler[.]__init__', HL),
('(socketserver|SocketServer)[.].+[.]service_actions', False),
('.+[.]server_bind', HL),
('.+[.]server_activate', HL),
('.+[.]serve_forever', HL),
('.+[.]_handle_request_noblock', HL),
('.+[.]get_request', HL),
('.+[.]verify_request', HL),
('.+[.]process_request', HL),
('.+[.]process_request_thread', HL),
('.+[.]finish_request', HL),
('.+[.]setup', HL),
('.+[.]handle', HL),
('.+[.]finish', HL),
('.+[.]shutdown_request', HL),
('.+[.]close_request', HL),
('.+[.]fileno', False),
('(socketserver|SocketServer)([.].+)?', True),
# ----- mimetools -----
# `mimetools` is used for parsing HTTP headers in Python 2.
('mimetools([.].+)?', True),
# ----- email -----
# `email` is used for parsing HTTP headers in Python 3.
('email([.].+)?', True),
# ----- BaseHTTPServer (Python 2), http.server (Python 3) -----
('.+[.]handle_one_request', HL),
('.+[.]parse_request', HL, 'hide_below'),
('.+[.]send_response', HL),
('.+[.]send_header', HL),
('.+[.]end_headers', HL),
# ----- BaseHTTPServer (Python 2) -----
('BaseHTTPServer([.].+)?', True),
# ----- http (Python 3) -----
('http([.].+)?', True),
# ----- wsgiref -----
('wsgiref.handlers.BaseHandler.write', HL),
('wsgiref.handlers.BaseHandler.close', HL),
('wsgiref.handlers.SimpleHandler.__init__', HL),
('wsgiref.handlers.SimpleHandler._write', HL),
('wsgiref.handlers.SimpleHandler._flush', HL),
('wsgiref.simple_server.WSGIServer.__init__', HL),
('wsgiref.simple_server.ServerHandler.__init__', HL),
('wsgiref.simple_server.ServerHandler.close', HL),
('.+[.]make_server', HL),
('.+[.]setup_environ', HL, 'hide_below'),
('.+[.]set_app', HL),
('.+[.]get_environ', HL, 'hide_below'),
('.+[.]get_app', HL),
('.+[.]run', HL),
('.+[.]start_response', HL),
('.+[.]finish_response', HL),
('.+[.]send_headers', HL),
('.+[.]cleanup_headers', HL),
('.+[.]send_preamble', HL),
('.+[.]finish_content', HL),
('.+[.]finish', HL),
('wsgiref([.].+)?', True),
# ----- bottle -----
# Ignore to avoid error
('bottle.app', False),
# Ignore to avoid error
('bottle.BaseRequest.__setattr__', False),
('bottle.Bottle._cast', HL),
('bottle.Bottle._handle', HL),
('bottle.Bottle.add_route', HL),
('bottle.Bottle.post', HL),
('bottle.Bottle.route', HL),
('bottle.Bottle.trigger_hook', HL),
('bottle.Bottle.wsgi', HL),
# Ignore to avoid error
('bottle.default_app', False),
('bottle.JSONPlugin.apply', HL),
# Ignore to avoid error
('bottle.LocalRequest.__setattr__', False),
('bottle.LocalRequest.bind', HL),
('bottle.LocalResponse.bind', HL),
('bottle.post', HL),
('bottle.Route.__init__', HL),
# Ignore to avoid error in `__repr__`
('bottle.Route.__setattr__', False),
('bottle.Route._make_callback', HL),
('bottle.Route.all_plugins', HL),
# Ignore to avoid error in `__repr__`
('bottle.Route.get_undecorated_callback', False),
('bottle.Router.add', HL),
('bottle.Router.build', HL),
('bottle.Router.match', HL),
('bottle.run', HL),
('bottle.ServerAdapter.__init__', HL),
# Ignore to avoid error in `__repr__`
('bottle.ServerAdapter.__setattr__', False),
('bottle.TemplatePlugin.apply', HL),
('bottle.update_wrapper', HL),
('bottle.WSGIRefServer.run', HL),
('bottle([.].+)?', True),
# ----- __main__ -----
('__main__.main', HL),
('__main__.CustomRequestHandler', HL),
('__main__([.].+)?', True),
]
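# Illustrative note on the precedence rule described above `trace_specs`
# (restating the comment, not new behavior): a URI made only of
# alphanumerics, underscores and dots, e.g. 'select.select' above, is exact
# matching, so it would not be shadowed by a regex spec like 'select([.].+)?'
# even if the regex appeared earlier in the list.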
# Create `printing_handler`'s filter function
def printing_handler_filter_func(info):
# Get on-wrap URI
onwrap_uri = info['onwrap_uri']
# If is one of these URIs
if onwrap_uri in {
'bottle.Bottle.__call__',
'bottle.Bottle._handle',
'bottle.Bottle.wsgi',
'bottle.LocalRequest.__setattr__',
'bottle.LocalRequest.bind',
'bottle.Router.match',
}:
# Get arguments inspect info
arguments_inspect_info = info['arguments_inspect_info']
# Get `environ` argument info
arg_info = arguments_inspect_info.fixed_arg_infos['environ']
# Hide value
arg_info.value = '{...}'
# Return info dict
return info
# Trace calls according to trace specs.
#
# This function will hook the module importing system in order to intercept and
# process newly imported modules. Callables in these modules which are matched
# by one of the trace specs will be wrapped to enable tracing.
#
# Already imported modules will be processed as well. But their callables may
# have been referenced elsewhere already, making the tracing incomplete. This
# explains why import hook is needed and why modules must be imported after
# `trace_calls_in_specs` is called.
#
aoiktracecall.trace.trace_calls_in_specs(
specs=trace_specs,
printing_handler_filter_func=printing_handler_filter_func,
)
# Remove to avoid being traced
del printing_handler_filter_func
# Import modules after `trace_calls_in_specs` is called
import bottle
# Notice do not use decorator to add URL-to-handler mapping here otherwise this
# function can not be traced.
#
def CustomRequestHandler():
"""
This request handler echoes request body in response body.
"""
    # Get `environ` dict
environ = bottle.request.environ
    # Get `Content-Length` header value
content_length_text = environ['CONTENT_LENGTH']
# If header value is empty
if not content_length_text:
# Set content length be 0
content_length = 0
# If header value is not empty
else:
# Convert to int
content_length = int(content_length_text)
# Get input file
input_file = environ['wsgi.input']
# Read request body
request_body = input_file.read(content_length)
# Return response body
return request_body
def main():
# Add URL-to-handler mapping
bottle.post('/')(CustomRequestHandler)
try:
# Run server
bottle.run(host='127.0.0.1', port=8000)
# If have `KeyboardInterrupt`
except KeyboardInterrupt:
# Stop gracefully
pass
# Trace calls in this module.
#
# Calling this function is needed because at the point `trace_calls_in_specs`
# is called, this module is being initialized, therefore callables defined
# after the call point are not accessible to `trace_calls_in_specs`.
#
aoiktracecall.trace.trace_calls_in_this_module()
# If is run as main module
if __name__ == '__main__':
# Call main function
exit(main())
|
[
"aoi.kuiyuyou@gmail.com"
] |
aoi.kuiyuyou@gmail.com
|
8b1fc6237f949e3f7e9091a48346d8177862e152
|
d9dadaae97e333193c52b0731eb980487e430a44
|
/vampires.py
|
58fceacc6ba2df6e00e63bb25f4903c3a3856cb9
|
[] |
no_license
|
kevinpatell/Object_Oriented_Programming_part2
|
c34857e6055eaaf0bdb09aeb04f7fc212f8f0227
|
551cd1e42cbf7b2ee99c791d363b397747dd2473
|
refs/heads/master
| 2020-05-23T06:55:32.603610
| 2019-05-15T21:12:26
| 2019-05-15T21:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
class Vampire:
coven = []
def __init__(self, name, age):
self.name = name
self.age = age
self.in_coffin = True
self.drank_blood_today = False
def __str__(self):
return f"Name of Vampire: {self.name}\nAge: {self.age}\nIn coffin: {self.in_coffin}\nDrank Blood Today: {self.drank_blood_today}"
def __repr__(self):
return f"{self.name} (Drank Blood: {self.drank_blood_today} In Coffin: {self.in_coffin})"
@classmethod
def create(cls, name, age):
new_vampire = Vampire(name, age)
cls.coven.append(new_vampire)
return new_vampire
@classmethod
    def sunrise(cls):
        # Rebuild the coven in place; removing from the list while iterating
        # over it skips elements.
        cls.coven = [vampire for vampire in cls.coven
                     if vampire.in_coffin and vampire.drank_blood_today]
        return f'Survivors after sunrise: {cls.coven}'
@classmethod
    def sunset(cls):
        # Reset each vampire's instance attributes (the original assigned to
        # the class, leaving every instance's own state unchanged).
        for vampire in cls.coven:
            vampire.drank_blood_today = False
            vampire.in_coffin = False
        return f"After sunset:\nAll these vampires are out of their coffins and looking for blood:\n {cls.coven}"
def drink_blood(self):
self.drank_blood_today = True
def go_home(self):
self.in_coffin = True
v1 = Vampire.create("Bella", 20000)
v2 = Vampire.create("Elizabeth", 100)
v3 = Vampire.create("Zurie", 200)
v4 = Vampire.create("Ambrosiat", 50000)
print(v1)
print()
print(v2)
print()
print(v3)
print()
print(v4)
print()
print(Vampire.coven[1].drank_blood_today)
print(Vampire.sunset())
(v1.drink_blood())
(v3.drink_blood())
(v4.drink_blood())
print()
print(Vampire.sunrise())
(v1.go_home())
(v2.go_home())
(v3.go_home())
(v4.go_home())
print(Vampire.coven[0].drank_blood_today)
print(Vampire.coven[1].drank_blood_today)
print(Vampire.coven[0].in_coffin)
print(Vampire.coven[2].in_coffin)
print()
print(Vampire.coven)
|
[
"mail.kevinpatel@gmail.com"
] |
mail.kevinpatel@gmail.com
|
6c0839c6066d8b8eafd8876ece1347f4488b2947
|
35b58dedc97622b1973456d907ede6ab86c0d966
|
/Test/2019年12月24日/2019年12月24日.py
|
ad4abe21a8bcaf16d5512d25c100514422f959cd
|
[] |
no_license
|
GithubLucasSong/PythonProject
|
7bb2bcc8af2de725b2ed9cc5bfedfd64a9a56635
|
e3602b4cb8af9391c6dbeaebb845829ffb7ab15f
|
refs/heads/master
| 2022-11-23T05:32:44.622532
| 2020-07-24T08:27:12
| 2020-07-24T08:27:12
| 282,165,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
def xxx(num):
def wrapper(func):
def inner():
for i in range(num):
func()
return inner
return wrapper
@xxx(3)
def func():
print('1')
func()
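# Illustrative note: `xxx` is a decorator factory, so the decoration above is
# equivalent to `func = xxx(3)(func)`; the final call therefore runs the
# original body 3 times and prints '1' three times.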
|
[
"1433880147@qq.com"
] |
1433880147@qq.com
|
f80d70dfcbd1d0f09fa8ffe23895353bf97ff23a
|
b91588cda1a129f06aa9493ee6d6a70e4f996b7f
|
/Production/python/Spring15Fastv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8_cff.py
|
bf01b9213d786b2b0783318c6ea58743e54c1d71
|
[] |
no_license
|
muzamilahmad/LeptoQuarkTreeMaker
|
2371e93589dbe41b02a93a2533cbf5e955aaa0db
|
8e7eed0d03c6770a029eafb9b638e82c600a7425
|
refs/heads/master
| 2021-01-19T01:02:35.749590
| 2018-02-06T17:27:52
| 2018-02-06T17:27:52
| 65,389,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,409
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/08ABB1E4-2A9F-E511-9BB9-02163E013FA4.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/10080BE5-2A9F-E511-95A1-001EC94BA3CC.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/104285B3-2A9F-E511-87B8-02163E01778C.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/129711D0-2A9F-E511-A330-782BCB20FDEA.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/168B4ACC-2A9F-E511-A3C8-B083FED42C03.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/1E23B9BD-2A9F-E511-AB59-B083FED045ED.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/2E0EEAEB-2A9F-E511-9D2B-C81F66B78FF5.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/32953818-2F9F-E511-9B22-E41D2D08DEE0.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/34E265D2-2A9F-E511-937F-02163E014DAE.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/46ACB58F-2D9F-E511-B8F3-842B2B42B584.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/486C9DED-2A9F-E511-A4B8-D4AE526DF64C.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/524AA4AE-2A9F-E511-BBD4-B083FED42FE4.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/6C4EE4C3-2A9F-E511-8B7B-A4BADB1E6796.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/6C7EA702-2B9F-E511-93D0-D4AE526DF562.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/6E7E0ED6-2A9F-E511-8877-02163E012FDA.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/74AE38DA-2A9F-E511-A308-D4AE527F2883.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/74CAF166-2D9F-E511-91B3-0019B9CB0300.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/7830B3AC-2A9F-E511-BFAE-90B11C0BD676.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/8CB7CDBA-2A9F-E511-9833-001EC94BFFEB.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/8CF658E1-2A9F-E511-8525-549F3525AE18.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/90D341B4-2E9F-E511-849F-002590AC4C49.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/921DB3CD-2A9F-E511-93E1-842B2B180A9F.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/944B9CBE-2A9F-E511-9427-782BCB1F5E6B.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/9637E6EF-2A9F-E511-9EF6-842B2B42BC3A.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/A2329021-2F9F-E511-8450-E41D2D08DD10.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/A435D607-2B9F-E511-B219-782BCB53B63D.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/A4FCCFCE-2A9F-E511-B41D-02163E013BCB.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/A60BBDF4-759E-E511-B825-782BCB50ACF1.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/ACE670F0-2A9F-E511-9F4B-90B11C0BD467.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/BC6BCDE5-2A9F-E511-B3C8-782BCB20EDD2.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/BEFB24F7-2A9F-E511-A7F7-D4AE526DF3BB.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/C0CF56B9-2E9F-E511-934F-047D7BD6DD64.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/CA274026-2B9F-E511-B7C7-C81F66B73923.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/CC2ACAC9-2A9F-E511-83BB-B083FED40671.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/D0634CE3-2E9F-E511-94F8-002590DB9358.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/D20D02BD-2A9F-E511-B408-02163E013168.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/D40165CD-2A9F-E511-B358-02163E013E59.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/D8D9F9B2-2E9F-E511-82C9-047D7BD6DF00.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/DA9F057B-849E-E511-B6AA-0019B9CB01E8.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/E8635D09-2B9F-E511-83F2-001EC94BA146.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/E8FC0EF1-2A9F-E511-ADC3-B083FED177B1.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/F4516FB8-2E9F-E511-BC71-0025907FD40C.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/FC25231D-2B9F-E511-B63E-02163E016831.root',
'/store/mc/RunIISpring15MiniAODv2/SMS-T1tttt_mGluino-1550to1575_mLSP-500to1175_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/MINIAODSIM/FastAsympt25ns_74X_mcRun2_asymptotic_v2-v1/40000/FCE78AE4-2A9F-E511-89AF-842B2B181727.root' ] );
secFiles.extend( [
] )
|
[
"kpedro88@gmail.com"
] |
kpedro88@gmail.com
|
413caa82fcdf1cbed4522863899a188b6410a76f
|
71f4c9ed644e02466806da202a121bc56244077e
|
/adventofcode/2021/day13.py
|
298d2c12e6b74bf49833b784d29d3b8c329bc83c
|
[] |
no_license
|
smaaland/python-sandbox
|
01c73492904c67f160c943699642e36bafea1641
|
e6c97f86294814831df44403ac576d9e2933e632
|
refs/heads/master
| 2022-12-22T17:13:31.208691
| 2022-12-21T18:14:22
| 2022-12-21T18:14:22
| 75,340,921
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
from typing import List
with open('input13.txt', 'r') as f:
lines = [line.rstrip() for line in f]
pattern = lines[:lines.index('')]
instructions = lines[lines.index('')+1:]
def fold(data: List[List[str]], axis: str, value: int) -> List[List[str]]:
if axis == 'y':
for y in range(value+1, len(data)):
for x in range(len(data[y])):
if data[y][x] == '#':
data[len(data) - y - 1][x] = data[y][x]
return data[0:value]
if axis == 'x':
for y in range(len(data)):
for x in range(value + 1, len(data[y])):
if data[y][x] == '#':
data[y][len(data[y]) - x - 1] = data[y][x]
return [x[0:value] for x in data]
return data
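# Illustrative example (hypothetical 3x2 sheet): folding
#   [['#', ' '], [' ', ' '], [' ', '#']]
# along y=1 mirrors row 2 onto row 0 and returns [['#', '#']].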
max_x = max([int(_.split(',')[0]) for _ in pattern])
max_y = max([int(_.split(',')[1]) for _ in pattern])
sheet = [[' ' for _ in range(max_x + 1)] for __ in range((max_y + 1))]
for point in pattern:
sheet[int(point.split(',')[1])][int(point.split(',')[0])] = '#'
axis, value = instructions[0].split(' ')[-1].split('=')
sheet_1 = fold(sheet, axis, int(value))
print(f"Part 1: {sum([row.count('#') for row in sheet_1])}")
for instruction in instructions:
axis, value = instruction.split(' ')[-1].split('=')
sheet = fold(sheet, axis, int(value))
print("Part 2:")
for line in sheet:
print(''.join(line))
|
[
"smaaland86@gmail.com"
] |
smaaland86@gmail.com
|
7462e551f2da5f26f1cc688dda0a77d433c1fb85
|
ba510b58e26cdad3fe1151136e36e70e7d6fa282
|
/order/migrations/0004_order_email.py
|
58198966b2e0591b12d559b362f33dd7d5cf662a
|
[] |
no_license
|
nicos199-sudo/kick
|
ade0562047e30d4d5b086450e0df916b5030012b
|
da644d656011345e53e9a7fdf408f66aa43865c6
|
refs/heads/main
| 2023-02-18T21:53:20.361165
| 2021-01-18T15:30:11
| 2021-01-18T15:30:11
| 330,705,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 3.0.7 on 2021-01-18 10:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0003_auto_20210113_1149'),
]
operations = [
migrations.AddField(
model_name='order',
name='email',
field=models.EmailField(default=1, max_length=255),
preserve_default=False,
),
]
|
[
"geeksolutionhacker@gmail.com"
] |
geeksolutionhacker@gmail.com
|
25bc799c2f8ab00b632e077060a62ffd91382750
|
91fe96f88a17d373e51654028d8e2cdd99728a21
|
/configs/HTC/htc_without_semantic_r50_fpn_1x_coco.py
|
27cc3cce36929c8bf9a5bc4a6a26c21a2ab60260
|
[
"Apache-2.0"
] |
permissive
|
liusurufeng/NucleiDetSeg
|
ab8639872ffedd2648e4b38af5528ecc89119cbe
|
ad4040a359e52c611780b409f84b601bfa9c94e2
|
refs/heads/main
| 2023-07-26T18:45:22.646575
| 2021-09-01T08:14:57
| 2021-09-01T08:14:57
| 461,822,842
| 1
| 0
|
Apache-2.0
| 2022-02-21T11:05:01
| 2022-02-21T11:05:00
| null |
UTF-8
|
Python
| false
| false
| 7,442
|
py
|
_base_ = [
'../_base_/datasets/coco_instance_kaggle.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='HybridTaskCascade',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[6],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='HybridTaskCascadeRoIHead',
interleaved=True,
mask_info_flow=True,
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=[
dict(
type='HTCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='HTCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=1,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
]))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=28,
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=500,
mask_thr_binary=0.5))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
[
"m18379660828@163.com"
] |
m18379660828@163.com
|
f642a8d1ab2728c136abc63723350f6c85d11e37
|
184fd6be003329c3b6a3033236869ad501f7a328
|
/Занятие4/Лабораторные_задания/task1_5/main.py
|
5e0626372abde991ef9997c883bc132e7c47ec43
|
[] |
no_license
|
Aleks8830/PythonPY100
|
629a2f6deee2b1faba2a6e019c2aa0d937b72a9e
|
fa61ad8a583deb8725a716edbafc6d660840bc73
|
refs/heads/master
| 2023-08-30T00:03:43.823320
| 2021-10-27T18:19:45
| 2021-10-27T18:19:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
if __name__ == "__main__":
list_ = [41, -13, 10, -1, 32, -3, -6, 8, 6, 9, 3]
even = 0
odd = 0
for i in list_:
        if i % 2 == 0:
            even += 1
        else:
            odd += 1
if odd > even:
print("odd")
else:
print("even")
|
[
"Sorokin_200683@mai.ru"
] |
Sorokin_200683@mai.ru
|
3b9971fac9181ca226d9ad1d30f00773e8a81a78
|
1358a2450ec6c499ad1f67b38e42a21278857561
|
/home/views.py
|
1f3f77b2417ee79c335801301f99e6d940360a22
|
[] |
no_license
|
atharvparamane/School_Admission_App_using_Django
|
4926c90351558cccd462f8ab13fa1f018c457b06
|
803ea27f699b2b0d5040616960ab18eae4c70713
|
refs/heads/master
| 2023-05-04T05:43:10.612064
| 2021-05-29T05:39:10
| 2021-05-29T05:39:10
| 371,888,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,111
|
py
|
from django.shortcuts import render, HttpResponse
from datetime import datetime
from home.models import Contact
from django.contrib import messages
# Create your views here.
def index(request):
context = {
"variable1":"Harry is great",
"variable2":"Rohan is great"
}
return render(request, 'index.html', context)
# return HttpResponse("this is homepage")
def about(request):
return render(request, 'about.html')
def services(request):
return render(request, 'services.html')
def contact(request):
if request.method == "POST":
name = request.POST.get('name')
email = request.POST.get('email')
phone = request.POST.get('phone')
desc = request.POST.get('desc')
        contact = Contact(name=name, email=email, phone=phone, desc=desc, date=datetime.today())
contact.save()
messages.success(request, 'Your message has been sent!')
return render(request, 'contact.html')
def all_events(request):
contact_list=Contact.objects.all()
return render(request, 'content.html',{'contact_list':contact_list})
|
[
"atharvparamane111@gmail.com"
] |
atharvparamane111@gmail.com
|
e9e8f2e5d7dfb09d98e3cd864015ec9c3bd7b0a9
|
272d82bd33b1cfc7139ec0ff511e62cb4a918184
|
/NationalDebt/NationalDebt/settings.py
|
4c5f001dc5ecfc399f380af7885d742f09874631
|
[] |
no_license
|
KyraYang/Scrapy_practices
|
ea63ecd7fef86e2fb79fac4353bcb94fedbeaaa8
|
5e234289ea35956468a8faf9b2fea7541847f06b
|
refs/heads/master
| 2022-12-29T07:27:34.962005
| 2020-10-14T09:28:10
| 2020-10-14T09:28:10
| 303,099,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for NationalDebt project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'NationalDebt'
SPIDER_MODULES = ['NationalDebt.spiders']
NEWSPIDER_MODULE = 'NationalDebt.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'NationalDebt (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'NationalDebt.middlewares.NationaldebtSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'NationalDebt.middlewares.NationaldebtDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'NationalDebt.pipelines.NationaldebtPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"yangziqing@duozhuayu.net"
] |
yangziqing@duozhuayu.net
|
0f445c5665deabdb6463b07cb613172db70bfaf1
|
09699108d8594f33ddddb5fbcbd169598861d7a4
|
/data_processing/mcs/mcs_process.py
|
2da7ee1904ad7d8f71faf0131a0cda08b8fd8baa
|
[] |
no_license
|
aldopareja/easy_attribute_prediction
|
78d3ac48c5322874d8a28455ff361c5130d5b6f4
|
278ae05eff89d5cd034b42a85558da528c6cfef8
|
refs/heads/main
| 2023-01-12T14:39:45.377238
| 2020-11-20T07:06:50
| 2020-11-20T07:06:50
| 311,797,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,919
|
py
|
"""
This tool processes passive data for eval 3 and dumps it in a valid format to train a derender
"""
import argparse
import os
import random
import shutil
import sys
from itertools import repeat, chain, product
from multiprocessing import Process, Queue
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
import numpy as np
import hickle as hkl
from pycocotools import mask as mask_util
from PIL import Image
import machine_common_sense as mcs
sys.path.insert(0, './')
from easy_attributes.utils.io import write_serialized
from easy_attributes.utils.meta_data import get_continuous_metadata, get_discrete_metadata, get_pixels_mean_and_std
MIN_AREA = 100
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--mcs_executable', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--data_path', type=str)
# parser.add_argument('--parallel', action='store_true')
parser.add_argument('--num_parallel_controllers', type=int)
args = parser.parse_args()
args.data_path = Path(args.data_path)
args.input_dir = Path(args.mcs_executable)
args.output_dir = Path(args.output_dir)
return args
def get_attributes(obj: mcs.object_metadata.ObjectMetadata):
attributes = {}
attributes['shape'] = obj.shape
[attributes.__setitem__('position_' + k, v) for k, v in obj.position.items()]
[attributes.__setitem__('rotation_' + k, v) for k, v in obj.rotation.items()]
[attributes.__setitem__(f'dimension_{i}_{c}', obj.dimensions[i][c]) for i,c in product(range(8), 'xyz')]
return attributes
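# Illustrative sketch (assumption) of the keys produced above: 'shape',
# 'position_x'/'position_y'/'position_z', 'rotation_x'/'rotation_y'/'rotation_z',
# and 'dimension_0_x' ... 'dimension_7_z', one entry per point in the
# object's 8-point `dimensions` list.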
def dump_for_detectron(step_data, out_path, index):
# print(step_data)
    depth: np.ndarray = step_data.depth_map_list[0]
    # Squash depth into (0, 1] (inverse depth) so it stacks with the
    # normalized RGB channels below.
    depth = 1 / (1 + depth)
rgb = np.array(step_data.image_list[0], dtype=np.float32) / 255.0
input = np.concatenate([rgb, depth[..., np.newaxis]], axis=2)
input = input.swapaxes(2, 0).swapaxes(1, 2) # now it is C, H, W
input_to_file = out_path / 'inputs' / (str(index).zfill(9) + '.hkl')
hkl.dump(input, input_to_file, mode='w', compression='gzip')
    masks = np.array(step_data.object_mask_list[0])
    # Collapse the RGB instance-mask image into a single integer id per pixel.
    masks = masks[:, :, 0] + masks[:, :, 1] * 256 + masks[:, :, 2] * 256 ** 2
assert not (masks == 0).any()
foreground_objects = {e.color['r'] + e.color['g'] * 256 + e.color['b'] * 256 ** 2: e
for e in step_data.structural_object_list
if not (e.uuid.startswith('wall') or e.uuid.startswith('floor'))}
foreground_objects.update({e.color['r'] + e.color['g'] * 256 + e.color['b'] * 256 ** 2: e
for e in step_data.object_list})
objects = []
for v in foreground_objects.keys():
mask = masks == v
if mask.sum() < MIN_AREA:
continue
mask_y, mask_x = mask.nonzero()
bbox = list(map(int, [mask_x.min(), mask_y.min(), mask_x.max(), mask_y.max()]))
if bbox[3] <= bbox[1] + 2 and bbox[2] <= bbox[0] + 2: # width and height shouldn't be too small
continue
mask = mask_util.encode(np.asarray(mask, order="F"))
mask['counts'] = mask['counts'].decode('ascii')
attributes = get_attributes(foreground_objects[v])
objects.append({'mask': mask,
'bbox': bbox,
**attributes,
'filename': str(input_to_file),
**{'agent_position_' + k: v for k, v in step_data.position.items()},
'agent_rotation': step_data.rotation})
return objects
def process_scene(controller, scene_path, output_path, vid_index, concurrent, tp: ThreadPoolExecutor):
config_data, _ = mcs.load_config_json_file(scene_path)
jobs = []
frame_id = 0
step_data = controller.start_scene(config_data)
if concurrent:
jobs.append(tp.submit(dump_for_detectron, step_data,
output_path,
vid_index * 500 + frame_id))
else:
jobs.append(dump_for_detectron(step_data, output_path, vid_index * 500 + frame_id))
frame_id += 1
actions = config_data['goal']['action_list']
for a in actions:
assert len(a) == 1, "there must be an action"
step_data = controller.step(a[0])
if concurrent:
jobs.append(tp.submit(dump_for_detectron, step_data,
output_path,
vid_index * 500 + frame_id))
else:
jobs.append(dump_for_detectron(step_data, output_path, vid_index * 500 + frame_id))
frame_id += 1
controller.end_scene("classification", 0.0)
if concurrent:
jobs = [j.result() for j in jobs]
return chain.from_iterable(jobs)
class SequentialSceneProcessor:
def __init__(self, mcs_executable: Path, concurrent_dump: bool):
self.controller = mcs.create_controller(str(mcs_executable),
depth_maps=True,
object_masks=True,
history_enabled=False)
self.concurrent = concurrent_dump
self.tp = ThreadPoolExecutor(4)
def process(self, w_arg):
(s, _, o, v) = w_arg
return process_scene(self.controller, s, o, v, self.concurrent, self.tp)
def ParallelSceneProcess(work_q: Queue, result_q: Queue, mcs_executable: Path, concurrent_dump):
controller = mcs.create_controller(str(mcs_executable),
depth_maps=True,
object_masks=True,
history_enabled=False)
with ThreadPoolExecutor(4) as p:
while True:
w_arg = work_q.get()
if w_arg is None:
break
(s, _, o, v) = w_arg
results = process_scene(controller, s, o, v, concurrent_dump, p)
result_q.put(results)
if __name__ == "__main__":
args = parse_args()
scene_files = [args.data_path / a for a in args.data_path.iterdir()]
shutil.rmtree(args.output_dir, ignore_errors=True)
(args.output_dir / 'inputs').mkdir(parents=True, exist_ok=True)
w_args = [(s, e, o, i) for i, (s, e, o) in enumerate(zip(scene_files,
repeat(args.mcs_executable),
repeat(args.output_dir)))]
if args.num_parallel_controllers > 0:
work_queue = Queue()
result_queue = Queue()
workers = [Process(target=ParallelSceneProcess,
args=(work_queue, result_queue, args.mcs_executable, True)) for _ in
range(args.num_parallel_controllers)]
[w.start() for w in workers]
w_args = [work_queue.put(w) for w in w_args]
data_dicts = [result_queue.get() for _ in range(len(w_args))]
[work_queue.put(None) for _ in range(args.num_parallel_controllers)]
[w.join() for w in workers]
work_queue.close()
result_queue.close()
else:
worker = SequentialSceneProcessor(args.mcs_executable, False)
data_dicts = [worker.process(w_arg) for w_arg in w_args]
data_dicts = list(chain.from_iterable(data_dicts))
all_indices = set(range(len(data_dicts)))
val_indices = random.sample(all_indices, 6000)
train_indices = all_indices.difference(val_indices)
val_dicts = [data_dicts[i] for i in val_indices]
train_dicts = [data_dicts[i] for i in train_indices]
meta_data = {'inputs': {'file_name': {'type': 'input_tensor',
'num_channels': 4,
'height': 400,
'width': 600,
**get_pixels_mean_and_std(val_dicts)},
'mask': {'type': 'bitmask'},
'bbox': {'type': 'bounding_box'}},
'outputs': {**{e: get_continuous_metadata(val_dicts, e)
for e in [*[c[0] + c[1] for c in product(['rotation_', 'position_', 'agent_position_'],
'xyz')],
*[f'dimension_{c[0]}_{c[1]}' for c in product(range(8), 'xyz')],
'agent_rotation']},
'shape': get_discrete_metadata(data_dicts, 'shape')}
}
write_serialized(val_dicts, args.output_dir / 'val.json')
write_serialized(train_dicts, args.output_dir / 'train.json')
write_serialized(meta_data, args.output_dir / 'metadata.yml')
# kill stalling controllers
os.system('pkill -f MCS-AI2-THOR-Unity-App -9')
|
[
"“aldopareja@users.noreply.github.com”"
] |
“aldopareja@users.noreply.github.com”
|
3fae254004b75973b3bb00a76c78aaa37798324a
|
0f412ecbbbd5b844430027cd6ac5247fde5798a9
|
/setup.py
|
e76a65865b08adc18e199184f0d29bba18c8265a
|
[
"BSD-3-Clause"
] |
permissive
|
sfc-gh-mlukiyanov/dask-snowflake
|
cb46b76fd3d02421d4c42d7018290f4336e65faa
|
e7977e5a7c73361ab705b9f14eb4a74cb0307a30
|
refs/heads/main
| 2023-08-18T07:22:02.236593
| 2021-09-14T20:19:32
| 2021-09-14T20:19:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name="dask-snowflake",
version="0.0.2",
    description="Dask + Snowflake integration",
license="BSD",
maintainer="James Bourbeau",
maintainer_email="james@coiled.io",
packages=["dask_snowflake"],
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
python_requires=">=3.7",
install_requires=open("requirements.txt").read().strip().split("\n"),
include_package_data=True,
zip_safe=False,
)
|
[
"jrbourbeau@gmail.com"
] |
jrbourbeau@gmail.com
|
bcf6c256a620b39458993f9696402e613bcb6be2
|
093bcdf9d708087219baa2a82de8406651b7ec48
|
/AjitPublicResources/processor/64bit/C_multi_core_multi_thread/cpu/SConstruct
|
2a108c0a3a1edac3600f92527fd1051c0ae73aa0
|
[] |
no_license
|
Prajwal-Prathiksh/ajit-toolchain
|
421b5e725cc64188829c5d0ea4b56c7aa11bcc6e
|
230601fe7541611fa3c7ce621231fe1c8d274ba2
|
refs/heads/master
| 2023-07-14T10:26:01.609494
| 2021-08-18T08:17:22
| 2021-08-18T08:17:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,957
|
import os
env = Environment(ENV = {'PATH' : os.environ['PATH']})
COMPILATION_FLAGS = ' -g '
AHIR_RELEASE=os.environ['AHIR_RELEASE']
AHIR_INCLUDE=AHIR_RELEASE+"/include"
env.Append(CPPPATH = './src/:./include:../common/include:')
env.Append(CPPPATH = AHIR_INCLUDE + ":" + "./include:")
AHIR_RELEASE=os.environ['AHIR_RELEASE']
AJIT_HOME=os.environ['AJIT_PROJECT_HOME']
AJIT_C_REF_MODEL=os.environ['AJIT_C_REF_MODEL']
MONITOR_LOGGER_INCLUDE=AJIT_C_REF_MODEL + "/monitorLogger/include"
MMU_INCLUDE=AJIT_C_REF_MODEL + "/mmu/include"
CACHE_INCLUDE=AJIT_C_REF_MODEL + "/cache/include"
env.Append(CPPPATH = AJIT_C_REF_MODEL + "/common/include:" + AJIT_C_REF_MODEL + "/cpu/include:"+ AJIT_C_REF_MODEL + "/cpu_interface/include:" + AJIT_C_REF_MODEL + "/half_precision_float/include:")
#monitorLogger
env.Append(CPPPATH = MONITOR_LOGGER_INCLUDE + ":")
#mmu
env.Append(CPPPATH = MMU_INCLUDE + ":")
#cache
env.Append(CPPPATH = CACHE_INCLUDE + ":")
#hwServer
HWSERVER_INCLUDE=AJIT_C_REF_MODEL + "/debugger/hwServer/include"
env.Append(CPPPATH = HWSERVER_INCLUDE + ":")
#rlut
RLUT_INCLUDE=AJIT_C_REF_MODEL + "/rlut/include"
env.Append(CPPPATH = RLUT_INCLUDE + ":")
#tlbs
TLBS_INCLUDE=AJIT_C_REF_MODEL + "/tlbs/include"
env.Append(CPPPATH = TLBS_INCLUDE + ":")
#AHIR-related
PIPE_HANDLER_INCLUDE=AHIR_RELEASE + "/include"
PIPE_HANDLER_LIBPATH = AHIR_RELEASE + "/lib"
PTHREAD_UTILS = AHIR_RELEASE + "/include"
GNU_PTH_UTILS = AHIR_RELEASE + "/include"
FUNCTIONLIB_PATH = AHIR_RELEASE + "/functionLibrary/lib"
FUNCTIONLIB_INCLUDE=AHIR_RELEASE + "/functionLibrary/include"
env.Append(CPPPATH = FUNCTIONLIB_INCLUDE + ":" + PIPE_HANDLER_INCLUDE + ":" + PTHREAD_UTILS + ":" + GNU_PTH_UTILS + ":" + "./include:")
print "COMPILATION FLAGS = ", COMPILATION_FLAGS
# create a library for the cpu :
#env.SharedLibrary('./lib/libCpu', Glob('src/*.c'), CCFLAGS=COMPILATION_FLAGS+' -DDO_VAL -DGDB' )
env.Library('./lib/libCpu', Glob('src/*.c'), CCFLAGS=COMPILATION_FLAGS)
|
[
"madhav@ee.iitb.ac.in"
] |
madhav@ee.iitb.ac.in
|
|
6291466374cdd799e27b4fbccb78fa6b7072e2ab
|
8e91748296c72473be2c64d79f0a0022100e3a6e
|
/ch03/maoyan_top100_movie/cache.py
|
de46f7637cd866ffe155cc96d5887f0079c67bc6
|
[] |
no_license
|
Gordonhan/spider_tutorial
|
52d9d1594f220889355add069aa3bcfb1390e670
|
9792fa1036c75311d54df05939ea16a156443ab4
|
refs/heads/master
| 2021-08-20T09:28:04.190611
| 2017-11-28T20:02:25
| 2017-11-28T20:02:25
| 110,866,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
# -*- coding:utf-8 -*-
from datetime import datetime, timedelta
import pymongo
import config
class MongoCache(object):
def __init__(self, client=None, expires=timedelta(days=30)):
self.client = client \
or pymongo.MongoClient(host="localhost", port=27017)
self.db = self.client[config.DEFAULT_DB]
self.collection = self.db[config.DEFAULT_COL]
self.collection.create_index('timestamp', expireAfterSeconds=expires.total_seconds())
def __getitem__(self, url):
document = self.collection.find_one({"_id": url})
if document:
return document["result"]
else:
            raise KeyError(url + " doesn't exist")
def __setitem__(self, url, result):
result = {'result': result, 'timestamp': datetime.utcnow()}
        self.collection.update_one({'_id': url}, {'$set': result}, upsert=True)
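# Minimal usage sketch (assumes a local MongoDB and a `config` module that
# defines DEFAULT_DB / DEFAULT_COL, as imported above):
# cache = MongoCache(expires=timedelta(days=7))
# cache['http://maoyan.com/board/4'] = '<html>...</html>'
# html = cache['http://maoyan.com/board/4']  # KeyError once expired or missing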
|
[
"Gordon-Han@hotmail.com"
] |
Gordon-Han@hotmail.com
|
5fcd974f1afbfb564edbd4444f8c9eb3d986aa36
|
d4a61065ba06ccf77873918c3fd79719ae878de7
|
/PyBank/main.py
|
07c7a6c3cb23439aead9523249541851b63e2770
|
[] |
no_license
|
alanacsaposs/python-challenge
|
e1b287c394c172c37fbaa3526f01864c65d95321
|
8c9dfb9be8a5064c79dcca234ead9697131c74fa
|
refs/heads/master
| 2020-06-06T18:32:27.080193
| 2019-06-23T01:07:25
| 2019-06-23T01:07:25
| 192,823,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,129
|
py
|
#imports
import os
import csv
#import csv
budget_csv = os.path.join('..', 'PyBank', 'budget-data.csv')
#create lists for variables
months = []
monthly_change = []
with open(budget_csv, newline="") as csvfile:
# Split the data on commas
budgetfile = csv.reader(csvfile, delimiter=',')
header = next(budgetfile)
total = 0
month_revenue = 0
# Read through each row of data after the header
for row in budgetfile:
months.append(row[0])
total += int(row[1])
# Take the difference between two months and append to monthly profit change
monthly_change.append(int(row[1]) - month_revenue)
month_revenue = int(row[1])
#Find maximum and minimum monthly change
#Greatest increase in profits
max_increase = max(monthly_change)
best_index = monthly_change.index(max_increase)
best_date = months[best_index]
#Greatest decrease (lowest increase) in profits
max_decrease = min(monthly_change)
worst_index = monthly_change.index(max_decrease)
worst_date = months[worst_index]
#Get Average Change
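# Note: because month_revenue starts at 0, monthly_change[0] is the first
# month's raw revenue (the hard-coded 867884) rather than a true change,
# so it is subtracted back out before averaging.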
change_total = sum(monthly_change) - 867884
total_months = len(months)
avg_change = round(change_total/total_months, 2)
#print statements
print("Financial Analysis")
print("-------------------------")
print(f"Total Months: {len(months)}")
print(f"Total: ${(total)}")
print(f"Average Change: ${avg_change}")
print(f"Greatest Increase in Profits: {best_date} (${str(max_increase)})")
print(f"Greatest Decrease in Profits: {worst_date} (${str(max_decrease)})")
# save to .txt file
filepath = os.path.join("output_pybank.txt")
with open(filepath,'w') as text:
text.write("Financial Analysis" + "\n")
text.write("-------------------------" + "\n")
text.write(f"Total Months: {len(months)}" + "\n")
text.write(f"Total: ${(total)}" + "\n")
text.write(f"Average Change: ${avg_change}" + "\n")
text.write(f"Greatest Increase in Profits: {best_date} (${str(max_increase)})" + "\n")
text.write(f"Greatest Decrease in Profits: {worst_date} (${str(max_decrease)})" + "\n")
|
[
"alanacsaposs@gmail.com"
] |
alanacsaposs@gmail.com
|
b070ef65e1caa4183c3ae26452c35f96a50d9aba
|
89d24ad0b40790d501760809e0365e730f6eeb7b
|
/utf8/utf8.py
|
c31e493cffe0828afba7c4abbe355ca16a0c63a6
|
[] |
no_license
|
ahua/dataset
|
00259158c7f29745ccf35d17356b63f8f440c1d0
|
7b3325b6d22a97a30d3fbff261f99a0dbb83ab6e
|
refs/heads/master
| 2020-05-17T05:08:21.840561
| 2013-09-04T15:23:41
| 2013-09-04T15:23:41
| 5,768,195
| 20
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
py
|
#!/usr/bin/env python
import sys
def parse(li):
    t = li.rstrip().split()
    a = b"".join(t[2:5])  # the hex-digit fields of the code point
    b = t[-1]             # the raw UTF-8 byte sequence of the character
    return a, b
def main(filename):
    fp = open(filename, "rb")  # binary mode so the UTF-8 bytes stay raw
    ls = fp.readlines()
    fp.close()
    s = []
    for l in ls:
        if not l.startswith(b"#"):
            a, b = parse(l)
            x = int(a, 16)
            # recombine the three UTF-8 bytes into a single integer
            y = b[0] * 65536 + b[1] * 256 + b[2]
            if x != y:
                print(x, y)
            else:
                s.append(b)
        else:
            sys.stderr.write(l.decode("utf-8"))
    return s
if __name__ == "__main__":
    s = main(sys.argv[1])
    s.sort()
    t = 0
    for w in s:
        print('u"%s",' % w.decode("utf-8"), end=' ')
        t = t + 1
        if t % 25 == 0:
            print()
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# t = [ i for i in range(s[0], s[-1]+1) ]
# print s[0], s[-1], t[0], t[-1]
# print len(s)
# print len(t)#
# for i in s:
# print i
# for i in s:
# t.remove(i)
# for i in t:
# try:
# print unichr(i),
# except:
# pass
"""
|
[
"yhyan@geek.(none)"
] |
yhyan@geek.(none)
|
b9fc5615b4b5f96564d265a37ca08ad0e44e8ea3
|
1c26554b4c8b1f341dd7ce244a033cdb336e7be8
|
/todoproject/todoapp/migrations/0006_auto_20201009_1900.py
|
215f5b3fad1b034a57b9fc10355eaef3d357fc68
|
[] |
no_license
|
GeethaRamanathan/To-Do-App
|
c359ce53d034f22ea39dc75651a9fafb890db614
|
78606a277678c149f9331b920437242990302028
|
refs/heads/master
| 2022-12-27T13:58:10.932713
| 2020-10-11T07:06:54
| 2020-10-11T07:06:54
| 303,061,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# Generated by Django 3.1.2 on 2020-10-09 13:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todoapp', '0005_auto_20201009_1859'),
]
operations = [
migrations.AlterField(
model_name='todo',
name='is_completed',
field=models.BooleanField(),
),
]
|
[
"geetharam740@gmail.com"
] |
geetharam740@gmail.com
|
cdef16b79b22736a1cccc44a94795a5c8c7030d3
|
093b9569be9d1c4e5daf92efbebc38f680917b2d
|
/.history/base/views_20210829083101.py
|
f7277fa3afe4a9018d49e030975924059810adec
|
[] |
no_license
|
Justin-Panagos/todoList
|
95b1e97ff71af1b0be58e7f8937d726a687cea4d
|
10539219b59fcea00f8b19a406db3d4c3f4d289e
|
refs/heads/master
| 2023-08-04T13:27:13.309769
| 2021-08-29T14:06:43
| 2021-08-29T14:06:43
| 400,827,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Task
class CustoomLoginView(LoginView):
template_name = 'base/login.html'
fields = '__all__'
redirect_authenticated_user = True
def get_success_url(self):
return reverse_lazy('tasks')
class TaskList( LoginRequiredMixin, ListView):
model = Task
context_object_name = 'tasks'
    def get_context_data(self, **kwargs):
        # The original definition was cut off mid-line; this minimal body
        # simply delegates to the parent implementation.
        return super().get_context_data(**kwargs)
class TaskDetail(LoginRequiredMixin, DetailView):
model = Task
context_object_name = 'task'
template_name = 'base/task.html'
class TaskCreate(LoginRequiredMixin, CreateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskUpdate( LoginRequiredMixin, UpdateView):
model = Task
fields = '__all__'
success_url = reverse_lazy('tasks')
class TaskDelete(LoginRequiredMixin, DeleteView):
model = Task
context_object_name = 'task'
success_url = reverse_lazy('tasks')
|
[
"justpanagos@gmail.com"
] |
justpanagos@gmail.com
|
48ad6ce14387c8aece2769983287047b001a0c5b
|
d932f40fb253cbe9860b549a7bbd58c1609b4f4f
|
/app/config/secure.py
|
63d6c4b34ae1dd44e77c3bce94a0147d806a0cf5
|
[] |
no_license
|
beizhongshashui/flask_restful
|
1f8d2832ba763dd4c99a64b29e746bb577633c3f
|
bc5e26a191cbb12b160a349c25af1785c36560b7
|
refs/heads/master
| 2021-04-12T14:18:34.548314
| 2020-03-22T09:18:30
| 2020-03-22T09:18:30
| 249,084,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
SQLALCHEMY_DATABASE_URI = \
'mysql+cymysql://root:1234qwer@localhost/flask_mooc01'
# 'postgres+psycopg2://postgres:postgres@localhost/ginger'
SECRET_KEY = '\x88D\xf09\x91\x07\x98\x89\x87\x96\xa0A\xc68\xf9\xecJ:U\x17\xc5V\xbe\x8b\xef\xd7\xd8\xd3\xe6\x98*2'
|
[
"zhangys19@lenovo.com"
] |
zhangys19@lenovo.com
|
cd775c47a564fc423c900c16e838af10a7fd9de9
|
2425d9150334d9a9521f73a9d6efe7b8f39f72b0
|
/homeassistant/components/zha/core/channels/base.py
|
4d1e71e884ea2dac6e15c55ec0f67a36796966ed
|
[
"Apache-2.0"
] |
permissive
|
krzkowalczyk/home-assistant
|
d2117cbe461c2b9bce0d1357487ea05c3e4b96ac
|
513685bbeacca2c758d3ca33b337da3b7e72dd1d
|
refs/heads/dev
| 2023-02-22T13:32:59.251838
| 2021-04-27T21:34:53
| 2021-04-27T21:34:53
| 232,874,252
| 0
| 0
|
Apache-2.0
| 2023-02-22T06:15:56
| 2020-01-09T18:11:47
|
Python
|
UTF-8
|
Python
| false
| false
| 14,579
|
py
|
"""Base classes for channels."""
from __future__ import annotations
import asyncio
from enum import Enum
from functools import wraps
import logging
from typing import Any
import zigpy.exceptions
from homeassistant.const import ATTR_COMMAND
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .. import typing as zha_typing
from ..const import (
ATTR_ARGS,
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_CLUSTER_ID,
ATTR_TYPE,
ATTR_UNIQUE_ID,
ATTR_VALUE,
CHANNEL_ZDO,
SIGNAL_ATTR_UPDATED,
ZHA_CHANNEL_MSG,
ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_CFG_RPT,
ZHA_CHANNEL_MSG_DATA,
)
from ..helpers import LogMixin, safe_read
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
try:
result = await command(*args, **kwds)
channel.debug(
"executed '%s' command with args: '%s' kwargs: '%s' result: %s",
command.__name__,
args,
kwds,
result,
)
return result
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
channel.debug(
"command failed: '%s' args: '%s' kwargs '%s' exception: '%s'",
command.__name__,
args,
kwds,
str(ex),
)
return ex
return wrapper
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
REPORT_CONFIG = ()
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ZigbeeChannel."""
self._generic_id = f"channel_0x{cluster.cluster_id:04x}"
self._channel_name = getattr(cluster, "ep_attribute", self._generic_id)
self._ch_pool = ch_pool
self._cluster = cluster
self._id = f"{ch_pool.id}:0x{cluster.cluster_id:04x}"
unique_id = ch_pool.unique_id.replace("-", ":")
self._unique_id = f"{unique_id}:0x{cluster.cluster_id:04x}"
self._report_config = self.REPORT_CONFIG
if not hasattr(self, "_value_attribute") and len(self._report_config) > 0:
attr = self._report_config[0].get("attr")
if isinstance(attr, str):
self.value_attribute = self.cluster.attridx.get(attr)
else:
self.value_attribute = attr
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def id(self) -> str:
"""Return channel id unique for this device only."""
return self._id
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._ch_pool.async_send_signal(signal, *args)
async def bind(self):
"""Bind a zigbee cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"success": res[0] == 0,
},
},
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_BIND,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"success": False,
},
},
)
async def configure_reporting(self) -> None:
"""Configure attribute reporting for a cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
event_data = {}
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
for report in self._report_config:
attr = report["attr"]
attr_name = self.cluster.attributes.get(attr, [attr])[0]
min_report_int, max_report_int, reportable_change = report["config"]
event_data[attr_name] = {
"min": min_report_int,
"max": max_report_int,
"id": attr,
"name": attr_name,
"change": reportable_change,
}
try:
res = await self.cluster.configure_reporting(
attr, min_report_int, max_report_int, reportable_change, **kwargs
)
self.debug(
"reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
attr_name,
self.cluster.ep_attribute,
min_report_int,
max_report_int,
reportable_change,
res,
)
event_data[attr_name]["success"] = (
res[0][0].status == 0 or res[0][0].status == 134
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting for '%s' attr on '%s' cluster: %s",
attr_name,
self.cluster.ep_attribute,
str(ex),
)
event_data[attr_name]["success"] = False
async_dispatcher_send(
self._ch_pool.hass,
ZHA_CHANNEL_MSG,
{
ATTR_TYPE: ZHA_CHANNEL_MSG_CFG_RPT,
ZHA_CHANNEL_MSG_DATA: {
"cluster_name": self.cluster.name,
"cluster_id": self.cluster.cluster_id,
"attributes": event_data,
},
},
)
async def async_configure(self) -> None:
"""Set cluster binding and attribute reporting."""
if not self._ch_pool.skip_configuration:
await self.bind()
if self.cluster.is_server:
await self.configure_reporting()
ch_specific_cfg = getattr(self, "async_configure_channel_specific", None)
if ch_specific_cfg:
await ch_specific_cfg()
self.debug("finished channel configuration")
else:
self.debug("skipping channel configuration")
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache: bool) -> None:
"""Initialize channel."""
if not from_cache and self._ch_pool.skip_configuration:
self._status = ChannelStatus.INITIALIZED
return
self.debug("initializing channel: from_cache: %s", from_cache)
attributes = [cfg["attr"] for cfg in self._report_config]
if attributes:
await self.get_attributes(attributes, from_cache=from_cache)
ch_specific_init = getattr(self, "async_initialize_channel_specific", None)
if ch_specific_init:
await ch_specific_init(from_cache=from_cache)
self.debug("finished channel configuration")
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
@callback
def zha_send_event(self, command: str, args: int | dict) -> None:
"""Relay events to hass."""
self._ch_pool.zha_send_event(
{
ATTR_UNIQUE_ID: self.unique_id,
ATTR_CLUSTER_ID: self.cluster.cluster_id,
ATTR_COMMAND: command,
ATTR_ARGS: args,
}
)
async def async_update(self):
"""Retrieve latest state from cluster."""
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result.get(attribute)
async def get_attributes(self, attributes, from_cache=True):
"""Get the values for a list of attributes."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
try:
result, _ = await self.cluster.read_attributes(
attributes,
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result
except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException) as ex:
self.debug(
"failed to get attributes '%s' on '%s' cluster: %s",
attributes,
self.cluster.ep_attribute,
str(ex),
)
return {}
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:%s]: {msg}"
args = (self._ch_pool.nwk, self._id) + args
_LOGGER.log(level, msg, *args)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(self, command)
return self.__getattribute__(name)
class ZDOChannel(LogMixin):
"""Channel for ZDO events."""
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = CHANNEL_ZDO
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = f"{str(device.ieee)}:{device.name}_ZDO"
self._cluster.add_listener(self)
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
@callback
def permit_duration(self, duration):
"""Permit handler."""
async def async_initialize(self, from_cache):
"""Initialize channel."""
self._status = ChannelStatus.INITIALIZED
async def async_configure(self):
"""Configure channel."""
self._status = ChannelStatus.CONFIGURED
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:ZDO](%s): {msg}"
args = (self._zha_device.nwk, self._zha_device.model) + args
_LOGGER.log(level, msg, *args)
class ClientChannel(ZigbeeChannel):
"""Channel listener for Zigbee client (output) clusters."""
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
self.zha_send_event(
SIGNAL_ATTR_UPDATED,
{
ATTR_ATTRIBUTE_ID: attrid,
ATTR_ATTRIBUTE_NAME: self._cluster.attributes.get(attrid, ["Unknown"])[
0
],
ATTR_VALUE: value,
},
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.server_commands is not None
and self._cluster.server_commands.get(command_id) is not None
):
self.zha_send_event(self._cluster.server_commands.get(command_id)[0], args)
|
[
"noreply@github.com"
] |
krzkowalczyk.noreply@github.com
|
52c49a9b3cb35bdb91d028875d44ae794f0e19a3
|
91ea758a98d27a0387820e66bc44270b430b1980
|
/Ex5_Training/training.py
|
2047bafe4ff484a42e2133c2d3c258a6e038b1d5
|
[] |
no_license
|
OmniXRI/20201024_AIGO_Lab2
|
c9c8e2fc6cb8a9e75e5d6c007da0e4a36b0c2495
|
45f6422b0fe02903ed8b3d62992109fcbb2e47ff
|
refs/heads/main
| 2022-12-31T15:35:19.070059
| 2020-10-23T22:53:03
| 2020-10-23T22:53:03
| 306,760,619
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,221
|
py
|
# -*- coding: utf-8 -*-
"""training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/19yxTphDHRHaTkGUckbyrIBj_1EDnjZ0h
Download the YOLOv3 pre-trained weight file from the official YOLO website
"""
!git clone https://github.com/OmniXRI/OpenVINO_RealSense_HarvestBot.git #fetch the cherry tomato image and annotation dataset
!ls
"""切換至工具路徑 my_yolo3"""
# Commented out IPython magic to ensure Python compatibility.
# %cd OpenVINO_RealSense_HarvestBot
# %cd my_yolo3/
!ls
"""到YOLO官網下載預設權重檔 yolov3.weights"""
!wget https://pjreddie.com/media/files/yolov3.weights
"""展開 my_voc_annotation.py 原始碼"""
# Commented out IPython magic to ensure Python compatibility.
# %pycat my_voc_annotation.py
"""my_voc_annotation.py 原始碼,將標註好的VOC格式檔案轉成YOLO格式。"""
import xml.etree.ElementTree as ET
from os import getcwd
sets=['train', 'val', 'test'] #define the dataset split names
classes = ["tomato"] #define the custom class names
def convert_annotation(img_id, list_file):
    in_file = open('VOC2007/Annotations/%s.xml' %img_id, encoding='utf-8') #path to the annotation file
tree=ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
for image_set in sets:
    img_names = open('VOC2007/ImageSets/Main/%s.txt'%image_set).read().strip().split() #list file of images to convert
    list_file = open('2007_%s.txt'%image_set, 'w') #name of the converted output list
for img_name in img_names:
list_file.write('VOC2007/JPEGImages/%s.jpg'%img_name)
img_id = img_name.split('.')
convert_annotation(img_id[0], list_file)
list_file.write('\n')
list_file.close()
"""檢查是否有正確轉出 2007_test.txt, 2007_train.txt, 2007_val.txt"""
!date
!ls *.txt -all
"""將YOLOv3權重檔轉換為keras格式(*.h5),命名為 yolo_weights.h5存放至model_data路徑下。"""
!python convert.py -w yolov3.cfg yolov3.weights model_data/yolo_weights.h5
"""展開 my_train.py 程式碼"""
# Commented out IPython magic to ensure Python compatibility.
# %pycat my_train.py
"""my_train.py 原始碼,負責訓練模型參數。"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    annotation_path = '2007_train.txt' #training list (YOLO format)
    log_dir = 'logs/000/' #directory for training logs and intermediate results
    classes_path = 'model_data/my_classes.txt' #path to the custom label file
    anchors_path = 'model_data/yolo_anchors.txt' #path to the anchor definition file
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
    input_shape = (416,416) # multiple of 32, hw; the default input image size must be a multiple of 32 (width, height)
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/yolo_weights.h5') #path and name of the initial training weights
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3) #checkpoint file names combine the epoch number and the loss values
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = 24 #batch size; set according to available GPU memory
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
                epochs=50, #number of training epochs
                initial_epoch=0, #starting epoch
callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5') #save the stage-1 intermediate weights
    # Unfreeze all layers and continue training to fine-tune.
    # Train longer if the result is not good enough.
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 24 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
                epochs=100, #number of training epochs
                initial_epoch=50, #starting epoch
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5') #save the final weights
        #model.save(log_dir + 'trained_model_final.h5') #save the complete model together with the weights
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
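# Illustrative note: yolo_anchors.txt is assumed to hold one comma-separated
# line such as "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326"
# (the stock YOLOv3 anchors), which get_anchors() reshapes into an (N, 2)
# array of (width, height) pairs.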
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
"""檢查是否順利完成訓練,產出 trained_weights_final.h5"""
!ls model_data/
"""展開 my_yolo.py 程式碼"""
# Commented out IPython magic to ensure Python compatibility.
# %pycat my_yolo.py
"""my_yolo.py 原始碼,負責最終影像推論。"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
class YOLO(object):
_defaults = {
"model_path": 'model_data/trained_weights_final.h5', #指定YOLO訓練完成權重檔路徑及名稱
"anchors_path": 'model_data/yolo_anchors.txt', #指定錨點定義檔路徑及名稱
"classes_path": 'model_data/my_classes.txt', #指定自定義標籤檔路徑及名稱
"score" : 0.1, #最低置信度門檻(0.01~0.99)
"iou" : 0.45, #重疊區比例(0.01~1.0)
"model_image_size" : (416, 416), #影像尺寸
"gpu_num" : 1, #使用GPU數量
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
if __name__ == '__main__':
t0 = timer()
    yolo=YOLO() #initialize YOLO
    path = 'VOC2007/JPEGImages/img_1550.jpg' #path to the test image
try:
t1 = timer()
        image = Image.open(path) #open the image to run inference on
except:
print('Open Error! Try again!')
else:
print('Start detect object.\n')
t2 = timer()
        r_image = yolo.detect_image(image) #run inference
        t3 = timer()
        r_image.show() #show the result image with the detected object boxes
        print('Yolo initial: %f sec' %(t1-t0)) #report YOLO initialization time
        print('Image load: %f sec' %(t2-t1)) #report image loading time
        print('Detect object: %f sec\n' %(t3-t2)) #report object detection time
        yolo.close_session() #shut down the YOLO session
|
[
"noreply@github.com"
] |
OmniXRI.noreply@github.com
|
562df90b7ca7b36aa10e10f2bf997cb3821343e7
|
717ec032c10a279cc82c37b25b9306f269e7bec3
|
/Python/csv/batch.py
|
c554af8272076ffce208ad0d301b3b2817c50066
|
[] |
no_license
|
lceric/study-workspace
|
a768d8e52e7d328052116931936cf38aa4dea285
|
b9764fd6f1e40e38174753ed363b46e15923cead
|
refs/heads/master
| 2021-09-19T13:10:36.558329
| 2018-07-28T02:34:18
| 2018-07-28T02:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'Batch-format Excel workbooks'
__author__ = 'author'
import openpyxl
from openpyxl.styles import Font, colors
def batchFormat(num):
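    # Assumed input layout: workbooks named 0.xlsx .. (num-1).xlsx in the
    # current directory; each is re-saved as new<name>.xlsx after formatting.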
for i in range(num):
fn = str(i)+'.xlsx'
wb = openpyxl.load_workbook(fn)
ws = wb.worksheets[0]
for irow, row in enumerate(ws.rows, start=1):
if irow == 1:
                # header row: bold, SimHei (黑体)
font = Font('黑体', bold=True)
elif irow%2 == 0:
                # even rows: red, SimSun (宋体)
font = Font('宋体', color=colors.RED)
            else:
                # odd rows: light blue, SimSun; assign the font here so the
                # previous row's font is not silently reused
                font = Font('宋体', color='00CCFF')
for cell in row:
cell.font = font
                # even rows: add a background fill color, fading from red to blue
if irow%2 == 0:
# cell.fill = openpyxl.styles.fills.GradientFill(stop=['FF0000', '0000FF'])
cell.font = Font('黑体', color=colors.BLUE)
cell.fill = openpyxl.styles.fills.GradientFill(stop=['FF0000', 'FF0000'])
        # save as a new file
wb.save('new'+fn)
batchFormat(5)
|
[
"18709270892@163.com"
] |
18709270892@163.com
|
d5fda3ef3b7bc610f10be2f284f0781aeec860c5
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/era5_scripts/02_preprocessing/concat82/685-tideGauge.py
|
ce2afc2ae237798722733401bbef84cc243ac159
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465
| 2021-06-25T21:00:44
| 2021-06-25T21:00:44
| 229,080,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd
#%% define directories
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'
#cd to the home dir to get TG information
os.chdir(home)
tg_list = os.listdir()
x = 685
y = 686
#looping through TGs
for t in range(x, y):
tg = tg_list[t]
print(tg)
#concatenate folder paths
os.chdir(os.path.join(home, tg))
#defining the folders for predictors
#choose only u, v, and slp
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp'),\
"wnd_u": os.path.join(where, 'wnd_u'),\
'wnd_v' : os.path.join(where, 'wnd_v')}
#%%looping through predictors
for pred in csv_path.keys():
os.chdir(os.path.join(home, tg))
# print(tg, ' ', pred, '\n')
#cd to the chosen predictor
os.chdir(pred)
#%%looping through the yearly csv files
count = 1
for yr in os.listdir():
print(pred, ' ', yr)
if count == 1:
dat = pd.read_csv(yr)
# print('original size is: {}'.format(dat.shape))
else:
#remove the header of the subsequent csvs before merging
# dat_yr = pd.read_csv(yr, header=None).iloc[1:,:]
dat_yr = pd.read_csv(yr)
dat_yr.shape
dat = pd.concat([dat, dat_yr], axis = 0)
# print('concatenated size is: {}'.format(dat.shape))
count+=1
print(dat.shape)
#saving concatenated predictor
#cd to the saving location
os.chdir(out_path)
#create/cd to the tg folder
try:
os.makedirs(tg)
os.chdir(tg) #cd to it after creating it
except FileExistsError:
#directory already exists
os.chdir(tg)
#save as csv
pred_name = '.'.join([pred, 'csv'])
dat.to_csv(pred_name)
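        # Resulting layout: <out_path>/<tg>/slp.csv, wnd_u.csv and wnd_v.csv,
        # one concatenated file per predictor.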
|
[
"michaelg.tadesse@gmail.com"
] |
michaelg.tadesse@gmail.com
|
9e169c3b6e4c276cbbf5d9f5e129c8de5b740332
|
58d7ace148c853528fb0b8c0fdb71b74a3d6d7b6
|
/Python/Maximum69Number.py
|
c2f4142fe9f929339a42071eaccb2d8e8076295a
|
[] |
no_license
|
WinrichSy/LeetCode_Solutions
|
4200779aa201e3c8a0d7c977c0a5b911e434d803
|
149b70b8bd7d7eaa67e8345d3dfd659289b75573
|
refs/heads/master
| 2022-07-16T15:15:22.192519
| 2020-05-22T01:18:06
| 2020-05-22T01:18:06
| 261,314,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
#Maximum 69 Number
#https://leetcode.com/problems/maximum-69-number/
class Solution:
def maximum69Number (self, num: int) -> int:
str_num = str(num)
if str_num.count('6')==0:
return num
str_num = list(str_num)
str_num[str_num.index('6')]='9'
return int(''.join(str_num))
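# Illustrative check: Solution().maximum69Number(9669) returns 9969, since
# flipping only the first (most significant) '6' maximizes the value.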
|
[
"winrichsy@gmail.com"
] |
winrichsy@gmail.com
|
6f68b50c77692e985515c7f3c64945c263e05d27
|
fea3593a6cc3c05a865d547e5b2966642157739d
|
/11/tridy7_Marcela.py
|
0e297e125d93c7ef4af8518fe73ba17274487638
|
[] |
no_license
|
benkeanna/pyladies
|
bd9e1f7d62ee1f40fcc14886703c8d7c9e8f8577
|
840cf3fea844dd32ab783b1bdcb91a45a2360492
|
refs/heads/master
| 2021-10-24T13:55:04.429666
| 2019-03-26T14:40:48
| 2019-03-26T14:40:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
import turtle
class KreslimObecne(turtle.Turtle):
POCET_HRAN = 0
UHEL_OTOCENI = 0
def neco_udelej(self):
for x in range(self.POCET_HRAN):
self.forward(50)
self.left(self.UHEL_OTOCENI)
class KreslimCtverec(KreslimObecne):
POCET_HRAN = 4
UHEL_OTOCENI = 90
class KreslimTroj(KreslimObecne):
POCET_HRAN = 3
UHEL_OTOCENI = 120
class KreslimMnohouhelnik(KreslimObecne):
POCET_HRAN = 18
UHEL_OTOCENI = 20
objekty = [KreslimCtverec(), KreslimTroj(), KreslimMnohouhelnik()]
for objekt in objekty:
objekt.neco_udelej()
turtle.exitonclick()
|
[
"haymannovaa@gmail.com"
] |
haymannovaa@gmail.com
|
bee735e385587ecb19e903b1d6a9a7d3e91d7bc4
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/2601.py
|
bf1fa19e1e73d54bfd4394cd3e108d3db0f1d5b0
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
from sys import stdin
def printAnswer(caseIndex, answer):
print("Case #", caseIndex+1, ": ", answer, sep='')
T = int(input())
for t in range(T):
(farmCost, farmExtraProd, winCost) = map(float, input().split())
currProd = 2
timeForWin = winCost / currProd
prevTimeForWin = timeForWin
accTime = 0
while timeForWin <= prevTimeForWin:
accTime += farmCost / currProd
currProd += farmExtraProd
prevTimeForWin = timeForWin
timeForWin = winCost / currProd + accTime
printAnswer(t, prevTimeForWin)
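# Illustrative check (sample case of this problem): farmCost=30.0,
# farmExtraProd=1.0, winCost=2.0 -> buying a farm never pays off, so the loop
# exits after one trial step and prints winCost / 2 = 1.0.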
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0b51ac3b2723630ba8481f98e3b89c7261cd6ea1
|
68367ae671b414f4fdf427005bfd5083f7ab055c
|
/embedding_no_cont.py
|
01cea2d28f37a2b672ed0e2e6638871ea08c75d6
|
[] |
no_license
|
Xinxinatg/embedding_RBP
|
6f0527f84b89e6b047f4299e9d417d7b38ec1d84
|
bd9a398047fe346f5815a8d51419f0c6d7f781a0
|
refs/heads/main
| 2023-08-25T18:08:15.943423
| 2021-10-25T02:44:53
| 2021-10-25T02:44:53
| 410,151,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,184
|
py
|
import argparse
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as Data
import torch.nn.utils.rnn as rnn_utils
import time
import pickle
from termcolor import colored
from sklearn.metrics import (accuracy_score, balanced_accuracy_score,
                             precision_recall_curve, auc, roc_auc_score,
                             matthews_corrcoef)
import os
# import tensorflow as tf
import math
parser = argparse.ArgumentParser(description='embeddings_for_RBP_prediction')
parser.add_argument('--epoch', type=int, default=200, help='epoch number')
parser.add_argument('--model_dir', default='Model/', help='model directory')
parser.add_argument('--rep_dir', help='representation file directory')
parser.add_argument('--pro_label_dir', help='pro_label file directory')
parser.add_argument('--load_model_dir', default=None,help='trained model file directory')
parser.add_argument('--big_or_small_model',type=int,default=0, help='choose between big and small model; 0 means big')
parser.add_argument('--learning_rate',type=float,default=0.0001, help='learning rate')
parser.add_argument('--batch_size',type=int,default=1024)
args = parser.parse_args()
rep_all_pd=pd.read_csv(args.rep_dir)
pro=pd.read_csv(args.pro_label_dir)
label=torch.tensor(pro['label'].values)
head,tail=os.path.split(args.pro_label_dir)
trP=tail.split('trP')[1].split('_')[0]
trN=tail.split('trN')[1].split('_')[0]
vaP=tail.split('VaP')[1].split('_')[0]
vaN=tail.split('VaN')[1].split('_')[0]
teP=tail.split('TeP')[1].split('_')[0]
teN=tail.split('TeN')[1].split('_')[0]
data=torch.tensor(rep_all_pd.values)
print(trP,trN,vaP,vaN,teP,teN)
# print(data.shape,label.shape)
print(label.shape,data.shape)
train_data,train_label=data[:int(trP)+int(trN)].double(),label[:int(trP)+int(trN)]
test_data,test_label=data[int(trP)+int(trN):-int(teP)-int(teN)].double(),label[int(trP)+int(trN):-int(teP)-int(teN)]
# LOSS_WEIGHT_POSITIVE = math.sqrt((int(trP)+int(trN)) / (2.0 * int(trP)) )
# LOSS_WEIGHT_NEGATIVE = math.sqrt((int(trP)+int(trN)) / (2.0 * int(trN)) )
LOSS_WEIGHT_POSITIVE = (int(trP)+int(trN)) / (2.0 * int(trP))
LOSS_WEIGHT_NEGATIVE = (int(trP)+int(trN)) / (2.0 * int(trN))
# https://towardsdatascience.com/deep-learning-with-weighted-cross-entropy-loss-on-imbalanced-tabular-data-using-fastai-fe1c009e184c
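# Worked example (illustrative): with trP=100 positives and trN=900 negatives,
# LOSS_WEIGHT_POSITIVE = 1000 / (2*100) = 5.0 and
# LOSS_WEIGHT_NEGATIVE = 1000 / (2*900) ~ 0.556, so each positive sample
# contributes roughly 9x the loss weight of a negative one.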
soft_max=nn.Softmax(dim=1)
# class_weights=torch.FloatTensor([w_0, w_1]).cuda()
weig=torch.FloatTensor([LOSS_WEIGHT_NEGATIVE,LOSS_WEIGHT_POSITIVE]).double().cuda()
# train_data,train_label=genData("./train_peptide.csv",260)
# test_data,test_label=genData("./test_peptide.csv",260)
train_dataset = Data.TensorDataset(train_data, train_label)
test_dataset = Data.TensorDataset(test_data, test_label)
batch_size=args.batch_size
train_iter = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_iter = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
Emb_dim=data.shape[1]
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
head1,tail1=os.path.split(args.pro_label_dir)
if args.load_model_dir ==None:
logits_output=os.path.join(args.model_dir,tail1.split('_')[0]+'_'+args.rep_dir.split('/')[-2] \
+str(args.big_or_small_model)+ '_logits.csv')
model_loc=os.path.join(args.model_dir,tail1.split('_')[0]+'_'+args.rep_dir.split('/')[-2] \
+str(args.big_or_small_model)+ '.pl')
else:
logits_output=os.path.join(args.model_dir,'fine_tune'+tail1.split('_')[0]+'_'+args.rep_dir.split('/')[-2] \
+str(args.big_or_small_model)+ '_logits.csv')
model_loc=os.path.join(args.model_dir,'fine_tune'+tail1.split('_')[0]+'_'+args.rep_dir.split('/')[-2] \
+str(args.big_or_small_model)+ '.pl')
class newModel1(nn.Module):
def __init__(self, vocab_size=26):
super().__init__()
self.hidden_dim = 256
self.batch_size = 256
self.emb_dim = Emb_dim
# self.embedding = nn.Embedding(vocab_size, self.emb_dim, padding_idx=0)
# self.encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=2)
# self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=1)
# self.gmlp_t=gMLP(num_tokens = 1000,dim = 32, depth = 2, seq_len = 40, act = nn.Tanh())
self.gru = nn.GRU(self.emb_dim, self.hidden_dim, num_layers=6,
bidirectional=True, dropout=0.05)
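        # Shape note: each sample is fed as a length-1 sequence, so block1's
        # 3584-dim input is the final GRU output (2 directions * 256 = 512)
        # concatenated with the hidden states of all layers
        # (6 layers * 2 directions * 256 = 3072).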
self.block1=nn.Sequential(nn.Linear(3584,1024),
nn.BatchNorm1d(1024),
nn.LeakyReLU(),
nn.Linear(1024,512),
nn.BatchNorm1d(512),
nn.LeakyReLU(),
nn.Linear(512,256),
)
self.block2=nn.Sequential(
nn.BatchNorm1d(256),
nn.LeakyReLU(),
nn.Linear(256,128),
nn.BatchNorm1d(128),
nn.LeakyReLU(),
nn.Linear(128,64),
nn.BatchNorm1d(64),
nn.LeakyReLU(),
nn.Linear(64,2)
)
def forward(self, x):
# x=self.embedding(x)
# output=self.transformer_encoder(x).permute(1, 0, 2)
# output=self.gmlp_t(x).permute(1, 0, 2)
x=x.view(1,x.shape[0],x.shape[1])
# output=self.gmlp_t(x).permute(1, 0, 2)
# print(output.shape)
output,hn=self.gru(x)
output=output.permute(1,0,2)
hn=hn.permute(1,0,2)
output=output.reshape(output.shape[0],-1)
hn=hn.reshape(output.shape[0],-1)
output=torch.cat([output,hn],1)
# print('output.shape',output.shape)
output=self.block1(output)
return self.block2(output)
class newModel2(nn.Module):
def __init__(self, vocab_size=26):
super().__init__()
self.hidden_dim = 48
self.batch_size = 256
self.emb_dim = Emb_dim
# self.embedding = nn.Embedding(vocab_size, self.emb_dim, padding_idx=0)
# self.encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=2)
# self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=1)
# self.gmlp_t=gMLP(num_tokens = 1000,dim = 32, depth = 2, seq_len = 40, act = nn.Tanh())
# self.gru = nn.GRU(self.emb_dim, self.hidden_dim, num_layers=4,
# bidirectional=True, dropout=0.2)
self.c1_1 = nn.Conv1d(32, 256, 1)
self.c1_2 = nn.Conv1d(32, 256, 3)
self.c1_3 = nn.Conv1d(32, 256, 5)
self.p1 = nn.MaxPool1d(3, stride=3)
self.c2 = nn.Conv1d(256, 128, 3)
self.p2 = nn.MaxPool1d(3, stride=3)
self.c3 = nn.Conv1d(128, 128, 3)
# self.p3 = nn.MaxPool1d(3, stride=1)
self.drop=nn.Dropout(p=0.01)
self.block2=nn.Sequential(
nn.Linear(896,512),
nn.BatchNorm1d(512),
nn.LeakyReLU(),
nn.Linear(512,64),
nn.BatchNorm1d(64),
nn.LeakyReLU(),
nn.Linear(64,2)
)
def forward(self, x):
# x=self.embedding(x)
# output=self.transformer_encoder(x).permute(1, 0, 2)
# output=self.gmlp_t(x).permute(1, 0, 2)
x=x.view(x.shape[0],32,32)
# x=x.transpose(1,2)
# output=self.gmlp_t(x).permute(1, 0, 2)
# print(output.shape)
c1_1=self.c1_1(x)
c1_2=self.c1_2(x)
c1_3=self.c1_3(x)
c=torch.cat((c1_1, c1_2, c1_3), -1)
# print(c1_1.shape,c1_2.shape,c1_3.shape,c.shape)
p = self.p1(c)
c=self.c2(p)
p=self.p2(c)
# print(p.shape)
c=self.c3(p)
# print(c.shape)
# p=self.p3(c)
# print(p.shape)
# print('output.shape',output.shape)
# print(c.shape)
c=c.view(c.shape[0],-1)
c=self.drop(c)
# print(c.shape)
return self.block2(c)
class ContrastiveLoss(torch.nn.Module):
def __init__(self, margin=2.5):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, output1, output2, label):
# euclidean_distance: [128]
euclidean_distance = F.pairwise_distance(output1, output2)
# print(output1.shape,output2.shape,label.shape)
        loss_contrastive = torch.mean((label) * torch.pow(euclidean_distance, 2) +  # torch.clamp clips the negative margin term to zero
                                      (1-label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
def collate(batch):
seq1_ls=[]
seq2_ls=[]
label1_ls=[]
label2_ls=[]
label_ls=[]
batch_size=len(batch)
for i in range(int(batch_size/2)):
seq1,label1=batch[i][0],batch[i][1]
seq2,label2=batch[i+int(batch_size/2)][0],batch[i+int(batch_size/2)][1]
label1_ls.append(label1.unsqueeze(0))
label2_ls.append(label2.unsqueeze(0))
label=(label1*label2)+(1-label1)*(1-label2)
# label=(label1^label2)
seq1_ls.append(seq1.unsqueeze(0))
seq2_ls.append(seq2.unsqueeze(0))
label_ls.append(label.unsqueeze(0))
seq1=torch.cat(seq1_ls).to(device)
seq2=torch.cat(seq2_ls).to(device)
label=torch.cat(label_ls).to(device)
label1=torch.cat(label1_ls).to(device)
label2=torch.cat(label2_ls).to(device)
return seq1,seq2,label,label1,label2
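# Note on collate(): pairs are drawn from the two halves of the batch, and
# `label` is 1 exactly when both items share the same class
# ((l1*l2)+(1-l1)*(1-l2)), i.e. a "similar" pair for a contrastive objective.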
train_iter_cont = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True,collate_fn=collate)
device = torch.device("cuda",0)
def evaluate_accuracy(data_iter, net):
acc_sum, n = 0.0, 0
for x, y in data_iter:
x,y=x.to(device),y.to(device)
outputs=net(x)
acc_sum += (outputs.argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
def to_log(log):
with open("./modelLog.log","a+") as f:
f.write(log+'\n')
def main():
if args.big_or_small_model ==0:
net=newModel1().double().to(device)
else:
net=newModel2().double().to(device)
# state_dict=torch.load('/content/Model/pretrain.pl')
# net.load_state_dict(state_dict['model'])
if args.load_model_dir != None:
state_dict=torch.load(args.load_model_dir)
net.load_state_dict(state_dict['model'])
# lr = 0.0001
optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate,weight_decay=5e-4)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,patience=5, factor=0.75,verbose=True)
# https://discuss.pytorch.org/t/reducelronplateau-not-doing-anything/24575/10
# criterion = ContrastiveLoss()
# criterion_model = nn.CrossEntropyLoss(reduction='sum')
criterion_model = nn.CrossEntropyLoss(weight=weig,reduction='mean')
best_bacc=0
best_aupr=0
EPOCH=args.epoch
CUDA_LAUNCH_BLOCKING=1
for epoch in range(EPOCH):
loss_ls=[]
t0=time.time()
net.train()
# for seq1,seq2,label,label1,label2 in train_iter_cont:
for seq,label in train_iter:
# print(seq1.shape,seq2.shape,label.shape,label1.shape,label2.shape)
seq,label=seq.to(device),label.to(device)
output=net(seq)
loss=criterion_model(output,label)
# print(loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_ls.append(loss.item())
lr_scheduler.step(loss)
if epoch %100 ==0:
torch.save({
'model_state_dict': net.state_dict(),
'optimizer_state_dict': optimizer.state_dict()
}, os.path.join('/content/Model', 'ckpt_{}.pl'.format(epoch)))
net.eval()
with torch.no_grad():
train_acc=evaluate_accuracy(train_iter,net)
# test_acc=evaluate_accuracy(test_iter,net)
test_data_gpu=test_data.to(device)
test_logits=net(test_data_gpu)
outcome=np.argmax(test_logits.detach().cpu(), axis=1)
test_bacc=balanced_accuracy_score(test_label, outcome)
precision, recall, thresholds = precision_recall_curve(test_label, soft_max(test_logits.cpu())[:,1])
test_aupr = auc(recall, precision)
results=f"epoch: {epoch+1}, loss: {np.mean(loss_ls):.5f}\n"
# results=f"epoch: {epoch+1}\n"
results+=f'\ttrain_acc: {train_acc:.4f}, test_aupr: {colored(test_aupr,"red")},test_bacc: {colored(test_bacc,"red")}, time: {time.time()-t0:.2f}'
print(results)
to_log(results)
if test_aupr>best_aupr:
best_aupr=test_aupr
torch.save({"best_aupr":best_aupr,"model":net.state_dict(),'args':args},model_loc)
print(f"best_aupr: {best_aupr}")
state_dict=torch.load(model_loc)
# state_dict=torch.load('/content/Model/pretrain.pl')
net.load_state_dict(state_dict['model'])
pro=pd.read_csv(args.pro_label_dir)
label=torch.tensor(pro['label'].values)
# final_test_data,final_test_label=data[9655+1068:].double(),label[9655+1068:]
# train_data,train_label=data[:6011].double(),label[:6011]
final_test_data,final_test_label=data[-int(teP)-int(teN):].double(),label[-int(teP)-int(teN):]
final_test_data=final_test_data.to(device)
net.eval()
with torch.no_grad():
logits=net(final_test_data)
# logits_output=os.path.split(rep_file)[1].replace('.csv','_logtis.csv')
logits_cpu=logits.cpu().detach().numpy()
logits_cpu_pd=pd.DataFrame(logits_cpu)
logits_cpu_pd.to_csv(logits_output,index=False)
outcome=np.argmax(logits.cpu().detach().numpy(), axis=1)
MCC= matthews_corrcoef(final_test_label, outcome)
acc = accuracy_score(final_test_label, outcome)
bacc=balanced_accuracy_score(final_test_label, outcome)
precision1, recall1, thresholds1 = precision_recall_curve(final_test_label, soft_max(torch.tensor(logits_cpu))[:,1])
final_test_aupr = auc(recall1, precision1)
final_auc_roc=roc_auc_score(final_test_label, soft_max(torch.tensor(logits_cpu))[:,1])
# final_test_aupr=0
print('bacc,MCC,final_test_aupr,final_auc_roc')
print(bacc,MCC,final_test_aupr,final_auc_roc)
if __name__ == '__main__':
CUDA_LAUNCH_BLOCKING=1
main()
|
[
"noreply@github.com"
] |
Xinxinatg.noreply@github.com
|
5de61b20f5b4440f894d7ad89f1565d568fde089
|
40dd244441a60370690ca0f8b1d9846812d89c05
|
/World 3/Lists/ex089 - Newsletter with List.py
|
5e58d31194d0bce378dd4f91357c6fd321699cf8
|
[
"MIT"
] |
permissive
|
MiguelChichorro/PythonExercises
|
6fe5aab37bc74e914b5742a9648c662ac64cdeb3
|
3b2726e7d9ef92c1eb6b977088692c42a2a7b86e
|
refs/heads/main
| 2023-07-18T04:42:38.879349
| 2021-09-01T16:01:48
| 2021-09-01T16:01:48
| 360,982,518
| 2
| 1
| null | 2021-09-01T15:40:01
| 2021-04-23T19:11:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
from time import sleep
colors = {"clean": "\033[m",
"red": "\033[31m",
"green": "\033[32m",
"yellow": "\033[33m",
"blue": "\033[34m",
"purple": "\033[35m",
"cian": "\033[36m"}
ans = 1
while ans == 1:
students = list()
ans2 = 'Y'
while ans2 == 'Y':
name = str(input("Enter a name:"))
n1 = int(input("Entear a mark: "))
n2 = int(input("Enter a mark: "))
avg = (n1 + n2) / 2
students.append([name, [n1, n2], avg])
ans2 = str(input("Want continue? [Y/N]")).upper()
print(f"{colors['blue']}Reading data...{colors['clean']}")
sleep(1)
print("-=" * 20)
    print(f"{'NU.':<8}{'NAME':<25}{'AVG':<8}")
print("-" * 37)
for i, a in enumerate(students):
print(f"{i:<8}{a[0]:<25}{a[2]:<8.1f}")
print("-" * 37)
while True:
print(f"{colors['red']}if you want to stop just enter 999{colors['clean']}")
n = int(input("Enter a number:"))
if n == 999:
            print(f"{colors['red']}Stopping...{colors['clean']}")
break
        if 0 <= n <= len(students) - 1:
print(f"{colors['green']}the {students[n][0]} marks are {students[n][1]}{colors['clean']}")
print(f"{colors['green']}AGAIN{colors['clean']}")
else:
            print(f"{colors['red']}This student doesn't exist{colors['clean']}")
print(f"{colors['red']}COMEBACK{colors['clean']}")
ans = int(input(f"{colors['cian']}\nPress [ 1 ] to do again or another number to leave: {colors['clean']}"))
if ans != 1:
print(f"{colors['green']}Have a good day!{colors['clean']}")
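# Sketch of the record layout built above (hypothetical sample data, not user input):
_example = ["Ana", [8, 6], (8 + 6) / 2]  # [name, [mark1, mark2], average]
assert _example[2] == 7.0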
|
[
"51164882+MiguelChichorro@users.noreply.github.com"
] |
51164882+MiguelChichorro@users.noreply.github.com
|
908f8699daf9af3cbdaa929a7bb26e56fd664b84
|
d103b4299b8fd473c7848f40750bbd546a7250cc
|
/user_db/asgi.py
|
dd5c5ad7ee9fa9123c7cae0d2ba4a43c36c8a809
|
[] |
no_license
|
lpakule/HW_28_04_2021
|
1754c8247e5d1433a65e5d3f5a3738444bec8a91
|
d92d403325c11d79299f9e8de570381b1c9456e2
|
refs/heads/main
| 2023-04-22T02:26:53.798501
| 2021-05-10T18:15:06
| 2021-05-10T18:15:06
| 364,479,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
ASGI config for user_db project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'user_db.settings')
application = get_asgi_application()
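# To serve this application with an ASGI server (assuming, for example, that
# uvicorn is installed):
#     uvicorn user_db.asgi:application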
|
[
"larisa.pakule@outlook.com"
] |
larisa.pakule@outlook.com
|
6c1efadbe1d1249e5cec851c3f1722a4c95adb78
|
feab8d31e5a41ef8793946d5bf906162c1d1dce8
|
/reservation/migrations/0003_alter_reservationmodel_unique_together.py
|
8244221f08693fd7cc014a6e035853fb4a785723
|
[] |
no_license
|
Mariusz798/warsztatDjango
|
1e2da8641440c51d7379ba40253900487a1ed18b
|
745560f394481b054917ca0c212100af6c3b3523
|
refs/heads/master
| 2023-04-15T09:11:18.167648
| 2021-04-17T16:54:38
| 2021-04-17T16:54:38
| 358,530,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
# Generated by Django 3.2 on 2021-04-17 15:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reservation', '0002_reservationmodel'),
]
operations = [
migrations.AlterUniqueTogether(
name='reservationmodel',
unique_together={('date', 'room_id')},
),
]
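# For reference, the model-side declaration this migration mirrors would be
# (sketch, assuming the existing fields of ReservationModel):
#
#     class ReservationModel(models.Model):
#         ...
#         class Meta:
#             unique_together = ('date', 'room_id')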
|
[
"mariusz042475@gmail.com"
] |
mariusz042475@gmail.com
|
1a5d071aaa0bdf028f1746c525c3c71b5b6c7ce3
|
5445eced339414f795bedcdf1f801be7c68fbcf1
|
/base/driver.py
|
1827f7db5fb8b12b1a2bb2d73beec94f0bc76d92
|
[] |
no_license
|
risengzr/aolai
|
5574ff5833e8e2f5a8c3353bc78bc7811abf9bfe
|
b8dfed89eb2b1fbb462b9dd40fce64a5cbc4bde2
|
refs/heads/master
| 2020-04-18T22:00:58.273524
| 2019-01-27T07:26:13
| 2019-01-27T07:26:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
from appium import webdriver
def init_driver():
desired_caps = {}
desired_caps['platformName'] = "Android"
desired_caps['platformVersion'] = "5.1"
desired_caps['deviceName'] = "192.168.56.101:5555"
desired_caps['appPackage'] = "com.yunmall.lc"
desired_caps['appActivity'] = "com.yunmall.ymctoc.ui.activity.MainActivity"
desired_caps['automationName'] = "Uiautomator2"
return webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
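if __name__ == '__main__':
    # Minimal usage sketch: assumes an Appium server on localhost:4723 and the
    # emulator/app configured above.
    driver = init_driver()
    print(driver.current_activity)
    driver.quit()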
|
[
"13844911496@163.com"
] |
13844911496@163.com
|
b6f9b16a7681d673cefd4543cbb50a8e33138ed0
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2684/60720/286577.py
|
b3b51d52b25f409e0c9f8a385adb80eb382dfca4
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
size=int(input())
list0=[]
timen=[]
def findt(i,flag):
    # flag == 1: take item i now, adding its cost to the better of the two previous states
    if flag==1:
        return min(timen[i-1][0],timen[i-1][1])+list0[i]
    # flag == 0: item i adds no cost; carry over the previous "taken" state
    if flag==0:
        return timen[i-1][1]
for k in range(size):
timen=[]
n=int(input())
list0=input().split()
list0=[int(list0[i]) for i in range(n)]
timen.append([0,list0[0]])
for i in range(1,n):
lst=[]
lst.append(findt(i,0))
lst.append(findt(i,1))
timen.append(lst)
print(min(timen[-1][0],timen[-1][1]))
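# Equivalent compact form of the recurrence above (same arithmetic, O(1) memory);
# sketch only, never called by the script.
def min_time(costs):
    a, b = 0, costs[0]              # timen[0] = [0, costs[0]]
    for cost in costs[1:]:
        a, b = b, min(a, b) + cost  # findt(i, 0), findt(i, 1)
    return min(a, b)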
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
a92f1ad232822e9e027286abc9da0dd12c1f3807
|
a857d1911a118b8aa62ffeaa8f154c8325cdc939
|
/toontown/ai/ToontownAIMsgTypes.py
|
d2325cc0185c40cb767e4a6c93c0385f73b14b58
|
[
"MIT"
] |
permissive
|
DioExtreme/TT-CL-Edition
|
761d3463c829ec51f6bd2818a28b667c670c44b6
|
6b85ca8352a57e11f89337e1c381754d45af02ea
|
refs/heads/main
| 2023-06-01T16:37:49.924935
| 2021-06-24T02:25:22
| 2021-06-24T02:25:22
| 379,310,849
| 0
| 0
|
MIT
| 2021-06-22T15:07:31
| 2021-06-22T15:07:30
| null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
from otp.ai.AIMsgTypes import *
TTAIMsgName2Id = {'DBSERVER_GET_ESTATE': 1040,
'DBSERVER_GET_ESTATE_RESP': 1041,
'PARTY_MANAGER_UD_TO_ALL_AI': 1042,
'IN_GAME_NEWS_MANAGER_UD_TO_ALL_AI': 1043,
'WHITELIST_MANAGER_UD_TO_ALL_AI': 1044}
TTAIMsgId2Names = invertDictLossless(TTAIMsgName2Id)
if config.GetBool('isclient-check', False):
if not isClient():
print 'EXECWARNING ToontownAIMsgTypes: %s' % TTAIMsgName2Id
printStack()
for name, value in TTAIMsgName2Id.items():
exec '%s = %s' % (name, value)
del name
del value
DBSERVER_PET_OBJECT_TYPE = 5
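# Note: an exec-free equivalent of the loop above would be
#     globals().update(TTAIMsgName2Id)
# which creates the same module-level constants from the dict (sketch only; the
# exec form is kept for consistency with the surrounding OTP codebase).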
|
[
"devinhall4@gmail.com"
] |
devinhall4@gmail.com
|
a2ebd938e26e13e0ef5376e03fe25331606a304d
|
108f2c23503c82f36d2f16e4f66ead5684a97d8d
|
/honeyPot/fakeShell/linuxCommand/ls.py
|
b4e5c96b5b15586f9abfab6d89642654452d5733
|
[] |
no_license
|
icysun/honeyPotController
|
cc540b562bf2e219e8491b18451c9e8aea8c90ff
|
37bd04783b6381c31ee1af40e445cf30e6879ed5
|
refs/heads/main
| 2023-06-14T22:00:05.756017
| 2021-07-09T07:48:07
| 2021-07-09T07:48:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,255
|
py
|
# -*- coding:utf-8 -*-
# author: dzhhey
response = """Desktop\tDownloads\tMusic\tptmxtest.c\tPycharmProjects\tTemplates\tVideos\tDocuments\tkyber\tPictures\tPublic\tsnap\ttest"""
ls_al = """total 108
drwxr-xr-x 21 dzh dzh 4096 Jul 7 00:45 .
drwxr-xr-x 3 root root 4096 Apr 1 08:50 ..
-rw------- 1 dzh dzh 1583 Jul 7 00:02 .bash_history
-rw-r--r-- 1 dzh dzh 220 Apr 1 08:50 .bash_logout
-rw-r--r-- 1 dzh dzh 3771 Apr 1 08:50 .bashrc
drwxr-xr-x 13 dzh dzh 4096 Jul 6 21:31 .cache
drwx------ 12 dzh dzh 4096 Jul 6 21:31 .config
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Desktop
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Documents
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Downloads
drwx------ 3 dzh dzh 4096 Jul 7 04:23 .gnupg
drwxrwxr-x 4 dzh dzh 4096 Jul 6 21:32 .java
drwxr-xr-x 5 root root 4096 Jun 8 22:00 kyber
drwxr-xr-x 3 dzh dzh 4096 Apr 5 04:41 .local
drwx------ 5 dzh dzh 4096 May 17 06:43 .mozilla
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Music
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Pictures
-rw-r--r-- 1 dzh dzh 807 Apr 1 08:50 .profile
-rw-r--r-- 1 root root 1279 Jul 7 00:45 ptmxtest.c
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Public
drwxrwxr-x 3 dzh dzh 4096 Jul 6 21:34 PycharmProjects
drwxr-xr-x 4 dzh dzh 4096 Jul 6 21:31 snap
drwx------ 2 dzh dzh 4096 Jul 6 22:30 .ssh
-rw-r--r-- 1 dzh dzh 0 Apr 5 04:55 .sudo_as_admin_successful
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Templates
drwxrwxr-x 6 dzh dzh 4096 Apr 19 00:06 test
drwxr-xr-x 2 dzh dzh 4096 Apr 5 04:41 Videos
-rw-rw-r-- 1 dzh dzh 169 Apr 5 04:58 .wget-hsts
"""
def parse(args_=None):
    try:
        if not args_:
            # plain `ls`: write the short listing and stop
            with open("buffer", "w") as f:
                f.write(response)
            return
        if len(args_) == 1:
            if args_[0] == "-a":
                with open("buffer", "w") as f:
                    f.write(response)
            elif args_[0] == "-al" or args_[0] == "-la":
                with open("buffer", "w") as f:
                    f.write(ls_al)
            else:
                with open("buffer", "w") as f:
                    f.write("ls: command not found\r\n")
        else:
            with open("buffer", "w") as f:
                f.write("ls: command not found\r\n")
    except Exception:
        with open("buffer", "w") as f:
            f.write("ls: command not found\r\n")
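if __name__ == '__main__':
    # Quick illustrative check of the fake `ls` (assumes write access to ./buffer):
    parse(["-la"])
    with open("buffer") as f:
        print(f.read())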
|
[
"974341189@qq.com"
] |
974341189@qq.com
|
45b5f39432925a3fc5cae9b876aa0db8c91e30a3
|
ff4137ddf4f0970fcd5ce4ed65f23a7b4ca37547
|
/new.py
|
0e53921377c501f0cf3d617131ae59b2266012db
|
[] |
no_license
|
clivejj/nasa_vs_spacex
|
b6696b120e2d6fe05dfa8c472199f8d01892984e
|
14185c2ec42d34405586d79025c746e6212dc53b
|
refs/heads/master
| 2021-08-11T12:37:16.648212
| 2017-11-13T18:06:16
| 2017-11-13T18:06:16
| 110,191,204
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,700
|
py
|
from flask import Flask, render_template, request
import urllib
import urllib2
import xml.etree.ElementTree as ET
app=Flask(__name__)
@app.route('/')
def root():
return render_template("submit.html")
@app.route('/results')
def results():
track = request.args["track"]
    # Build the USPS TrackV2 request URL; the XML parameter must be URL-encoded
    # and the URL must not contain raw newlines.
    xml_payload = ('<?xml version="1.0" encoding="UTF-8" ?>'
                   '<TrackRequest USERID="074STUYV1630">'
                   '<TrackID ID="' + track + '"></TrackID>'
                   '</TrackRequest>')
    url = ('http://production.shippingapis.com/ShippingAPI.dll'
           '?API=TrackV2&XML=' + urllib.quote(xml_payload))
    u = urllib2.urlopen(url).read()
'''u = \'''
<?xml version="1.0" encoding="UTF-8"?>
<TrackResponse><TrackInfo ID="9405509699937073048953"><TrackSummary>The item is currently in transit to the destination as of November 13, 2017 at 9:03 am. It is on its way to ZIP Code 10025.</TrackSummary><TrackDetail>In Transit to Destination, November 12, 2017, 9:08 am, On its way to ZIP Code 10025</TrackDetail><TrackDetail>Departed USPS Regional Facility, November 12, 2017, 7:03 am, DES MOINES IA DISTRIBUTION CENTER</TrackDetail><TrackDetail>Arrived at USPS Regional Origin Facility, November 11, 2017, 9:08 pm, DES MOINES IA DISTRIBUTION CENTER</TrackDetail><TrackDetail>Departed Post Office, November 11, 2017, 6:16 pm, AMES, IA 50010</TrackDetail><TrackDetail>USPS in possession of item, November 11, 2017, 4:38 pm, AMES, IA 50010</TrackDetail><TrackDetail>Shipping Label Created, USPS Awaiting Item, November 11, 2017, 8:32 am, AMES, IA 50014</TrackDetail></TrackInfo></TrackResponse>
\''''''
return "This has yet to be formatted" + u
#return render_template("results.html", track=request.args["track"])
if __name__ == '__main__':
app.debug = True
app.run()
|
[
"cjohnston1@stuy.edu"
] |
cjohnston1@stuy.edu
|
e904ee6a7216c0b2c3ac14bc79bce97f28142967
|
a674199c7d5edd80e198933bcfdd5f2b683b8801
|
/slackbot_settings.py
|
c5724030a3aa9a36eff1f10f8fe46efcb53d9a80
|
[] |
no_license
|
Dakurtz422/Slack-as-Remote-Desktop
|
8f1a8b6e08f21b99fd6fa64eea7de42762c609c2
|
b96be50d98c57cb8eff635c404a61ae95803aaa4
|
refs/heads/master
| 2020-08-02T20:36:01.796987
| 2019-09-28T13:05:32
| 2019-09-28T13:05:32
| 211,499,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
import os
# Get Slack API_TOKEN from environment (or hardcode here)
api_key = os.environ.get('BOT_API')
API_TOKEN = api_key
# Default message when Bot can't find an appropriate answer
DEFAULT_REPLY = "Excuse me"
# Directories that the bot will scan for plugin modules
PLUGINS = ['plugins']
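# Minimal plugin sketch that these settings would pick up from plugins/hello.py
# (assumes the `slackbot` package API with respond_to):
#
#     from slackbot.bot import respond_to
#
#     @respond_to('hi')
#     def hi(message):
#         message.reply('Hello!')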
|
[
"noreply@github.com"
] |
Dakurtz422.noreply@github.com
|
25cec13326e3c495322757e51ec6ff994f5c0630
|
573a66e4f4753cc0f145de8d60340b4dd6206607
|
/JS-CS-Detection-byExample/Dataset (ALERT 5 GB)/362764/shogun-2.0.0/shogun-2.0.0/examples/undocumented/python_modular/converter_stochasticproximityembedding_modular.py
|
7a8f0ad0d582efd44b62703f5ad89146c6ef8102
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
mkaouer/Code-Smells-Detection-in-JavaScript
|
3919ec0d445637a7f7c5f570c724082d42248e1b
|
7130351703e19347884f95ce6d6ab1fb4f5cfbff
|
refs/heads/master
| 2023-03-09T18:04:26.971934
| 2022-03-23T22:04:28
| 2022-03-23T22:04:28
| 73,915,037
| 8
| 3
| null | 2023-02-28T23:00:07
| 2016-11-16T11:47:44
| null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
#!/usr/bin/env python
from tools.load import LoadMatrix
lm = LoadMatrix()
data = lm.load_numbers('../data/fm_train_real.dat')
parameter_list = [[data, 12]]
def converter_stochasticproximityembedding_modular(data, k):
from shogun.Features import RealFeatures
from shogun.Converter import StochasticProximityEmbedding, SPE_GLOBAL, SPE_LOCAL
features = RealFeatures(data)
converter = StochasticProximityEmbedding()
converter.set_target_dim(1)
converter.set_nupdates(40)
# Embed with local strategy
converter.set_k(k)
converter.set_strategy(SPE_LOCAL)
converter.embed(features)
# Embed with global strategy
converter.set_strategy(SPE_GLOBAL)
converter.embed(features)
return features
if __name__=='__main__':
print('StochasticProximityEmbedding')
converter_stochasticproximityembedding_modular(*parameter_list[0])
|
[
"mmkaouer@umich.edu"
] |
mmkaouer@umich.edu
|
d50ea5fe9eaf251c47008c2e2963f6b0cc477a91
|
5f88483bd0ffc6f97e5ed20d6b79eb68f710142e
|
/Sean_the_sheep_of_the_dead_with_GUI.py
|
92e51f5bdc53ef3841ba7eec032174dfec22fd6c
|
[] |
no_license
|
NathanKhadaroo/Pythonintro
|
0053472d18c6ee6efa008331babc4724a04d8fba
|
4d59735a9f161f8de0cf91cd075888cdbbc8afd0
|
refs/heads/master
| 2020-07-27T02:46:14.834831
| 2019-10-24T09:46:59
| 2019-10-24T09:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,335
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 14:12:33 2019
@author: gynjkm
"""
#imports required packages
import tkinter as tk
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import agentframework_zombies
import csv
import random
#Creates a window which allows us to enter the model parameters
fields = ('Number of Sheep', 'Number of Zombies', 'Number of Landmines', 'Number of Iterations', 'Neighborhood size', 'Explosion size')
def run():
    # FuncAnimation needs the update function itself, not the result of calling it
    global animation  # keep a reference so the animation is not garbage-collected
    animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=num_of_iterations)
    canvas.draw()
def makeform(root, fields):
entries = {}
for field in fields:
row = tk.Frame(root)
lab = tk.Label(row, width=22, text=field+": ", anchor='w')
ent = tk.Entry(row)
ent.insert(0, "0")
row.pack(side=tk.TOP,
fill=tk.X,
padx=5,
pady=5)
lab.pack(side=tk.LEFT)
ent.pack(side=tk.RIGHT,
expand=tk.YES,
fill=tk.X)
entries[field] = ent
return entries
root = tk.Tk()
root.wm_title("Sheep Horror Model")
ents = makeform(root, fields)
b1 = tk.Button(root, text='Run the model!',command=(run))
b1.pack(side=tk.LEFT, padx=5, pady=5)
fig = plt.figure(figsize=(12, 12))
# (the Tk event loop is entered once, via tk.mainloop() at the bottom of the script)
canvas = FigureanvasTkAgg(fig, master=root) if False else FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
#defines our arguments and creates the lists of sheep and zombie sheep
#(the entry widgets returned by makeform are held in the dict `ents`)
num_of_agents = int(ents['Number of Sheep'].get())
num_of_zombsheep = int(ents['Number of Zombies'].get())
num_of_landmines = int(ents['Number of Landmines'].get())
num_of_iterations = int(ents['Number of Iterations'].get())
neighbourhood = int(ents['Neighborhood size'].get())
blast_radius = int(ents['Explosion size'].get())
agents = []
zombsheep = []
holylandmines = []
#creates the environment from the csv file
environment = []
with open('in.txt', newline='') as f:
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
rowlist = []
for value in row:
rowlist.append(value)
environment.append(rowlist)
#Tests whether the environment has read in properly
"""
plt.imshow(environment)
plt.show()
"""
#Assign starting points to all our agents in their environment
for i in range (num_of_agents):
agents.append(agentframework_zombies.Agent(environment, agents))
for i in range (num_of_zombsheep):
zombsheep.append(agentframework_zombies.Zombiesheep(environment, zombsheep, agents))
for i in range (num_of_landmines):
holylandmines.append(agentframework_zombies.Holy_landmine_of_Antioch(environment, zombsheep))
'''
#Testing to see if our agents have access to the locations of other agents
print("Our first sheep is at", agents[0].x, agents[0].y, ", some other sheep he knows are at:")
for i in range(10):
print(agents[0].agents[i].x, agents[0].agents[i].y)
'''
'''
#This makes the model run until the zombies have wiped out all of the sheep or
#the desired number of iterations has been reached.
'''
def update(frame_number):
fig.clear()
plt.imshow(environment)
plt.xlim(0, agents[0].environment_width)
plt.ylim(0, agents[0].environment_height)
plt.xlim(0, zombsheep[0].environment_width)
plt.ylim(0, zombsheep[0].environment_height)
if len(holylandmines) == 0:
pass
else:
plt.xlim(0, holylandmines[0].environment_width)
plt.ylim(0, holylandmines[0].environment_height)
#shuffles the order in which agents in a list move to avoid "first mover" advantages
random.shuffle(agents)
random.shuffle(zombsheep)
random.shuffle(holylandmines)
for agent in agents:
agent.move()
agent.eat()
agent.share_with_neighbours(neighbourhood)
    for zombiesheep in zombsheep[:]:  # iterate over a copy: new zombies are appended during the loop
zombiesheep.move()
#creates a list of all sheep within "biting range"
target_agents = zombiesheep.bite(neighbourhood, agents, zombsheep)
for target in target_agents:
#adds a new zombie in place of the target's location
zombsheep.append(agentframework_zombies.Zombiesheep(environment, zombsheep, agents, [target.y, target.x]))
#kills the target
agents.remove(target)
#this is done in this order to avoid losing the coordinates of the target
if len(holylandmines) == 0:
pass
else:
        for Holy_landmine_of_Antioch in holylandmines[:]:  # iterate over a copy: mines are removed on detonation
ded_zombies = Holy_landmine_of_Antioch.detonate(blast_radius, zombsheep)
if len(ded_zombies)> 0:
for ded_zombie in ded_zombies:
zombsheep.remove(ded_zombie)
holylandmines.remove(Holy_landmine_of_Antioch)
#plots our sheep in white and our zombies in red and our landmines in gold
for agent in agents:
plt.scatter(agent.x, agent.y, c="snow")
for zombiesheep in zombsheep:
plt.scatter(zombiesheep.x, zombiesheep.y, c="red")
if len(holylandmines) == 0:
pass
else:
for Holy_landmine_of_Antioch in holylandmines:
plt.scatter(Holy_landmine_of_Antioch.x, Holy_landmine_of_Antioch.y, c="gold")
print(frame_number)
#Prints an update on how the sheep vs zombie battle is going
    print("There are", str(len(agents)), "sheep,", str(len(zombsheep)), "zombie sheep, and", str(len(holylandmines)), "landmines remaining.")
#prints a victory message for the zombies if they manage to convert all the sheep
if len(agents) == 0:
print("Braiiiiins! Zombies win!")
#prints a victory message for the sheep if they manage to survive until dawn or all zombies die
if int(frame_number) == int(num_of_iterations)-1:
print("Baaaahhhh! Sheep win!")
if len(zombsheep) == 0:
print("Baaaahhhh! Sheep win!")
#Showing our model in an animation
plt.ylim(0, 299)
plt.xlim(0, 299)
plt.imshow(environment)
#for i in range (num_of_agents):
# plt.scatter(agents[i].x,agents[i].y)
#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=num_of_iterations)
#plt.show()
#ends
tk.mainloop()
|
[
"55386091+NathanKhadaroo@users.noreply.github.com"
] |
55386091+NathanKhadaroo@users.noreply.github.com
|
0493a224edaa31bec7f72ac3170b5d58a7ad04b6
|
4476c376d8e1304ef30f70813db4a04b8f57474b
|
/Outil Aero/Python27_J_TC/VLM.py
|
3f4ab7c532ca49f942443707db07dd089ecbe1ad
|
[] |
no_license
|
emmanuelbenard/QuentinProject
|
05a5d64e049abbec43a00110209527b069d4029c
|
9c488678b54bd74b02676bf110cecc28abecd7db
|
refs/heads/master
| 2022-01-12T08:00:43.729908
| 2018-04-08T17:06:27
| 2018-04-08T17:06:27
| 176,025,207
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176,412
|
py
|
# -*- coding: utf-8 -*-
import math as m
import numpy as np
import utilitaire as u
import Polar as p
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import Flow
def ICMatrix(ac,cla,flow):
""" Prediction of aerodynamic characteristics of the wing
Autor : Quentin borlon
Date : 5 mai 2017
Function that predicts the aerodynamic coefficients for a given wing.
Based on the wing geometry and the sectional 2D aerodynamic datas.
Function initially based, and notation preserved from :
2004 Mihai Pruna, Alberto Davila;
free software; under the terms of the GNU General Public License
INPUT:
clAlpha : vertical array with clAlphas(i) is the lift curve slope of the
panel from wing.y(i) to wing.y(i+1);
wing : a structral object with as fields:
b : span
chord : vertical array with the chord at the root (1) any
discontinuity of taper ratio (2:end-1) and at the tip (end);
flapsDiscY : vertical array with the spanwise coordinate of the flaps
discontinuity sections
afDiscY : vertical array with the spanwise coordinate of the
airfoil discontinuity sections
airfoil : a cell-array with each cell gives the airfoil naca number
representation, cell 1 correspond to first panel after root.
sweep : vertical array with wing.sweep(i) is the sweep angle of
the panel from wing.y(i) to wing.y(i+1) (rad)
dih : vertical array with wing.dih(i) is the dihedral angle of
the panel from wing.y(i) to wing.y(i+1) (rad)
twist : vertical array with wing.twist(i) is the twist angle of
the section at wing.y(i) (rad)
deltasFlaps : vertical array with wing.deltasFlaps(i) is the
flaps defection of the panel from wing.y(i) to wing.y(i+1)
(deg)
r : number of spanwise panel along the wing;
m : number of chordwise panel along the airfoil;
Mach : flight mach number
cFlaps_cLoc : vertical array with wing.cFlaps_cLocs(i) is the
local flaps to chord ratio
y : the spanwise location of (-b/2 -> b/2) the limits of the panels
discY : vertical array of the complete set of the spanwise location
airfoilIndex : vertical array with wing.airfoilIndex(i) is the index of
the airfoil (wing.airfoil) to use for the section at wing.y(i)
chordDistrib : vertical array with wing.chordDistrib(i) is the chord length of
the section at wing.y(i)
OUTPUT:
A : the influence coefficient matrix [n x n] such that A*{GAMMA/2} + {Q}*{normal} = 0
normal : a [3 x (wing.getR()/2+1)] matrix that provides the normal downward
of the panel."""
prop = ac.prop;
wing = ac.wing;
cf = wing.getCF();
# Generate grid coordinates
# Generate collocation points and normal : where tangency condition is
# satisfied. Distance from bound vortex depends on the sectional lift
# curve slope : (dist/localChord) = clAlphas/(4*pi), clAlphas assumed to be 2 *pi
if prop.bool and cf != 0.:
return getGridF_Engines(flow,ac,cla);
elif prop.bool:
return getGrid_Engines(flow,ac,cla);
elif cf !=0.:
return getGridF_NOEngines(flow,ac,cla);
else:
return getGrid_NOEngines(flow,ac,cla);
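# Note on the collocation-point placement used by the grid builders below:
# coef = 0.25 + cla/(4*pi), so with the thin-airfoil value cla = 2*pi one gets
# coef = 0.25 + 0.5 = 0.75, i.e. the classical quarter-chord bound vortex /
# three-quarter-chord collocation arrangement.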
def getGrid_NOEngines(flow,ac,cla):
# flow sideslip and aoa angles
beta = - flow.getBeta()*m.pi/180.;
aoa = m.pi * (flow.getAMax()+flow.getAMin())/180.;
# Main lifting surfaces
wing = ac.wing;
htail = ac.htail;
# Numerical parameters for discretization
    mC = wing.mC; # chordwise discretisation: number of control points along the chord
    mW = max([8,int(3.*flow.V0/wing.getMac())]); # discretisation of the wake, to get the correct direction of it behind parts
    n = wing.getR()+htail.getR(); # spanwise discretisation: number of panels
# Recover the wing parameters
# Panels' coordinates and main parameters (at c/4)
xp = wing.getXP();
yp = wing.getYP();
zp = wing.getZP();
cP = wing.getChord();
tw = wing.getTwist();
dih = wing.getDih();
sw = wing.getSweepC4();
    # Panel borders' coordinates and main parameters (at c/4)
x = wing.getX();
y = wing.getY();
z = wing.getZ();
c = wing.getChordDist();
twSec = wing.twSec;
xW = np.unique(np.concatenate([0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xW);
iC4W = np.where(xW == 0.25)[0][0];
zW = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zW[:,ii]= camber(wing.getAF(ii),xW);
if htail.bool:
        # Panel borders' coordinates and main parameters (at c/4)
x = np.concatenate([x,htail.getX()]);
y = np.concatenate([y,htail.getY()]);
z = np.concatenate([z,htail.getZ()]);
c = np.concatenate([c,htail.getChordDist()]);
twSec = np.concatenate([wing.twSec,htail.twSec]);
# Panels' coordinates and main parameters (at c/4)
xp = np.concatenate([xp,htail.getXP()]);
yp = np.concatenate([yp,htail.getYP()]);
zp = np.concatenate([zp,htail.getZP()]);
cP = np.concatenate([cP,htail.getChord()]);
tw = np.concatenate([tw,htail.getTwist()]);
dih = np.concatenate([dih,htail.getDih()]);
sw = np.concatenate([sw,htail.getSweepC4()]);
# Elevator, Assumed to be as plain flaps
cfT = htail.getCF();
if cfT != 0:
xT = np.unique(np.concatenate([np.linspace(1.,1.-cfT,2),(1.-cfT)*0.5*(np.cos(np.linspace(m.pi,0.,mC-1))+1.)]));
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
else:
xT = 0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.);
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
iC4T = np.where(xT == 0.25)[0][0];
zT = np.zeros([mC,len(htail.getAF())],dtype = float);
for ii in range(len(htail.getAF())):
        zT[:,ii]= camber(htail.getAF(ii),xT);
X = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
Y = np.zeros(n * (2 * (mC + mW)+1),dtype = float); # initialization
Z = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
COLOCX=np.zeros((mC-1)*n);
COLOCY=np.zeros((mC-1)*n);
COLOCZ=np.zeros((mC-1)*n);
normal = np.zeros([3,(mC-1)*n]);
coef = 0.25+cla*0.25/m.pi;
ds = np.zeros((mC-1)*n); # vector of area of any panel
dS = np.zeros(n); # vector of area of a spanwise section
xvl = np.zeros(mC + mW,dtype = float);
yvl = np.zeros(mC + mW,dtype = float);
zvl = np.zeros(mC + mW,dtype = float);
xvr = np.zeros(mC + mW,dtype = float);
yvr = np.zeros(mC + mW,dtype = float);
zvr = np.zeros(mC + mW,dtype = float);
dzdx = np.zeros(mW-1,dtype = float);
dydx = np.zeros(mW-1,dtype = float);
for i in range(wing.getR()):
camb = zW[:,wing.getAFI(i)]
il = i;
cl = c[il];
twl = twSec[il];
xl = (xW - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4W],yl[iC4W],zl[iC4W]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
xvl[mC:-1] = xvl[mC-1] + 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvl[-1] = 10. * wing.b;
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
dzdx = dzdxl * np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
for ii in range(mW-1):
zvl[mC+ii] = zvl[mC+(ii-1)] + dzdx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
yvl[mC+ii] = yvl[mC+(ii-1)] + dydx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
zvl[-1] = zvl[-2] + m.tan(aoa) * (xvl[-1] - xvl[-2]);
yvl[-1] = yvl[-2] + m.tan(beta) * (xvl[-1] - xvl[-2]);
ir = i+1;
cr = c[ir];
twr = twSec[ir];
xr = (xW - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4W],yr[iC4W],zr[iC4W]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
xvr[mC:-1] = xvr[mC-1] + 2.5 * cr * (1.+np.array(range(mW-1),dtype = float))/mW;
xvr[-1] = 10. * wing.b;
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
dzdx = dzdxr * np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
for ii in range(mW-1):
zvr[mC+ii] = zvr[mC+(ii-1)] + dzdx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
yvr[mC+ii] = yvr[mC+(ii-1)] + dydx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
zvr[-1] = zvr[-2] + m.tan(aoa) * (xvr[-1] - xvr[-2]);
yvr[-1] = yvr[-2] + m.tan(beta) * (xvr[-1] - xvr[-2]);
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
for i in range(wing.getR(),wing.getR()+htail.getR()):
iPT = i-wing.getR();
camb = zT[:,htail.getAFI(iPT)]
il = i+1;
cl = c[il];
twl = twSec[il];
xl = (xT - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4T],yl[iC4T],zl[iC4T]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xl[-2],yl[-2],zl[-2]]);
point = np.array([xl[-1],yl[-1],zl[-1]])-center;
point = np.dot(RotF,point) + center;
xl[-1] = point[0];
yl[-1] = point[1];
zl[-1] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
xvl[mC:-1] = xvl[mC-1] + 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvl[-1] = 10. * wing.b;
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
dzdx = dzdxl * np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
for ii in range(mW-1):
zvl[mC+ii] = zvl[mC+(ii-1)] + dzdx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
yvl[mC+ii] = yvl[mC+(ii-1)] + dydx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
zvl[-1] = zvl[-2] + m.tan(aoa) * (xvl[-1] - xvl[-2]);
yvl[-1] = yvl[-2] + m.tan(beta) * (xvl[-1] - xvl[-2]);
ir = i+2;
cr = c[ir];
twr = twSec[ir];
xr = (xT - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4T],yr[iC4T],zr[iC4T]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xr[-2],yr[-2],zr[-2]]);
point = np.array([xr[-1],yr[-1],zr[-1]])-center;
point = np.dot(RotF,point) + center;
xr[-1] = point[0];
yr[-1] = point[1];
zr[-1] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
xvr[mC:-1] = xvr[mC-1] + 2.5 * cr * (1.+np.array(range(mW-1),dtype = float))/mW;
xvr[-1] = 10. * wing.b;
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
dzdx = dzdxr * np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
for ii in range(mW-1):
zvr[mC+ii] = zvr[mC+(ii-1)] + dzdx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
yvr[mC+ii] = yvr[mC+(ii-1)] + dydx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
zvr[-1] = zvr[-2] + m.tan(aoa) * (xvr[-1] - xvr[-2]);
yvr[-1] = yvr[-2] + m.tan(beta) * (xvr[-1] - xvr[-2]);
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
    select = np.zeros([n,n * (mC-1)]); # picks out the intensity of the last chordwise vortex only
    select2 = np.zeros([n * (mC-1),n]); # for every panel of the same spanwise section, same velocity triangle
    select3 = np.zeros([n + len(ac.prop.D),n * (mC-1) + len(ac.prop.D)]); # area-weighted average over each spanwise section
for i in range(n):
select[i,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i] = 1.;
select3[i,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i];
if ac.prop.bool:
select3[-len(ac.prop.D):,-len(ac.prop.D):] = np.eye(len(ac.prop.D));
Ao,Vxo,Vyo,Vzo = ICM(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW);
invA = np.linalg.inv(Ao);
A = invA;
Vx = np.dot(select3,Vxo);
Vy = np.dot(select3,Vyo);
Vz = np.dot(select3,Vzo);
return A,normal,Vx,Vy,Vz,select,select2;
def getGrid_Engines(flow,ac,cla):
# flow sideslip and aoa angles
beta = - flow.getBeta()*m.pi/180.;
aoa = m.pi * (flow.getAMax()+flow.getAMin())/180.;
# Main lifting surfaces
wing = ac.wing;
htail = ac.htail;
prop = ac.prop;
    rho0=1.225; # air density at sea level [kg/m^3]
    dT=-6.5; # temperature gradient in the troposphere [K/km]
    T0=288.15; # temperature at sea level [K]
    g=9.80665; # gravitational acceleration [m/s^2]
    Rair=287.1; # specific gas constant of air [m^2/(s^2*K)]
    h = flow.getH(); # flight altitude [km]
    V0 = flow.getV0(); # freestream velocity [m/s]
    rho = rho0 * (1. + dT*h/T0)**(- g/(Rair*dT*10**(-3)) - 1.); # air density
    Sh = m.pi * prop.getD()**2 *0.25; # actuator disk area [m^2]
nbE = len(prop.getD());
OWU = prop.getYp()/np.abs(prop.getYp());
for ii in range(nbE):
if not(prop.OWU[ii]):
            OWU[ii] *= -1.; # flip only this engine's rotation sense
# Numerical parameters for discretization
    tF = 2.; # characteristic convection time of the wake vortices
    nbEch = 1.; # minimum number of control points per revolution of the streamlines/vortices
    mW = int(tF*nbEch/(2*m.pi)*max(prop.getOmega())); # discretisation of the wake, to get the correct direction of it behind parts
times = np.linspace(0.,tF,mW);
    mC = wing.mC; # chordwise discretisation: number of control points along the chord
    n = wing.getR()+htail.getR(); # spanwise discretisation: number of panels
# Recover the wing parameters
# Panels' coordinates and main parameters (at c/4)
xp = wing.getXP();
yp = wing.getYP();
zp = wing.getZP();
cP = wing.getChord();
tw = wing.getTwist();
dih = wing.getDih();
sw = wing.getSweepC4();
    # Panel borders' coordinates and main parameters (at c/4)
x = wing.getX();
y = wing.getY();
z = wing.getZ();
c = wing.getChordDist();
twSec = wing.twSec;
# Flaps
xW = np.unique(np.concatenate([0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xW);
iC4W = np.where(xW == 0.25)[0][0];
zW = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zW[:,ii]= camber(wing.getAF(ii),xW);
if htail.bool:
        # Panel borders' coordinates and main parameters (at c/4)
x = np.concatenate([x,htail.getX()]);
y = np.concatenate([y,htail.getY()]);
z = np.concatenate([z,htail.getZ()]);
c = np.concatenate([c,htail.getChordDist()]);
twSec = np.concatenate([wing.twSec,htail.twSec]);
# Panels' coordinates and main parameters (at c/4)
xp = np.concatenate([xp,htail.getXP()]);
yp = np.concatenate([yp,htail.getYP()]);
zp = np.concatenate([zp,htail.getZP()]);
cP = np.concatenate([cP,htail.getChord()]);
tw = np.concatenate([tw,htail.getTwist()]);
dih = np.concatenate([dih,htail.getDih()]);
sw = np.concatenate([sw,htail.getSweepC4()]);
# Elevator, Assumed to be as plain flaps
cfT = htail.getCF();
if cfT != 0:
xT = np.unique(np.concatenate([np.linspace(1.,1.-cfT,2),(1.-cfT)*0.5*(np.cos(np.linspace(m.pi,0.,mC-1))+1.)]));
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
else:
xT = 0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.);
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
iC4T = np.where(xT == 0.25)[0][0];
zT = np.zeros([mC,len(htail.getAF())],dtype = float);
for ii in range(len(htail.getAF())):
        zT[:,ii]= camber(htail.getAF(ii),xT);
X = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
Y = np.zeros(n * (2 * (mC + mW)+1),dtype = float); # initialization
Z = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
COLOCX=np.zeros((mC-1)*n);
COLOCY=np.zeros((mC-1)*n);
COLOCZ=np.zeros((mC-1)*n);
normal = np.zeros([3,(mC-1)*n]);
coef = 0.25+cla*0.25/m.pi;
ds = np.zeros((mC-1)*n); # vector of area of any panel
dS = np.zeros(n); # vector of area of a spanwise section
xvl = np.zeros(mC + mW,dtype = float);
yvl = np.zeros(mC + mW,dtype = float);
zvl = np.zeros(mC + mW,dtype = float);
xvr = np.zeros(mC + mW,dtype = float);
yvr = np.zeros(mC + mW,dtype = float);
zvr = np.zeros(mC + mW,dtype = float);
dzdx = np.zeros(mW-1,dtype = float);
dydx = np.zeros(mW-1,dtype = float);
for i in range(wing.getR()):
camb = zW[:,wing.getAFI(i)]
il = i;
cl = c[il];
twl = twSec[il];
xl = (xW - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4W],yl[iC4W],zl[iC4W]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1:] = yvl[mC-2] + (yl[-1]-yl[-2]); # initial guess : stay straight at the end of the wing
zvl[mC-1:] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
        # introduce the effect of the propellers
        # caution: take into account that some streamlines lie in the slipstream of two engines!
centerPropY = prop.getYp() + (xvl[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvl[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvl[mC-1] - centerPropY[j])**2 + (zvl[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvl[mC-1] - centerPropZ[j],yvl[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
vix += 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvl[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvl[mC-1] ;
yvl[mC:-1] += dY;
zvl[mC:-1] += dZ;
xvl[mC-1:-1] = xvl[mC-1] + times * (V0+vix);
xvl[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvl >= xvl[mC-1] + 2.5 * cl)[0][1];
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
        # Check this!
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dzdx = V0/(V0+vix) * dzdxl * np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
yvl[mC-1:] += dY;
zvl[mC-1:] += dZ;
ir = i+1;
cr = c[ir];
twr = twSec[ir];
xr = (xW - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4W],yr[iC4W],zr[iC4W]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1:] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1:] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
centerPropY = prop.getYp() + (xvr[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvr[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvr[mC-1] - centerPropY[j])**2 + (zvr[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvr[mC-1] - centerPropZ[j],yvr[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
vix += 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvr[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvr[mC-1] ;
yvr[mC:-1] += dY;
zvr[mC:-1] += dZ;
xvr[mC-1:-1] = xvr[mC-1] + times * (V0+vix);
xvr[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvr >= xvr[mC-1] + 2.5 * cr)[0][1];
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
        # Check this!
dydx = V0/(V0+vix) * m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dzdx = V0/(V0+vix) * dzdxr * np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
yvr[mC-1:] += dY;
zvr[mC-1:] += dZ;
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
for i in range(wing.getR(),wing.getR()+htail.getR()):
iPT = i-wing.getR();
camb = zT[:,htail.getAFI(iPT)]
il = i+1;
cl = c[il];
twl = twSec[il];
xl = (xT - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4T],yl[iC4T],zl[iC4T]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xl[-2],yl[-2],zl[-2]]);
point = np.array([xl[-1],yl[-1],zl[-1]])-center;
point = np.dot(RotF,point) + center;
xl[-1] = point[0];
yl[-1] = point[1];
zl[-1] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1:] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1:] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
centerPropY = prop.getYp() + (xvl[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvl[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvl[mC-1] - centerPropY[j])**2 + (zvl[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvl[mC-1] - centerPropZ[j],yvl[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
vix += 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvl[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvl[mC-1] ;
yvl[mC:-1] += dY;
zvl[mC:-1] += dZ;
xvl[mC-1:-1] = xvl[mC-1] + times * (V0+vix);
xvl[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvl >= xvl[mC-1] + 2.5 * cl)[0][1];
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
        # Check this!
dydx = V0/(V0+vix) * m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dzdx = V0/(V0+vix)*dzdxl * np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])) \
+ V0/(V0+vix)*m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
yvl[mC-1:] += dY;
zvl[mC-1:] += dZ;
ir = i+2;
cr = c[ir];
twr = twSec[ir];
xr = (xT - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4T],yr[iC4T],zr[iC4T]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xr[-2],yr[-2],zr[-2]]);
point = np.array([xr[-1],yr[-1],zr[-1]])-center;
point = np.dot(RotF,point) + center;
xr[-1] = point[0];
yr[-1] = point[1];
zr[-1] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1:] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1:] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
centerPropY = prop.getYp() + (xvr[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvr[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvr[mC-1] - centerPropY[j])**2 + (zvr[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvr[mC-1] - centerPropZ[j],yvr[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
vix += 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.);
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvr[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvr[mC-1] ;
yvr[mC:-1] += dY;
zvr[mC:-1] += dZ;
xvr[mC-1:-1] = xvr[mC-1] + times * (V0+vix);
xvr[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvr >= xvr[mC-1] + 2.5 * cr)[0][1];
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
        # Check this!
dydx = V0/(V0+vix)*m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dzdx = V0/(V0+vix)*dzdxr * np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])) \
+ V0/(V0+vix)*m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
yvr[mC-1:] += dY;
zvr[mC-1:] += dZ;
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
    select = np.zeros([n,n * (mC-1)]); # picks out the intensity of the last chordwise vortex only
    select2 = np.zeros([n * (mC-1),n]); # for every panel of the same spanwise section, same velocity triangle
    select3 = np.zeros([n + len(ac.prop.D),n * (mC-1) + len(ac.prop.D)]); # area-weighted average over each spanwise section
for i in range(n):
select[i,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i] = 1.;
select3[i,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i];
if ac.prop.bool:
select3[-len(ac.prop.D):,-len(ac.prop.D):] = np.eye(len(ac.prop.D));
# plt.plot(Y,X),plt.axis([-8,8,-1,15]),plt.show()
# plt.plot(Y,Z),plt.axis([-8,8,-1,15]),plt.show()
# plt.plot(X,Z),plt.axis([-1,15,-1,15]),plt.show()
# return
Ao,Vxo,Vyo,Vzo = ICM(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW);
invA = np.linalg.inv(Ao);
A = invA;
Vx = np.dot(select3,Vxo);
Vy = np.dot(select3,Vyo);
Vz = np.dot(select3,Vzo);
return A,normal,Vx,Vy,Vz,select,select2;
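# Actuator-disk (momentum theory) relations used in getGrid_Engines above, in
# plain form for reference:
#   axial induced velocity : vix = 0.5*V0*(sqrt(1 + 2*T/(rho*Sh*V0**2)) - 1)
#   axial induction factor : a = vix/V0
#   swirl factor           : a' = 0.5*(1 - sqrt(|1 - 4*a*(1+a)*(V0/(Omega*d))**2|))
#   tangential velocity    : vitheta = 2*a'*Omega*d   (sign set by the rotation sense OWU)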
def getGridF_NOEngines(flow,ac,cla):
# flow sideslip and aoa angles
beta = - flow.getBeta()*m.pi/180.;
aoa = m.pi * (flow.getAMax()+flow.getAMin())/180.;
# Main lifting surfaces
wing = ac.wing;
htail = ac.htail;
cf = wing.getCF();
# Numerical parameters for discretization
    mC = wing.mC; # chordwise discretisation: number of control points along the chord
    mW = flow.mW; # discretisation of the wake, to get the correct direction of it behind parts
    n = 2*wing.getR()+htail.getR(); # spanwise discretisation: number of panels
    # Recover the wing parameters
# Panels' coordinates and main parameters (at c/4)
xp = wing.getXP();
yp = wing.getYP();
zp = wing.getZP();
cP = wing.getChord();
tw = wing.getTwist();
dih = wing.getDih();
sw = wing.getSweepC4();
    # Panel borders' coordinates and main parameters (at c/4)
x = wing.getX();
y = wing.getY();
z = wing.getZ();
c = wing.getChordDist();
twSec = wing.twSec;
xW = np.unique(np.concatenate([(1.-cf)*0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xW);
    iC4W = np.where(xW == 0.25)[0][0]; # index of the flap start / end of the main element
zW = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zW[:,ii]= camber(wing.getAF(ii),xW);
xF = 1. - cf * np.linspace(1.,0,mC);
zF = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zF[:,ii] = camber(wing.getAF(ii),xF);
if htail.bool:
        # Panel borders' coordinates and main parameters (at c/4)
x = np.concatenate([x,htail.getX()]);
y = np.concatenate([y,htail.getY()]);
z = np.concatenate([z,htail.getZ()]);
c = np.concatenate([c,htail.getChordDist()]);
twSec = np.concatenate([wing.twSec,htail.twSec]);
# Panels' coordinates and main parameters (at c/4)
xp = np.concatenate([xp,htail.getXP()]);
yp = np.concatenate([yp,htail.getYP()]);
zp = np.concatenate([zp,htail.getZP()]);
cP = np.concatenate([cP,htail.getChord()]);
tw = np.concatenate([tw,htail.getTwist()]);
dih = np.concatenate([dih,htail.getDih()]);
sw = np.concatenate([sw,htail.getSweepC4()]);
# Elevator, Assumed to be as plain flaps
cfT = htail.getCF();
if cfT != 0:
xT = np.unique(np.concatenate([np.linspace(1.,1.-cfT,2),(1.-cfT)*0.5*(np.cos(np.linspace(m.pi,0.,mC-1))+1.)]));
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
else:
xT = 0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.);
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
iC4T = np.where(xT == 0.25)[0][0];
zT = np.zeros([mC,len(htail.getAF())],dtype = float);
for ii in range(len(htail.getAF())):
        zT[:,ii]= camber(htail.getAF(ii),xT);
#generate grid corner coordinates
# generate collocation points and normal : where tangency condition is
# satisfied. Distance from bound vortex depends on the sectional lift
# curve slope : (dist/localChord) = clAlphas/(4*pi)
X = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
Y = np.zeros(n * (2 * (mC + mW)+1),dtype = float); # initialization
Z = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
COLOCX=np.zeros((mC-1)*n);
COLOCY=np.zeros((mC-1)*n);
COLOCZ=np.zeros((mC-1)*n);
normal = np.zeros([3,(mC-1)*n]);
coef = 0.25+cla*0.25/m.pi;
ds = np.zeros((mC-1)*n); # vector of area of any panel
dS = np.zeros(wing.r+htail.r); # vector of area of a spanwise section
xvl = np.zeros(mC + mW,dtype = float);
yvl = np.zeros(mC + mW,dtype = float);
zvl = np.zeros(mC + mW,dtype = float);
xvr = np.zeros(mC + mW,dtype = float);
yvr = np.zeros(mC + mW,dtype = float);
zvr = np.zeros(mC + mW,dtype = float);
xvlf = np.zeros(mC + mW,dtype = float);
yvlf = np.zeros(mC + mW,dtype = float);
zvlf = np.zeros(mC + mW,dtype = float);
xvrf = np.zeros(mC + mW,dtype = float);
yvrf = np.zeros(mC + mW,dtype = float);
zvrf = np.zeros(mC + mW,dtype = float);
dzdx = np.zeros(mW-1,dtype = float);
dzdxf = np.zeros(mW-1,dtype = float);
for i in range(wing.getR()):
camb = zW[:,wing.getAFI(i)]
cambF = zF[:,wing.getAFI(i)];
il = i;
cl = c[il];
twl = twSec[il];
xl = (xW - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
xlf = (xF - 0.25) * cl + x[il];
ylf = y[il] * np.ones(mC);
zlf = cambF * cl + z[il];
center = np.array([xl[iC4W],yl[iC4W],zl[iC4W]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
pointf = np.array([xlf[ii],ylf[ii],zlf[ii]])-center;
pointf = np.dot(Rot,pointf) + center;
xlf[ii] = pointf[0] - 0.02 * cl;
ylf[ii] = pointf[1];
zlf[ii] = pointf[2] - 0.02 * cl;
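            # Flap points are offset by 2% of the local chord in x and z
            # before the flap is rotated about its leading edge below.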
centerf = np.array([xlf[0],ylf[0],zlf[0]]);
delta = wing.getDF(i);
Rotf = u.roty(delta);
for ii in range(mC):
pointf = np.array([xlf[ii],ylf[ii],zlf[ii]])-centerf;
pointf = np.dot(Rotf,pointf) + centerf;
xlf[ii] = pointf[0];
ylf[ii] = pointf[1];
zlf[ii] = pointf[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1] = zvl[mC-2] + (zl[-1]-zl[-2]);
xvlf[:mC-1] = 0.75 * xlf[:-1] + 0.25 * xlf[1:];
yvlf[:mC-1] = 0.75 * ylf[:-1] + 0.25 * ylf[1:];
zvlf[:mC-1] = 0.75 * zlf[:-1] + 0.25 * zlf[1:];
xvlf[mC-1] = xvlf[mC-2] + (xlf[-1]-xlf[-2]);
yvlf[mC-1] = yvlf[mC-2] + (ylf[-1]-ylf[-2]);
zvlf[mC-1] = zvlf[mC-2] + (zlf[-1]-zlf[-2]);
        # End of chord vortex = beginning of wake vortex
Wake = 1.;
xvl[mC:-1] = xvl[mC-1] + Wake * 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvl[-1] = 10. * wing.b * Wake + (1.- Wake) * xvl[mC-1];
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
dzdx = dzdxl * np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
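        # Wake relaxation: the trailing-edge camber slope decays as exp(-3*x/L)
        # over the near wake while the freestream direction (aoa, beta) blends
        # in, so the wake leaves tangent to the section and ends flow-aligned.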
for ii in range(mW-1):
zvl[mC+ii] = zvl[mC+(ii-1)] + dzdx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
yvl[mC+ii] = yvl[mC+(ii-1)] + dydx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
zvl[-1] = zvl[-2] + m.tan(aoa) * (xvl[-1] - xvl[-2]);
yvl[-1] = yvl[-2] + m.tan(beta) * (xvl[-1] - xvl[-2]);
xvlf[mC:-1] = xvlf[mC-1] + 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvlf[-1] = 10. * wing.b;
dzdxlf = (zlf[mC-1]-zlf[mC-2])/(xlf[mC-1]-xlf[mC-2]);
dzdxf = dzdxlf * np.exp(-3.*(np.array(xvlf[mC:-1] - xvlf[mC]))/(xvlf[-2] - xvlf[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvlf[mC:-1] - xvlf[mC]))/(xvlf[-2] - xvlf[mC])));
dydxf = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvlf[mC:-1] - xvlf[mC]))/(xvlf[-2] - xvlf[mC])));
for ii in range(mW-1):
zvlf[mC+ii] = zvlf[mC+(ii-1)] + dzdxf[ii] * (xvlf[mC+ii] - xvlf[mC+(ii-1)]);
yvlf[mC+ii] = yvlf[mC+(ii-1)] + dydxf[ii] * (xvlf[mC+ii] - xvlf[mC+(ii-1)]);
zvlf[-1] = zvlf[-2] + m.tan(aoa) * (xvlf[-1] - xvlf[-2]);
yvlf[-1] = yvlf[-2] + m.tan(beta) * (xvlf[-1] - xvlf[-2]);
## Right Part
ir = i+1;
cr = c[ir];
twr = twSec[ir];
xr = (xW - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
xrf = (xF - 0.25) * cr + x[ir];
yrf = y[ir] * np.ones(mC);
zrf = cambF * cr + z[ir];
center = np.array([xr[iC4W],yr[iC4W],zr[iC4W]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
pointf = np.array([xrf[ii],yrf[ii],zrf[ii]])-center;
pointf = np.dot(Rot,pointf) + center;
xrf[ii] = pointf[0] - 0.02 * cr;
yrf[ii] = pointf[1];
zrf[ii] = pointf[2] - 0.02 * cr;
centerf = np.array([xrf[0],yrf[0],zrf[0]]);
for ii in range(mC):
pointf = np.array([xrf[ii],yrf[ii],zrf[ii]])-centerf;
pointf = np.dot(Rotf,pointf) + centerf;
xrf[ii] = pointf[0];
yrf[ii] = pointf[1];
zrf[ii] = pointf[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1] = zvr[mC-2] + (zr[-1]-zr[-2]);
xvrf[:mC-1] = 0.75 * xrf[:-1] + 0.25 * xrf[1:];
yvrf[:mC-1] = 0.75 * yrf[:-1] + 0.25 * yrf[1:];
zvrf[:mC-1] = 0.75 * zrf[:-1] + 0.25 * zrf[1:];
xvrf[mC-1] = xvrf[mC-2] + (xrf[-1]-xrf[-2]);
yvrf[mC-1] = yvrf[mC-2] + (yrf[-1]-yrf[-2]);
zvrf[mC-1] = zvrf[mC-2] + (zrf[-1]-zrf[-2]);
        # End of chord vortex = beginning of wake vortex
xvr[mC:-1] = xvr[mC-1] + Wake * 2.5 * cr * (1.+np.array(range(mW-1),dtype = float))/mW;
xvr[-1] = 10. * wing.b * Wake + (1.- Wake) * xvr[mC-1];
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
dzdx = dzdxr * np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
for ii in range(mW-1):
zvr[mC+ii] = zvr[mC+(ii-1)] + dzdx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
yvr[mC+ii] = yvr[mC+(ii-1)] + dydx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
zvr[-1] = zvr[-2] + m.tan(aoa) * (xvr[-1] - xvr[-2]);
yvr[-1] = yvr[-2] + m.tan(beta) * (xvr[-1] - xvr[-2]);
xvrf[mC:-1] = xvrf[mC-1] + 2.5 * cr * (1.+np.array(range(mW-1),dtype = float))/mW;
xvrf[-1] = 10. * wing.b;
yvrf[mC:] = yvrf[mC-1] + m.tan(beta) * (xvrf[mC:] - xvrf[mC-1]);
dzdxrf = (zrf[mC-1]-zrf[mC-2])/(xrf[mC-1]-xrf[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvrf[mC:-1] - xvrf[mC]))/(xvrf[-2] - xvrf[mC])));
dzdx = dzdxrf * np.exp(-3.*(np.array(xvrf[mC:-1] - xvrf[mC]))/(xvrf[-2] - xvrf[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvrf[mC:-1] - xvrf[mC]))/(xvrf[-2] - xvrf[mC])));
for ii in range(mW-1):
            zvrf[mC+ii] = zvrf[mC+(ii-1)] + dzdx[ii] * (xvrf[mC+ii] - xvrf[mC+(ii-1)]);
            yvrf[mC+ii] = yvrf[mC+(ii-1)] + dydx[ii] * (xvrf[mC+ii] - xvrf[mC+(ii-1)]);
zvrf[-1] = zvrf[-2] + m.tan(aoa) * (xvrf[-1] - xvrf[-2]);
yvrf[-1] = yvrf[-2] + m.tan(beta) * (xvrf[-1] - xvrf[-2]);
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
setTable(X,2*(mC+mW)+1,wing.r+i,np.concatenate([[xvlf[0]],xvrf,xvlf[::-1]]));
setTable(Y,2*(mC+mW)+1,wing.r+i,np.concatenate([[yvlf[0]],yvrf,yvlf[::-1]]));
setTable(Z,2*(mC+mW)+1,wing.r+i,np.concatenate([[zvlf[0]],zvrf,zvlf[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
val = [xvlf[j],xvrf[j],0.5* (xlf[j] + xrf[j]), 0.5* (xlf[j+1] + xrf[j+1])];
COLOCX[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvlf[j],yvrf[j],0.5* (ylf[j] + yrf[j]), 0.5* (ylf[j+1] + yrf[j+1])];
COLOCY[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvlf[j],zvrf[j],0.5* (zlf[j] + zrf[j]), 0.5* (zlf[j+1] + zrf[j+1])];
COLOCZ[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[(i + wing.r) * (mC-1) + j] = cpmag;
normal[:, (i + wing.r) * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]) + sum(ds[(i+wing.r) * (mC-1):(i+wing.r+1) * (mC-1)]);
for i in range(2*wing.getR(),2*wing.getR()+htail.getR()):
iPT = i- 2 * wing.getR();
camb = zT[:,htail.getAFI(iPT)]
il = i+1 - wing.r;
cl = c[il];
twl = twSec[il];
xl = (xT - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4T],yl[iC4T],zl[iC4T]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xl[-2],yl[-2],zl[-2]]);
point = np.array([xl[-1],yl[-1],zl[-1]])-center;
point = np.dot(RotF,point) + center;
xl[-1] = point[0];
yl[-1] = point[1];
zl[-1] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
xvl[mC:-1] = xvl[mC-1] + 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvl[-1] = 10. * wing.b;
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
dzdx = dzdxl * np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
for ii in range(mW-1):
zvl[mC+ii] = zvl[mC+(ii-1)] + dzdx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
yvl[mC+ii] = yvl[mC+(ii-1)] + dydx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
zvl[-1] = zvl[-2] + m.tan(aoa) * (xvl[-1] - xvl[-2]);
yvl[-1] = yvl[-2] + m.tan(beta) * (xvl[-1] - xvl[-2]);
ir = i+2 - wing.r;
cr = c[ir];
twr = twSec[ir];
xr = (xT - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4T],yr[iC4T],zr[iC4T]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xr[-2],yr[-2],zr[-2]]);
point = np.array([xr[-1],yr[-1],zr[-1]])-center;
point = np.dot(RotF,point) + center;
xr[-1] = point[0];
yr[-1] = point[1];
zr[-1] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
xvr[mC:-1] = xvr[mC-1] + 2.5 * cr * (1.+np.array(range(mW-1),dtype = float))/mW;
xvr[-1] = 10. * wing.b;
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
dydx = m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
dzdx = dzdxr * np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])) \
+ m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvr[mC:-1] - xvr[mC]))/(xvr[-2] - xvr[mC])));
for ii in range(mW-1):
zvr[mC+ii] = zvr[mC+(ii-1)] + dzdx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
yvr[mC+ii] = yvr[mC+(ii-1)] + dydx[ii] * (xvr[mC+ii] - xvr[mC+(ii-1)]);
zvr[-1] = zvr[-2] + m.tan(aoa) * (xvr[-1] - xvr[-2]);
yvr[-1] = yvr[-2] + m.tan(beta) * (xvr[-1] - xvr[-2]);
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i-wing.r] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
    select = np.zeros([wing.r + htail.r,n * (mC-1)]); # pick out the intensity of the last chordwise vortex only
    select2 = np.zeros([n * (mC-1),wing.r + htail.r]); # every panel of a given spanwise section shares the same velocity triangle
    select3 = np.zeros([wing.r + htail.r + len(ac.prop.D),n * (mC-1) + len(ac.prop.D)]); # area-weighted averaging of panel quantities per section
for i in range(wing.r):
select[i,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i] = 1.;
select3[i,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i];
for i in range(wing.r,n):
select[i-wing.r,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i - wing.r] = 1.;
select3[i - wing.r,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i-wing.r];
if ac.prop.bool:
select3[-len(ac.prop.D):,-len(ac.prop.D):] = np.eye(len(ac.prop.D));
Ao,Vxo,Vyo,Vzo = ICM_F(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW);
invA = np.linalg.inv(Ao);
A = invA;
Vx = np.dot(select3,Vxo);
Vy = np.dot(select3,Vyo);
Vz = np.dot(select3,Vzo);
return A,normal,Vx,Vy,Vz,select,select2;
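# ------------------------------------------------------------------------
# Illustrative sketch (not called by the solver): the classical actuator-
# disk relations that getGridF_Engines below applies when deflecting the
# wake through a propeller slipstream. Argument names (V0, T, rho, Sh,
# Omega, d) mirror the local variables of that routine; the grouping into
# a standalone helper is our own, added for clarity.
def _actuatorDiskSketch(V0, T, rho, Sh, Omega, d):
    """Axial and swirl induced velocities at radius d behind an actuator
    disk of area Sh producing thrust T (momentum-theory sketch)."""
    import math
    # Axial induction from momentum theory:
    # vix = V0/2 * (sqrt(1 + 2*T/(rho*Sh*V0^2)) - 1)
    vix = 0.5 * V0 * (math.sqrt(1. + 2. * T / (rho * Sh * V0 ** 2)) - 1.)
    a = vix / V0  # axial induction factor
    # Angular induction factor a' solving a'(1 - a') = a(1 + a)*(V0/(Omega*d))^2;
    # abs() guards the discriminant exactly as in the solver below.
    aprim = 0.5 * (1. - math.sqrt(abs(1. - 4. * a * (1. + a) * (V0 / (Omega * d)) ** 2)))
    vitheta = 2. * aprim * Omega * d  # swirl velocity in the slipstream
    return vix, vitheta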
def getGridF_Engines(flow,ac,cla):
beta = - flow.getBeta()*m.pi/180.;
aoa = m.pi * (flow.getAMax()+flow.getAMin())/180.;
# Main lifting surfaces
wing = ac.wing;
htail = ac.htail;
prop = ac.prop;
cf = wing.getCF();
    rho0=1.225; # sea-level air density [kg/m^3]
    dT=-6.5; # temperature lapse rate in the troposphere [K/km]
    T0=288.15; # sea-level temperature [K]
    g=9.80665; # gravitational acceleration [m/s^2]
    Rair=287.1; # specific gas constant of air [m^2/(s^2*K)]
V0 = flow.getV0(); # freestream velocity [m/s]
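    # ISA troposphere: rho = rho0*(1 + dT*h/T0)**(-g/(Rair*dT*1e-3) - 1),
    # the 1e-3 converting dT from K/km to K/m so the exponent is dimensionless.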
rho = rho0 * (1. + dT*h/T0)**(- g/(Rair*dT*10**(-3)) - 1.); # air density
    Sh = m.pi * prop.getD()**2 *0.25; # actuator disk area [m^2]
nbE = len(prop.getD());
OWU = prop.getYp()/np.abs(prop.getYp());
for ii in range(nbE):
if not(prop.OWU[ii]):
            OWU[ii] *= -1.;
# Numerical parameters for discretization
    tF = 2.; # characteristic convection time of the wake vortices
    nbEch = 1.; # minimum number of control points per revolution of the streamlines/vortices
    mW = int(tF*nbEch/(2*m.pi)*max(prop.getOmega())); # wake discretisation: sets the correct wake direction behind the lifting parts
times = np.linspace(0.,tF,mW);
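    # mW ~ (revolutions of the fastest propeller during tF) * nbEch samples
    # per revolution, so the helical slipstream geometry stays resolved.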
    mC = wing.mC; # chordwise discretisation: number of control points along the chord
    n = 2*wing.getR()+htail.getR(); # spanwise discretisation: number of panels
# Panels' coordinates and main parameters (at c/4)
xp = wing.getXP();
yp = wing.getYP();
zp = wing.getZP();
cP = wing.getChord();
tw = wing.getTwist();
dih = wing.getDih();
sw = wing.getSweepC4();
    # Panel edges' coordinates and main parameters (at c/4)
x = wing.getX();
y = wing.getY();
z = wing.getZ();
c = wing.getChordDist();
twSec = wing.twSec;
xW = np.unique(np.concatenate([(1.-cf)*0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xW);
    iC4W = np.where(xW == 0.25)[0][0]; # index where the flap starts / the main element ends
zW = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zW[:,ii]= camber(wing.getAF(ii),xW);
xF = 1. - cf * np.linspace(1.,0,mC);
zF = np.zeros([mC,len(wing.getAF())],dtype = float);
for ii in range(len(wing.getAF())):
zF[:,ii] = camber(wing.getAF(ii),xF);
if htail.bool:
        # Panel edges' coordinates and main parameters (at c/4)
x = np.concatenate([x,htail.getX()]);
y = np.concatenate([y,htail.getY()]);
z = np.concatenate([z,htail.getZ()]);
c = np.concatenate([c,htail.getChordDist()]);
twSec = np.concatenate([wing.twSec,htail.twSec]);
# Panels' coordinates and main parameters (at c/4)
xp = np.concatenate([xp,htail.getXP()]);
yp = np.concatenate([yp,htail.getYP()]);
zp = np.concatenate([zp,htail.getZP()]);
cP = np.concatenate([cP,htail.getChord()]);
tw = np.concatenate([tw,htail.getTwist()]);
dih = np.concatenate([dih,htail.getDih()]);
sw = np.concatenate([sw,htail.getSweepC4()]);
# Elevator, Assumed to be as plain flaps
cfT = htail.getCF();
if cfT != 0:
xT = np.unique(np.concatenate([np.linspace(1.,1.-cfT,2),(1.-cfT)*0.5*(np.cos(np.linspace(m.pi,0.,mC-1))+1.)]));
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
else:
xT = 0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.);
xT[abs((xT-0.25)) == np.min(abs(xT-0.25))] = 0.25;
iC4T = np.where(xT == 0.25)[0][0];
zT = np.zeros([mC,len(htail.getAF())],dtype = float);
for ii in range(len(htail.getAF())):
            zT[:,ii] = camber(htail.getAF(ii),xT);
    # Generate grid corner coordinates.
    # Generate collocation points and normals, where the tangency condition is
    # satisfied. The distance from the bound vortex depends on the sectional
    # lift-curve slope: (dist/localChord) = clAlpha/(4*pi)
X = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
Y = np.zeros(n * (2 * (mC + mW)+1),dtype = float); # initialization
Z = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
COLOCX=np.zeros((mC-1)*n);
COLOCY=np.zeros((mC-1)*n);
COLOCZ=np.zeros((mC-1)*n);
normal = np.zeros([3,(mC-1)*n]);
coef = 0.25+cla*0.25/m.pi;
ds = np.zeros((mC-1)*n); # vector of area of any panel
dS = np.zeros(wing.r+htail.r); # vector of area of a spanwise section
xvl = np.zeros(mC + mW,dtype = float);
yvl = np.zeros(mC + mW,dtype = float);
zvl = np.zeros(mC + mW,dtype = float);
xvr = np.zeros(mC + mW,dtype = float);
yvr = np.zeros(mC + mW,dtype = float);
zvr = np.zeros(mC + mW,dtype = float);
xvlf = np.zeros(mC + mW,dtype = float);
yvlf = np.zeros(mC + mW,dtype = float);
zvlf = np.zeros(mC + mW,dtype = float);
xvrf = np.zeros(mC + mW,dtype = float);
yvrf = np.zeros(mC + mW,dtype = float);
zvrf = np.zeros(mC + mW,dtype = float);
dzdx = np.zeros(mW-1,dtype = float);
for i in range(wing.getR()):
camb = zW[:,wing.getAFI(i)]
cambF = zF[:,wing.getAFI(i)];
il = i;
cl = c[il];
twl = twSec[il];
xl = (xW - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
xlf = (xF - 0.25) * cl + x[il];
ylf = y[il] * np.ones(mC);
zlf = cambF * cl + z[il];
center = np.array([xl[iC4W],yl[iC4W],zl[iC4W]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
pointf = np.array([xlf[ii],ylf[ii],zlf[ii]])-center;
pointf = np.dot(Rot,pointf) + center;
xlf[ii] = pointf[0] - 0.02 * cl;
ylf[ii] = pointf[1];
zlf[ii] = pointf[2] - 0.02 * cl;
centerf = np.array([xlf[0],ylf[0],zlf[0]]);
delta = wing.getDF(i);
Rotf = u.roty(delta);
for ii in range(mC):
pointf = np.array([xlf[ii],ylf[ii],zlf[ii]])-centerf;
pointf = np.dot(Rotf,pointf) + centerf;
xlf[ii] = pointf[0];
ylf[ii] = pointf[1];
zlf[ii] = pointf[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1:] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1:] = zvl[mC-2] + (zl[-1]-zl[-2]);
xvlf[:mC-1] = 0.75 * xlf[:-1] + 0.25 * xlf[1:];
yvlf[:mC-1] = 0.75 * ylf[:-1] + 0.25 * ylf[1:];
zvlf[:mC-1] = 0.75 * zlf[:-1] + 0.25 * zlf[1:];
xvlf[mC-1] = xvlf[mC-2] + (xlf[-1]-xlf[-2]);
yvlf[mC-1:] = yvlf[mC-2] + (ylf[-1]-ylf[-2]);
zvlf[mC-1:] = zvlf[mC-2] + (zlf[-1]-zlf[-2]);
centerPropY = prop.getYp() + (xvl[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvl[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
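        # Slipstream deflection: wake nodes that start inside a propeller disk
        # are convected at V0+vix and swirled at vitheta about the disk axis;
        # see the actuator-disk sketch above getGridF_Engines for the relations.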
for j in range(nbE):
d = m.sqrt((yvl[mC-1] - centerPropY[j])**2 + (zvl[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvl[mC-1] - centerPropZ[j],yvl[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvl[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvl[mC-1] ;
yvl[mC:-1] += dY;
zvl[mC:-1] += dZ;
xvl[mC-1:-1] = xvl[mC-1] + times * (V0+vix);
xvl[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvl >= xvl[mC-1] + 2.5 * cl)[0][1];
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dzdx = V0/(V0+vix) * dzdxl * np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
yvl[mC-1:] += dY;
zvl[mC-1:] += dZ;
centerPropY = prop.getYp() + (xvlf[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvlf[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvlf[mC-1] - centerPropY[j])**2 + (zvlf[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvlf[mC-1] - centerPropZ[j],yvlf[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvlf[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvlf[mC-1] ;
yvlf[mC:-1] += dY;
zvlf[mC:-1] += dZ;
xvlf[mC-1:-1] = xvlf[mC-1] + times * (V0+vix);
xvlf[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvlf >= xvlf[mC-1] + 2.5 * cl)[0][1];
dzdxl = (zlf[mC-1]-zlf[mC-2])/(xlf[mC-1]-xlf[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvlf[mC:indiceFinLocalEffectCamber] - xvlf[mC-1]))/(xvlf[indiceFinLocalEffectCamber-1] - xvlf[mC-1])));
dzdx = V0/(V0+vix) * dzdxl * np.exp(-3.*(np.array(xvlf[mC:indiceFinLocalEffectCamber] - xvlf[mC-1]))/(xvlf[indiceFinLocalEffectCamber-1] - xvlf[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvlf[mC:indiceFinLocalEffectCamber] - xvlf[mC-1]))/(xvlf[indiceFinLocalEffectCamber-1] - xvlf[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvlf[mC-1+ii] - xvlf[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvlf[mC-1+ii] - xvlf[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvlf[indiceFinLocalEffectCamber:] - xvlf[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvlf[indiceFinLocalEffectCamber:] - xvlf[indiceFinLocalEffectCamber-1]);
yvlf[mC-1:] += dY;
zvlf[mC-1:] += dZ;
## Right Part
ir = i+1;
cr = c[ir];
twr = twSec[ir];
xr = (xW - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
xrf = (xF - 0.25) * cr + x[ir];
yrf = y[ir] * np.ones(mC);
zrf = cambF * cr + z[ir];
center = np.array([xr[iC4W],yr[iC4W],zr[iC4W]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
pointf = np.array([xrf[ii],yrf[ii],zrf[ii]])-center;
pointf = np.dot(Rot,pointf) + center;
xrf[ii] = pointf[0] - 0.02 * cr;
yrf[ii] = pointf[1];
zrf[ii] = pointf[2] - 0.02 * cr;
centerf = np.array([xrf[0],yrf[0],zrf[0]]);
for ii in range(mC):
pointf = np.array([xrf[ii],yrf[ii],zrf[ii]])-centerf;
pointf = np.dot(Rotf,pointf) + centerf;
xrf[ii] = pointf[0];
yrf[ii] = pointf[1];
zrf[ii] = pointf[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1:] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1:] = zvr[mC-2] + (zr[-1]-zr[-2]);
xvrf[:mC-1] = 0.75 * xrf[:-1] + 0.25 * xrf[1:];
yvrf[:mC-1] = 0.75 * yrf[:-1] + 0.25 * yrf[1:];
zvrf[:mC-1] = 0.75 * zrf[:-1] + 0.25 * zrf[1:];
xvrf[mC-1] = xvrf[mC-2] + (xrf[-1]-xrf[-2]);
yvrf[mC-1:] = yvrf[mC-2] + (yrf[-1]-yrf[-2]);
zvrf[mC-1:] = zvrf[mC-2] + (zrf[-1]-zrf[-2]);
centerPropY = prop.getYp() + (xvr[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvr[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvr[mC-1] - centerPropY[j])**2 + (zvr[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvr[mC-1] - centerPropZ[j],yvr[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvr[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvr[mC-1] ;
yvr[mC:-1] += dY;
zvr[mC:-1] += dZ;
xvr[mC-1:-1] = xvr[mC-1] + times * (V0+vix);
xvr[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvr >= xvr[mC-1] + 2.5 * cr)[0][1];
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dzdx = V0/(V0+vix) * dzdxr * np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp((-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1]))));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
yvr[mC-1:] += dY;
zvr[mC-1:] += dZ;
centerPropY = prop.getYp() + (xvrf[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvrf[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvrf[mC-1] - centerPropY[j])**2 + (zvrf[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvrf[mC-1] - centerPropZ[j],yvrf[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvrf[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvrf[mC-1] ;
yvrf[mC:-1] += dY;
zvrf[mC:-1] += dZ;
xvrf[mC-1:-1] = xvrf[mC-1] + times * (V0+vix);
xvrf[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvrf >= xvrf[mC-1] + 2.5 * cr)[0][1];
dzdxr = (zrf[mC-1]-zrf[mC-2])/(xrf[mC-1]-xrf[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvrf[mC:indiceFinLocalEffectCamber] - xvrf[mC-1]))/(xvrf[indiceFinLocalEffectCamber-1] - xvrf[mC-1])));
        dzdx = V0/(V0+vix) * dzdxr * np.exp(-3.*(np.array(xvrf[mC:indiceFinLocalEffectCamber] - xvrf[mC-1]))/(xvrf[indiceFinLocalEffectCamber-1] - xvrf[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvrf[mC:indiceFinLocalEffectCamber] - xvrf[mC-1]))/(xvrf[indiceFinLocalEffectCamber-1] - xvrf[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvrf[mC-1+ii] - xvrf[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvrf[mC-1+ii] - xvrf[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvrf[indiceFinLocalEffectCamber:] - xvrf[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvrf[indiceFinLocalEffectCamber:] - xvrf[indiceFinLocalEffectCamber-1]);
yvrf[mC-1:] += dY;
zvrf[mC-1:] += dZ;
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
setTable(X,2*(mC+mW)+1,wing.r+i,np.concatenate([[xvlf[0]],xvrf,xvlf[::-1]]));
setTable(Y,2*(mC+mW)+1,wing.r+i,np.concatenate([[yvlf[0]],yvrf,yvlf[::-1]]));
setTable(Z,2*(mC+mW)+1,wing.r+i,np.concatenate([[zvlf[0]],zvrf,zvlf[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
val = [xvlf[j],xvrf[j],0.5* (xlf[j] + xrf[j]), 0.5* (xlf[j+1] + xrf[j+1])];
COLOCX[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvlf[j],yvrf[j],0.5* (ylf[j] + yrf[j]), 0.5* (ylf[j+1] + yrf[j+1])];
COLOCY[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvlf[j],zvrf[j],0.5* (zlf[j] + zrf[j]), 0.5* (zlf[j+1] + zrf[j+1])];
COLOCZ[(i+wing.r) * (mC-1) + j] = val[2] * (1.-coef[i]) + val[3] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[(i + wing.r) * (mC-1) + j] = cpmag;
normal[:, (i + wing.r) * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]) + sum(ds[(i+wing.r) * (mC-1):(i+wing.r+1) * (mC-1)]);
for i in range(2*wing.getR(),2*wing.getR()+htail.getR()):
iPT = i- 2 * wing.getR();
camb = zT[:,htail.getAFI(iPT)]
il = i+1 - wing.r;
cl = c[il];
twl = twSec[il];
xl = (xT - 0.25) * cl + x[il];
yl = y[il] * np.ones(mC);
zl = camb * cl + z[il];
center = np.array([xl[iC4T],yl[iC4T],zl[iC4T]]);
alpha = 180./m.pi*twl;
Rot = u.roty(alpha);
for ii in range(mC):
point = np.array([xl[ii],yl[ii],zl[ii]])-center;
point = np.dot(Rot,point) + center;
xl[ii] = point[0];
yl[ii] = point[1];
zl[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xl[-2],yl[-2],zl[-2]]);
point = np.array([xl[-1],yl[-1],zl[-1]])-center;
point = np.dot(RotF,point) + center;
xl[-1] = point[0];
yl[-1] = point[1];
zl[-1] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1:] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1:] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
centerPropY = prop.getYp() + (xvl[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvl[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvl[mC-1] - centerPropY[j])**2 + (zvl[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvl[mC-1] - centerPropZ[j],yvl[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvl[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvl[mC-1] ;
yvl[mC:-1] += dY;
zvl[mC:-1] += dZ;
xvl[mC-1:-1] = xvl[mC-1] + times * (V0+vix);
xvl[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvl >= xvl[mC-1] + 2.5 * cl)[0][1];
dzdxl = (zl[mC-1]-zl[mC-2])/(xl[mC-1]-xl[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dzdx = V0/(V0+vix) * dzdxl * np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:indiceFinLocalEffectCamber] - xvl[mC-1]))/(xvl[indiceFinLocalEffectCamber-1] - xvl[mC-1])));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvl[mC-1+ii] - xvl[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvl[indiceFinLocalEffectCamber:] - xvl[indiceFinLocalEffectCamber-1]);
yvl[mC-1:] += dY;
zvl[mC-1:] += dZ;
ir = i+2 - wing.r;
cr = c[ir];
twr = twSec[ir];
xr = (xT - 0.25) * cr + x[ir];
yr = y[ir] * np.ones(mC);
zr = camb * cr + z[ir];
center = np.array([xr[iC4T],yr[iC4T],zr[iC4T]]);
alpha = 180./m.pi*twr;
Rot = u.roty(alpha);
for ii in range(0,mC):
point = np.array([xr[ii],yr[ii],zr[ii]])-center;
point = np.dot(Rot,point) + center;
xr[ii] = point[0];
yr[ii] = point[1];
zr[ii] = point[2];
if htail.getDF(iPT) != 0.:
delta = htail.getDF(iPT);
RotF = u.roty(delta);
center = np.array([xr[-2],yr[-2],zr[-2]]);
point = np.array([xr[-1],yr[-1],zr[-1]])-center;
point = np.dot(RotF,point) + center;
xr[-1] = point[0];
yr[-1] = point[1];
zr[-1] = point[2];
xvr[:mC-1] = 0.75 * xr[:-1] + 0.25 * xr[1:];
yvr[:mC-1] = 0.75 * yr[:-1] + 0.25 * yr[1:];
zvr[:mC-1] = 0.75 * zr[:-1] + 0.25 * zr[1:];
xvr[mC-1] = xvr[mC-2] + (xr[-1]-xr[-2]);
yvr[mC-1:] = yvr[mC-2] + (yr[-1]-yr[-2]);
zvr[mC-1:] = zvr[mC-2] + (zr[-1]-zr[-2]);
        # End of chord vortex = beginning of wake vortex
centerPropY = prop.getYp() + (xvr[mC-1] - prop.getXp()) * m.tan(beta);
centerPropZ = prop.getZp() + (xvr[mC-1] - prop.getXp()) * m.tan(aoa);
vix = 0.;
for j in range(nbE):
d = m.sqrt((yvr[mC-1] - centerPropY[j])**2 + (zvr[mC-1] - centerPropZ[j])**2);
rP = prop.rHub[j];
D = prop.D[j];
vitheta = 0.;
theta0 = np.arctan2(zvr[mC-1] - centerPropZ[j],yvr[mC-1] - centerPropY[j]);
if ((d >= rP) and (d <= D * 0.5) and prop.Omega[j] != 0.):
                vix2 = 0.5*V0*(m.sqrt(1.+2.*prop.T[j]/(rho*Sh[j]*V0**2))-1.); # axial induced velocity of this disk
                vix += vix2;
a = vix2/V0;
aprim = 0.5 * (1. - m.sqrt(abs(1.-4.*a*(1.+a)*(V0/(prop.Omega[j] * d))**2)));
vitheta = OWU[j]*abs((aprim * 2. * prop.Omega[j] * d));
Theta = times*vitheta/d + theta0;
dY = np.cos(Theta[1:]) * d + centerPropY[j] - yvr[mC-1];
dZ = np.sin(Theta[1:]) * d + centerPropZ[j] - zvr[mC-1] ;
yvr[mC:-1] += dY;
zvr[mC:-1] += dZ;
xvr[mC-1:-1] = xvr[mC-1] + times * (V0+vix);
xvr[-1] = 10. * wing.b;
indiceFinLocalEffectCamber = np.where(xvr >= xvr[mC-1] + 2.5 * cr)[0][1];
dzdxr = (zr[mC-1]-zr[mC-2])/(xr[mC-1]-xr[mC-2]);
        # TODO: verify this wake relaxation
dydx = V0/(V0+vix) *m.tan(beta) * (1.-np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])));
dzdx = V0/(V0+vix) * dzdxr * np.exp(-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1])) \
+ V0/(V0+vix) * m.tan(aoa) * (1.-np.exp((-3.*(np.array(xvr[mC:indiceFinLocalEffectCamber] - xvr[mC-1]))/(xvr[indiceFinLocalEffectCamber-1] - xvr[mC-1]))));
dY = np.zeros(mW+1);
dZ = np.zeros(mW+1);
for ii in range(1,indiceFinLocalEffectCamber-mC+1):
dZ[ii] = dZ[(ii-1)] + dzdx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dY[ii] = dY[(ii-1)] + dydx[ii-1] * (xvr[mC-1+ii] - xvr[(mC-1+ii-1)]);
dZ[indiceFinLocalEffectCamber-mC+1:] = dZ[indiceFinLocalEffectCamber-mC] + m.tan(aoa) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
dY[indiceFinLocalEffectCamber-mC+1:] = dY[indiceFinLocalEffectCamber-mC] + m.tan(beta) * (xvr[indiceFinLocalEffectCamber:] - xvr[indiceFinLocalEffectCamber-1]);
yvr[mC-1:] += dY;
zvr[mC-1:] += dZ;
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvr,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvr,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvr,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvr[j],0.5* (xl[j] + xr[j]), 0.5* (xl[j+1] + xr[j+1])];
COLOCX[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvr[j],0.5* (yl[j] + yr[j]), 0.5* (yl[j+1] + yr[j+1])];
COLOCY[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvr[j],0.5* (zl[j] + zr[j]), 0.5* (zl[j+1] + zr[j+1])];
COLOCZ[i * (mC-1) + j] = val[2] * (1.-coef[i - wing.r]) + val[3] * coef[i - wing.r];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i-wing.r] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
# plt.plot(Y[:wing.r*(2*(mC+mW)+1)],X[:wing.r*(2*(mC+mW)+1)]);
# plt.plot(Y[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)],X[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)]);
# plt.plot(Y[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)],X[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)]);
# plt.plot(Y[(2*wing.r+htail.r)*(2*(mC+mW)+1):],X[(2*wing.r+htail.r)*(2*(mC+mW)+1):]);
# plt.axis([-7,7,-1,13])
# plt.show()
#
# plt.plot(Y[:wing.r*(2*(mC+mW)+1)],Z[:wing.r*(2*(mC+mW)+1)]);
# plt.plot(Y[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)],Z[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)]);
# plt.plot(Y[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)],Z[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)]);
# plt.plot(Y[(2*wing.r+htail.r)*(2*(mC+mW)+1):],Z[(2*wing.r+htail.r)*(2*(mC+mW)+1):]);
# plt.axis([-7,7,-3,7])
# plt.show()
#
# plt.plot(X[:wing.r*(2*(mC+mW)+1)],Z[:wing.r*(2*(mC+mW)+1)]);
# plt.plot(X[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)],Z[wing.r*(2*(mC+mW)+1):2*wing.r*(2*(mC+mW)+1)]);
# plt.plot(X[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)],Z[2*wing.r*(2*(mC+mW)+1):(2*wing.r+htail.r)*(2*(mC+mW)+1)]);
# plt.plot(X[(2*wing.r+htail.r)*(2*(mC+mW)+1):],Z[(2*wing.r+htail.r)*(2*(mC+mW)+1):]);
# plt.axis([-1,13,-3,7])
# plt.show()
# return
    select = np.zeros([wing.r + htail.r,n * (mC-1)]); # pick out the intensity of the last chordwise vortex only
    select2 = np.zeros([n * (mC-1),wing.r + htail.r]); # every panel of a given spanwise section shares the same velocity triangle
    select3 = np.zeros([wing.r + htail.r + len(ac.prop.D),n * (mC-1) + len(ac.prop.D)]); # area-weighted averaging of panel quantities per section
for i in range(wing.r):
select[i,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i] = 1.;
select3[i,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i];
for i in range(wing.r,n):
select[i-wing.r,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i - wing.r] = 1.;
select3[i - wing.r,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i-wing.r];
if ac.prop.bool:
select3[-len(ac.prop.D):,-len(ac.prop.D):] = np.eye(len(ac.prop.D));
Ao,Vxo,Vyo,Vzo = ICM_F(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW);
invA = np.linalg.inv(Ao);
A = invA;
Vx = np.dot(select3,Vxo);
Vy = np.dot(select3,Vyo);
Vz = np.dot(select3,Vzo);
return A,normal,Vx,Vy,Vz,select,select2;
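# ICM and ICM_F dispatch the influence-coefficient assembly to the variant
# matching the configuration (fuselage presence, tail height, vertical tail).
# The vortxl_NR / vortxl_NL calls presumably skip the right/left leg of the
# horseshoe where a surface abuts the symmetry plane or the vertical tail;
# this reading of the suffixes is an interpretation, not documented here.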
def ICM(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW):
if ac.fus.bool:
HWing = ac.fus.vDist > 0;
if ac.htail.bool and ac.vtail.bool:
HTail = ac.htail.z[ac.htail.getR()/2] > ((ac.vtail.z[-1]-ac.vtail.z[0]) * 0.66) + ac.vtail.z[0];
if not(ac.fus.bool):
if not(ac.vtail.bool) or not(ac.htail.bool) or HTail:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = BothWingOneTailVtail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
if not(ac.vtail.bool):
if HWing:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = OneWingBothTail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
if HWing and HTail:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
elif HTail:
A,Vx,Vy,Vz = OneWingBothTail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
elif HWing:
A,Vx,Vy,Vz = BothWingOneTailVtail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = OneWingOneTailVtail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
return A,Vx,Vy,Vz;
def ICM_F(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac,n,mC,mW):
if ac.fus.bool:
HWing = ac.fus.vDist > 0;
if ac.htail.bool and ac.vtail.bool:
HTail = ac.htail.z[ac.htail.getR()/2] > ((ac.vtail.z[-1]-ac.vtail.z[0]) * 0.66) + ac.vtail.z[0];
if not(ac.fus.bool):
if not(ac.vtail.bool) or not(ac.htail.bool) or HTail:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = BothWingOneTailVtailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
if not(ac.vtail.bool):
if HWing:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = OneWingBothTailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
if HWing and HTail:
A,Vx,Vy,Vz = OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
elif HTail:
A,Vx,Vy,Vz = OneWingBothTailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
elif HWing:
A,Vx,Vy,Vz = BothWingOneTailVtailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
else:
A,Vx,Vy,Vz = OneWingOneTailVtailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac);
return A,Vx,Vy,Vz;
def OnlyWing(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
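    # A is the n*(mC-1) square aerodynamic influence matrix (normal-projected
    # induced velocity per unit ring circulation); Vx/Vy/Vz hold the raw
    # velocity components, with one extra row per propeller disk when engines
    # are present. The local m = n*(mC-1) (+nbE) shadows the math alias m,
    # which is harmless here since no math call appears in this routine.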
for b in range(n * (mC - 1)):
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def BothWingOneTailVtail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(ac.wing.getR()*(mC-1)):
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1),(ac.htail.getR()/2+ac.wing.getR())*(mC-1)):
for j in range(ac.htail.getR()/2+ac.wing.getR()-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range((ac.wing.getR()+ac.htail.getR()/2)*(mC-1),n*(mC-1)):
for j in range(ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.getR()+ac.htail.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.htail.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def BothWingOneTailVtailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(2*ac.wing.getR()*(mC-1)):
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(2*ac.wing.getR()*(mC-1),(ac.htail.getR()/2+2*ac.wing.getR())*(mC-1)):
for j in range(ac.htail.getR()/2+2*ac.wing.getR()-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range((2*ac.wing.getR()+ac.htail.getR()/2)*(mC-1),n*(mC-1)):
for j in range(2*ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = 2*ac.wing.getR()+ac.htail.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR()+ac.htail.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def OneWingBothTail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()/2*(mC-1),ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1),n*(mC-1)):
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def OneWingBothTailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),ac.wing.getR()+ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()/2*(mC-1),ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.r+ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1),ac.wing.getR()*(mC-1)+ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),ac.wing.getR()+ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1)+ac.wing.getR()/2*(mC-1),2*ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.r+ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(2*ac.wing.getR()*(mC-1),n*(mC-1)):
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def OneWingOneTailVtail(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()/2*(mC-1),ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1),(ac.htail.getR()/2+ac.wing.getR())*(mC-1)):
for j in range(ac.htail.getR()/2+ac.wing.getR()-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range((ac.wing.getR()+ac.htail.getR()/2)*(mC-1),n*(mC-1)):
for j in range(ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.getR()+ac.htail.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.htail.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def OneWingOneTailVtailF(n,mC,mW,X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,ac):
m = n * (mC-1);
if ac.prop.bool:
nbE = len(ac.prop.D);
m += nbE;
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([m,n*(mC-1)],dtype = float);
Vy = np.zeros([m,n*(mC-1)],dtype = float);
Vz = np.zeros([m,n*(mC-1)],dtype = float);
for b in range(ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),ac.wing.getR()+ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()/2*(mC-1),ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.r+ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1),ac.wing.getR()*(mC-1)+ac.wing.getR()/2*(mC-1)):
for j in range(ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR(),ac.wing.getR()+ac.wing.getR()/2-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR(),n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(ac.wing.getR()*(mC-1)+ac.wing.getR()/2*(mC-1),2*ac.wing.getR()*(mC-1)):
j = ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()/2+1,ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.r+ac.wing.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.wing.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range(2*ac.wing.getR()*(mC-1),(ac.htail.getR()/2+2*ac.wing.getR())*(mC-1)):
for j in range(ac.htail.getR()/2+2*ac.wing.getR()-1):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j += 1;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NR(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for b in range((2*ac.wing.getR()+ac.htail.getR()/2)*(mC-1),n*(mC-1)):
for j in range(2*ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = 2*ac.wing.getR()+ac.htail.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(2*ac.wing.getR()+ac.htail.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
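    # NOTE: the loop below repeats the (ac.wing.getR()+ac.htail.getR()/2) pattern
    # of OneWingOneTailVtail and overwrites rows already filled by the two loops
    # above with offsets that do not match this configuration; it appears to be
    # leftover copy-paste from the non-fuselage variant.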
for b in range((ac.wing.getR()+ac.htail.getR()/2)*(mC-1),n*(mC-1)):
for j in range(ac.wing.getR()):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
j = ac.wing.getR()+ac.htail.getR()/2;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(ac.wing.getR()+ac.htail.getR()/2+1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
if ac.prop.bool:
for b in range(n * (mC - 1),m):
x = ac.prop.xp[b-n* (mC - 1)];
y = ac.prop.yp[b-n* (mC - 1)];
z = ac.prop.zp[b-n* (mC - 1)];
for j in range(n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(x,y,z,np.array([-1.,0.,0.]),pathX,pathY,pathZ,mC,mW);
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
def vortxl(x,y,z,normal,pathX,pathY,pathZ,mC,mW):
""" Computing of the unit influence of the vortex on the colocation point
Initially Copyright (C) 2004 Mihai Pruna, Alberto Davila
Modified by Quentin Borlon (5 mai 2017)
Same as proposed by Mondher Yahyaoui
( International Journal of Mechanical, Aerospace, Industrial,
Mechatronic and Manufacturing Engineering Vol:8, No:10, 2014 ).
Exception : the influence of the vortex that goes to infinity."""
    nbRing = len(pathX) -1; # number of vortex segments on the outer ring (chord + wake)
nbLine = nbRing + mC - 1;
r1r2x = np.zeros(nbLine,dtype = float);
r1r2y = np.zeros(nbLine,dtype = float);
r1r2z = np.zeros(nbLine,dtype = float);
square = np.zeros(nbLine,dtype = float);
r1 = np.zeros(nbLine,dtype = float);
r2 = np.zeros(nbLine,dtype = float);
ror1 = np.zeros(nbLine,dtype = float);
ror2 = np.zeros(nbLine,dtype = float);
coeff = np.zeros(nbLine,dtype = float);
# Contribution of the outer ring
x1 = pathX[:-1];
y1 = pathY[:-1];
z1 = pathZ[:-1];
x2 = pathX[1:];
y2 = pathY[1:];
z2 = pathZ[1:];
rcut = 1e-15;
r1r2x[:nbRing] = (y-y1)*(z-z2)-(z-z1)*(y-y2);
r1r2y[:nbRing] = -((x-x1)*(z-z2)-(z-z1)*(x-x2));
r1r2z[:nbRing] = (x-x1)*(y-y2)-(y-y1)*(x-x2);
square[:nbRing] = r1r2x[:nbRing]*r1r2x[:nbRing]+r1r2y[:nbRing]*r1r2y[:nbRing]+r1r2z[:nbRing]*r1r2z[:nbRing];
r1[:nbRing] = np.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1));
r2[:nbRing] = np.sqrt((x-x2)*(x-x2) + (y-y2)*(y-y2) + (z-z2)*(z-z2));
ror1[:nbRing] = (x2-x1)*(x-x1)+(y2-y1)*(y-y1)+(z2-z1)*(z-z1);
ror2[:nbRing] = (x2-x1)*(x-x2)+(y2-y1)*(y-y2)+(z2-z1)*(z-z2);
x1T = pathX[2:mC+1];
y1T = pathY[2:mC+1];
z1T = pathZ[2:mC+1];
x2T = pathX[-2:-mC-1:-1];
y2T = pathY[-2:-mC-1:-1];
z2T = pathZ[-2:-mC-1:-1];
r1r2x[nbRing:] = (y-y1T)*(z-z2T)-(z-z1T)*(y-y2T);
r1r2y[nbRing:] = -((x-x1T)*(z-z2T)-(z-z1T)*(x-x2T));
r1r2z[nbRing:] = (x-x1T)*(y-y2T)-(y-y1T)*(x-x2T);
square[nbRing:] = r1r2x[nbRing:]*r1r2x[nbRing:]+r1r2y[nbRing:]*r1r2y[nbRing:]+r1r2z[nbRing:]*r1r2z[nbRing:];
r1[nbRing:] = np.sqrt((x-x1T)*(x-x1T) + (y-y1T)*(y-y1T) + (z-z1T)*(z-z1T));
r2[nbRing:] = np.sqrt((x-x2T)*(x-x2T) + (y-y2T)*(y-y2T) + (z-z2T)*(z-z2T));
ror1[nbRing:] = (x2T-x1T)*(x-x1T)+(y2T-y1T)*(y-y1T)+(z2T-z1T)*(z-z1T);
ror2[nbRing:] = (x2T-x1T)*(x-x2T)+(y2T-y1T)*(y-y2T)+(z2T-z1T)*(z-z2T);
indice = np.array([not ((r1[i]<rcut) or (r2[i]<rcut) or (square[i]<rcut) ) for i in range(nbLine)],dtype = bool);
coeff[indice] = 0.25/(m.pi*square[indice])*(ror1[indice]/r1[indice]-ror2[indice]/r2[indice]);
ax = r1r2x * coeff;
ay = r1r2y * coeff;
az = r1r2z * coeff;
a = np.zeros(mC-1,dtype = float);
a[0] = (ax[0] + ax[1] + ax[nbRing] + ax[nbRing-1]) * normal[0] + \
(ay[0] + ay[1] + ay[nbRing-1] + ay[nbRing]) * normal[1] + \
(az[0] + az[1] + az[nbRing-1] + az[nbRing]) * normal[2];
a[1:-1] = (ax[2:mC-1] + ax[nbRing+1:nbRing+mC-2] + ax[nbRing-2:nbRing-mC+1:-1] - ax[nbRing : nbRing + mC - 3]) * normal[0] + \
(ay[2:mC-1] + ay[nbRing+1:nbRing+mC-2] + ay[nbRing-2:nbRing-mC+1:-1] - ay[nbRing : nbRing + mC - 3]) * normal[1] + \
(az[2:mC-1] + az[nbRing+1:nbRing+mC-2] + az[nbRing-2:nbRing-mC+1:-1] - az[nbRing : nbRing + mC - 3]) * normal[2];
a[-1] = (np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ax[nbRing + mC - 3]) * normal[0] + \
(np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ay[nbRing + mC - 3]) * normal[1] + \
(np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - az[nbRing + mC - 3]) * normal[2];
# vi = np.array([np.dot(r1r2x[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2y[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2z[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1])]);
vix = np.zeros(mC-1,dtype = float);
viy = np.zeros(mC-1,dtype = float);
viz = np.zeros(mC-1,dtype = float);
vix[:-1] = (ax[1:mC-1] + ax[nbRing-1:nbRing-mC+1:-1]);
viy[:-1] = (ay[1:mC-1] + ay[nbRing-1:nbRing-mC+1:-1]);
viz[:-1] = (az[1:mC-1] + az[nbRing-1:nbRing-mC+1:-1]);
vix[-1] = np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viy[-1] = np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viz[-1] = np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
return a,vix,viy,viz;
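# A sketch of the relation the vortxl family implements (for reference; the
# names below restate what the code above computes). Each straight vortex
# segment from P1 to P2 induces at a point P the Biot-Savart velocity
#   V = Gamma/(4*pi) * (r1 x r2)/|r1 x r2|^2 * [ r0 . (r1/|r1| - r2/|r2|) ]
# with r0 = P2 - P1, r1 = P - P1, r2 = P - P2. In the code, r1r2x/y/z hold
# the cross product r1 x r2, 'square' its squared norm, ror1/ror2 the dot
# products r0.r1 and r0.r2, and 'coeff' the scalar factor per unit Gamma;
# segments whose r1, r2 or |r1 x r2|^2 fall below rcut are skipped to avoid
# the singularity on the vortex line itself.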
def vortxl_NL(x,y,z,normal,pathX,pathY,pathZ,mC,mW):
""" Computing of the unit influence of the vortex on the colocation point
Initially Copyright (C) 2004 Mihai Pruna, Alberto Davila
Modified by Quentin Borlon (5 mai 2017)
Same as proposed by Mondher Yahyaoui
( International Journal of Mechanical, Aerospace, Industrial,
Mechatronic and Manufacturing Engineering Vol:8, No:10, 2014 ).
Exception : the influence of the vortex that goes to infinity."""
    nbRing = len(pathX) -1; # number of vortex segments on the outer ring (chord + wake)
nbLine = nbRing + mC - 1;
r1r2x = np.zeros(nbLine,dtype = float);
r1r2y = np.zeros(nbLine,dtype = float);
r1r2z = np.zeros(nbLine,dtype = float);
square = np.zeros(nbLine,dtype = float);
r1 = np.zeros(nbLine,dtype = float);
r2 = np.zeros(nbLine,dtype = float);
ror1 = np.zeros(nbLine,dtype = float);
ror2 = np.zeros(nbLine,dtype = float);
coeff = np.zeros(nbLine,dtype = float);
# Contribution of the outer ring
x1 = pathX[:-1];
y1 = pathY[:-1];
z1 = pathZ[:-1];
x2 = pathX[1:];
y2 = pathY[1:];
z2 = pathZ[1:];
rcut = 1e-15;
r1r2x[:nbRing] = (y-y1)*(z-z2)-(z-z1)*(y-y2);
r1r2y[:nbRing] = -((x-x1)*(z-z2)-(z-z1)*(x-x2));
r1r2z[:nbRing] = (x-x1)*(y-y2)-(y-y1)*(x-x2);
square[:nbRing] = r1r2x[:nbRing]*r1r2x[:nbRing]+r1r2y[:nbRing]*r1r2y[:nbRing]+r1r2z[:nbRing]*r1r2z[:nbRing];
r1[:nbRing] = np.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1));
r2[:nbRing] = np.sqrt((x-x2)*(x-x2) + (y-y2)*(y-y2) + (z-z2)*(z-z2));
ror1[:nbRing] = (x2-x1)*(x-x1)+(y2-y1)*(y-y1)+(z2-z1)*(z-z1);
ror2[:nbRing] = (x2-x1)*(x-x2)+(y2-y1)*(y-y2)+(z2-z1)*(z-z2);
x1T = pathX[2:mC+1];
y1T = pathY[2:mC+1];
z1T = pathZ[2:mC+1];
x2T = pathX[-2:-mC-1:-1];
y2T = pathY[-2:-mC-1:-1];
z2T = pathZ[-2:-mC-1:-1];
r1r2x[nbRing:] = (y-y1T)*(z-z2T)-(z-z1T)*(y-y2T);
r1r2y[nbRing:] = -((x-x1T)*(z-z2T)-(z-z1T)*(x-x2T));
r1r2z[nbRing:] = (x-x1T)*(y-y2T)-(y-y1T)*(x-x2T);
square[nbRing:] = r1r2x[nbRing:]*r1r2x[nbRing:]+r1r2y[nbRing:]*r1r2y[nbRing:]+r1r2z[nbRing:]*r1r2z[nbRing:];
r1[nbRing:] = np.sqrt((x-x1T)*(x-x1T) + (y-y1T)*(y-y1T) + (z-z1T)*(z-z1T));
r2[nbRing:] = np.sqrt((x-x2T)*(x-x2T) + (y-y2T)*(y-y2T) + (z-z2T)*(z-z2T));
ror1[nbRing:] = (x2T-x1T)*(x-x1T)+(y2T-y1T)*(y-y1T)+(z2T-z1T)*(z-z1T);
ror2[nbRing:] = (x2T-x1T)*(x-x2T)+(y2T-y1T)*(y-y2T)+(z2T-z1T)*(z-z2T);
indice = np.array([not ((r1[i]<rcut) or (r2[i]<rcut) or (square[i]<rcut) ) for i in range(nbLine)],dtype = bool);
coeff[indice] = 0.25/(m.pi*square[indice])*(ror1[indice]/r1[indice]-ror2[indice]/r2[indice]);
    coeff[mC+mW+1:nbRing] *= 0.3; # damp one side of the ring (the 0.3 factor appears to be an empirical junction correction)
ax = r1r2x * coeff;
ay = r1r2y * coeff;
az = r1r2z * coeff;
a = np.zeros(mC-1,dtype = float);
a[0] = (ax[0] + ax[1] + ax[nbRing] + ax[nbRing-1]) * normal[0] + \
(ay[0] + ay[1] + ay[nbRing-1] + ay[nbRing]) * normal[1] + \
(az[0] + az[1] + az[nbRing-1] + az[nbRing]) * normal[2];
a[1:-1] = (ax[2:mC-1] + ax[nbRing+1:nbRing+mC-2] + ax[nbRing-2:nbRing-mC+1:-1] - ax[nbRing : nbRing + mC - 3]) * normal[0] + \
(ay[2:mC-1] + ay[nbRing+1:nbRing+mC-2] + ay[nbRing-2:nbRing-mC+1:-1] - ay[nbRing : nbRing + mC - 3]) * normal[1] + \
(az[2:mC-1] + az[nbRing+1:nbRing+mC-2] + az[nbRing-2:nbRing-mC+1:-1] - az[nbRing : nbRing + mC - 3]) * normal[2];
a[-1] = (np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ax[nbRing + mC - 3]) * normal[0] + \
(np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ay[nbRing + mC - 3]) * normal[1] + \
(np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - az[nbRing + mC - 3]) * normal[2];
# vi = np.array([np.dot(r1r2x[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2y[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2z[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1])]);
vix = np.zeros(mC-1,dtype = float);
viy = np.zeros(mC-1,dtype = float);
viz = np.zeros(mC-1,dtype = float);
vix[:-1] = (ax[1:mC-1] + ax[nbRing-1:nbRing-mC+1:-1]);
viy[:-1] = (ay[1:mC-1] + ay[nbRing-1:nbRing-mC+1:-1]);
viz[:-1] = (az[1:mC-1] + az[nbRing-1:nbRing-mC+1:-1]);
vix[-1] = np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viy[-1] = np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viz[-1] = np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
return a,vix,viy,viz;
def vortxl_NR(x,y,z,normal,pathX,pathY,pathZ,mC,mW):
""" Computing of the unit influence of the vortex on the colocation point
Initially Copyright (C) 2004 Mihai Pruna, Alberto Davila
Modified by Quentin Borlon (5 mai 2017)
Same as proposed by Mondher Yahyaoui
( International Journal of Mechanical, Aerospace, Industrial,
Mechatronic and Manufacturing Engineering Vol:8, No:10, 2014 ).
Exception : the influence of the vortex that goes to infinity."""
    nbRing = len(pathX) -1; # number of vortex segments on the outer ring (chord + wake)
nbLine = nbRing + mC - 1;
r1r2x = np.zeros(nbLine,dtype = float);
r1r2y = np.zeros(nbLine,dtype = float);
r1r2z = np.zeros(nbLine,dtype = float);
square = np.zeros(nbLine,dtype = float);
r1 = np.zeros(nbLine,dtype = float);
r2 = np.zeros(nbLine,dtype = float);
ror1 = np.zeros(nbLine,dtype = float);
ror2 = np.zeros(nbLine,dtype = float);
coeff = np.zeros(nbLine,dtype = float);
# Contribution of the outer ring
x1 = pathX[:-1];
y1 = pathY[:-1];
z1 = pathZ[:-1];
x2 = pathX[1:];
y2 = pathY[1:];
z2 = pathZ[1:];
rcut = 1e-15;
r1r2x[:nbRing] = (y-y1)*(z-z2)-(z-z1)*(y-y2);
r1r2y[:nbRing] = -((x-x1)*(z-z2)-(z-z1)*(x-x2));
r1r2z[:nbRing] = (x-x1)*(y-y2)-(y-y1)*(x-x2);
square[:nbRing] = r1r2x[:nbRing]*r1r2x[:nbRing]+r1r2y[:nbRing]*r1r2y[:nbRing]+r1r2z[:nbRing]*r1r2z[:nbRing];
r1[:nbRing] = np.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1));
r2[:nbRing] = np.sqrt((x-x2)*(x-x2) + (y-y2)*(y-y2) + (z-z2)*(z-z2));
ror1[:nbRing] = (x2-x1)*(x-x1)+(y2-y1)*(y-y1)+(z2-z1)*(z-z1);
ror2[:nbRing] = (x2-x1)*(x-x2)+(y2-y1)*(y-y2)+(z2-z1)*(z-z2);
x1T = pathX[2:mC+1];
y1T = pathY[2:mC+1];
z1T = pathZ[2:mC+1];
x2T = pathX[-2:-mC-1:-1];
y2T = pathY[-2:-mC-1:-1];
z2T = pathZ[-2:-mC-1:-1];
r1r2x[nbRing:] = (y-y1T)*(z-z2T)-(z-z1T)*(y-y2T);
r1r2y[nbRing:] = -((x-x1T)*(z-z2T)-(z-z1T)*(x-x2T));
r1r2z[nbRing:] = (x-x1T)*(y-y2T)-(y-y1T)*(x-x2T);
square[nbRing:] = r1r2x[nbRing:]*r1r2x[nbRing:]+r1r2y[nbRing:]*r1r2y[nbRing:]+r1r2z[nbRing:]*r1r2z[nbRing:];
r1[nbRing:] = np.sqrt((x-x1T)*(x-x1T) + (y-y1T)*(y-y1T) + (z-z1T)*(z-z1T));
r2[nbRing:] = np.sqrt((x-x2T)*(x-x2T) + (y-y2T)*(y-y2T) + (z-z2T)*(z-z2T));
ror1[nbRing:] = (x2T-x1T)*(x-x1T)+(y2T-y1T)*(y-y1T)+(z2T-z1T)*(z-z1T);
ror2[nbRing:] = (x2T-x1T)*(x-x2T)+(y2T-y1T)*(y-y2T)+(z2T-z1T)*(z-z2T);
indice = np.array([not ((r1[i]<rcut) or (r2[i]<rcut) or (square[i]<rcut) ) for i in range(nbLine)],dtype = bool);
coeff[indice] = 0.25/(m.pi*square[indice])*(ror1[indice]/r1[indice]-ror2[indice]/r2[indice]);
    coeff[1:mW+mC] *= 0.3; # damp the other side of the ring (the 0.3 factor appears to be an empirical junction correction)
ax = r1r2x * coeff;
ay = r1r2y * coeff;
az = r1r2z * coeff;
a = np.zeros(mC-1,dtype = float);
a[0] = (ax[0] + ax[1] + ax[nbRing] + ax[nbRing-1]) * normal[0] + \
(ay[0] + ay[1] + ay[nbRing-1] + ay[nbRing]) * normal[1] + \
(az[0] + az[1] + az[nbRing-1] + az[nbRing]) * normal[2];
a[1:-1] = (ax[2:mC-1] + ax[nbRing+1:nbRing+mC-2] + ax[nbRing-2:nbRing-mC+1:-1] - ax[nbRing : nbRing + mC - 3]) * normal[0] + \
(ay[2:mC-1] + ay[nbRing+1:nbRing+mC-2] + ay[nbRing-2:nbRing-mC+1:-1] - ay[nbRing : nbRing + mC - 3]) * normal[1] + \
(az[2:mC-1] + az[nbRing+1:nbRing+mC-2] + az[nbRing-2:nbRing-mC+1:-1] - az[nbRing : nbRing + mC - 3]) * normal[2];
a[-1] = (np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ax[nbRing + mC - 3]) * normal[0] + \
(np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - ay[nbRing + mC - 3]) * normal[1] + \
(np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]) - az[nbRing + mC - 3]) * normal[2];
# vi = np.array([np.dot(r1r2x[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2y[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1]),np.dot(r1r2z[mC:mC+2*mW+1],coeff[mC:mC+2*mW+1])]);
vix = np.zeros(mC-1,dtype = float);
viy = np.zeros(mC-1,dtype = float);
viz = np.zeros(mC-1,dtype = float);
vix[:-1] = (ax[1:mC-1] + ax[nbRing-1:nbRing-mC+1:-1]);
viy[:-1] = (ay[1:mC-1] + ay[nbRing-1:nbRing-mC+1:-1]);
viz[:-1] = (az[1:mC-1] + az[nbRing-1:nbRing-mC+1:-1]);
vix[-1] = np.dot(r1r2x[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viy[-1] = np.dot(r1r2y[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
viz[-1] = np.dot(r1r2z[mC-1:mC+2*mW+2],coeff[mC-1:mC+2*mW+2]);
return a,vix,viy,viz;
def camber(naca,x):
""" Compute the camber of the naca 4-,5- and 6-digits.
# Taken over and lightly adapted by Quentin Borlon
# NACA Airfoil Generator
# This function generates a set of points containing the coordinates of a
# NACA airfoil from the 4 Digit Series, 5 Digit Series and 6 Series given
# its number and, as additional features, the chordt, the number of points
# to be calculated, spacing type (between linear and cosine spacing),
# opened or closed trailing edge and the angle of attack of the airfoil.
# It also plots the airfoil for further comprovation if it is the required
# one by the user.
#
# -------------------------------------------------------------------------
#
# MIT License
#
# Copyright (c) 2016 Alejandro de Haro"""
try:
naca = float(naca);
Cam = np.zeros(len(x));
# 6-digits
if m.floor(naca/(1e5)):
a=(m.floor(naca/10000)%10)/10; # Chordwise position of minimum pressure (2nd digit)
c_li=(m.floor(naca/100)%10)/10; # Design lift coefficient (4th digit)
g=-1./(1-a)*(a**2*(0.5*m.log(a)-0.25)+0.25); # G constant calculation
h=1./(1-a)*(0.5*(1-a)**2*m.log(1-a)-0.25*(1-a)**2)+g; # H constant calculation
#----------------------- CAMBER ---------------------------------------
indice = np.array([not ((x[i] == 0. or x[i] == 1. or x[i] == a)) for i in range(len(x))],dtype = bool);
for i in range(len(x)):
if indice[i]:
Cam[i]=c_li/(2*m.pi*(a+1))*(1./(1-a)*(0.5*(a-x[i])**2*np.log(np.abs(a-x[i]))-0.5*(1-x[i])**2*np.log(1-x[i])+0.25*(1-x[i])**2-0.25*(a-x[i])**2)-x[i]*np.log(x[i])+g-h*x[i]); # Mean camber y coordinate
# 5-digits
elif m.floor(naca/(1e4)):
p=(m.floor(naca/1000)%10)/20; # Location of maximum camber (2nd digit)
rn=(m.floor(naca/100)%10); # Type of camber (3rd digit)
if rn==0:
#----------------------- STANDARD CAMBER ------------------------------
#----------------------- CONSTANTS --------------------------------
r=3.33333333333212*p**3+0.700000000000909*p**2+1.19666666666638*p-0.00399999999996247; # R constant calculation by interpolation
k1=1514933.33335235*p**4-1087744.00001147*p**3+286455.266669048*p**2-32968.4700001967*p+1420.18500000524; # K1 constant calculation by interpolation
#----------------------- CAMBER -----------------------------------
for i in range(len(x)):
if x[i]<r:
Cam[i]=k1/6*(x[i]**3-3*r*x[i]**2+r**2*(3-r)*x[i]); # Mean camber y coordinate
else:
Cam[i]=k1*r**3/6*(1-x[i]); # Mean camber y coordinate
elif rn==1:
#----------------------- REFLEXED CAMBER ------------------------------
#----------------------- CONSTANTS --------------------------------
                r=10.6666666666861*p**3-2.00000000001601*p**2+1.73333333333684*p-0.0340000000002413; # R constant calculation by interpolation
k1=-27973.3333333385*p**3+17972.8000000027*p**2-3888.40666666711*p+289.076000000022; # K1 constant calculation by interpolation
k2_k1=85.5279999999984*p**3-34.9828000000004*p**2+4.80324000000028*p-0.21526000000003; # K1/K2 constant calculation by interpolation
#----------------------- CAMBER -----------------------------------
for i in range(len(x)):
if x[i]<r:
Cam[i]=k1/6*((x[i]-r)**3-k2_k1*(1-r)**3*x[i]-r**3*x[i]+r**3); # Mean camber y coordinate
else:
Cam[i]=k1/6*(k2_k1*(x[i]-r)**3-k2_k1*(1-r)**3*x[i]-r**3*x[i]+r**3); # Mean camber y coordinate
# 4-digits
else:
maxt=m.floor(naca/1e3)/100; # Maximum camber (1st digit)
p=(m.floor(naca/100)%10)/10;
#----------------------- CAMBER ---------------------------------------
for i in range(len(x)):
if x[i]<p:
Cam[i]=maxt*x[i]/p**2*(2*p-x[i]); # Mean camber y coordinate
else:
Cam[i]=maxt*(1-x[i])/(1-p)**2*(1+x[i]-2*p); # Mean camber y coordinate
except ValueError:
Cam = getCamFromDataFile(naca,x);
return Cam;
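# Minimal usage sketch (illustrative values; assumes numpy as np and math as m
# are imported as elsewhere in this module):
#   xs = 0.5*(np.cos(np.linspace(m.pi, 0., 25)) + 1.)  # cosine-spaced chord stations
#   cam = camber(2412, xs)   # camber line of a NACA 2412, per unit chord
# If 'naca' cannot be cast to float (e.g. it is a file path), the ValueError
# branch falls back to getCamFromDataFile below.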
def getCamFromDataFile(filePath,x):
""" Function that loads the 2D polar data from the path"""
section = u.justLoad(filePath,0);
if (section[0,0] !=1. or section[0,1] !=0.):
section[0,0] = 1.;
section[0,1] = 0.;
if (section[-1,0] !=1 or section[-1,1] !=0):
section[-1,0] = 1.;
section[-1,1] = 0.;
n = np.where(np.logical_and(section[:,0] ==0,section[:,1] == 0.));
if not(np.any(n)):
n = m.floor(np.size(section,axis=0)/2);
section = np.concatenate([section[:n,:],[np.array([0.,0.])],section[n:,:]],0);
else:
n = n[0][0];
inf = np.interp(x,np.flipud(section[:n+1,0]),np.flipud(section[0:n+1,1]));
sup = np.interp(x,section[n:,0],section[n:,1]);
Cam = (inf+sup)*0.5
Cam[0] = 0;
return Cam;
def getCamF(xF):
section = u.justLoad('./PolarFiles/flaps.dat',0);
Cam = np.interp(xF,section[:,0],section[:,1]);
return Cam
def setTable(table,dim2,pan,val):
i0 = pan*dim2;
for i in range(len(val)):
table[i0+i] = val[i];
def getVal(table,dim2,pan):
i0 = pan*dim2;
return table[i0:i0+dim2];
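# Usage sketch: setTable/getVal emulate a 2-D table of shape [nPanels, dim2]
# stored in a flat 1-D array (illustrative values only):
#   table = np.zeros(3 * 4)                  # 3 panels, dim2 = 4
#   setTable(table, 4, 1, [1., 2., 3., 4.])  # write the row of panel 1
#   row = getVal(table, 4, 1)                # -> array([1., 2., 3., 4.])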
def vortxlV(x,y,z,x1,y1,z1,x2,y2,z2):
""" Computing of the unit influence of the vortex on the colocation point
Initially Copyright (C) 2004 Mihai Pruna, Alberto Davila
Modified by Quentin Borlon (5 mai 2017)
Same as proposed by Mondher Yahyaoui
( International Journal of Mechanical, Aerospace, Industrial,
Mechatronic and Manufacturing Engineering Vol:8, No:10, 2014 ).
Exception : the influence of the vortex that goes to infinity."""
nbPoint = len(x1);
rcutSq=1e-8;
rcut = 1e-8;
r1r2x = (y-y1)*(z-z2)-(z-z1)*(y-y2);
r1r2y = -((x-x1)*(z-z2)-(z-z1)*(x-x2));
r1r2z = (x-x1)*(y-y2)-(y-y1)*(x-x2);
square = r1r2x*r1r2x+r1r2y*r1r2y+r1r2z*r1r2z;
r1 = np.sqrt((x-x1)*(x-x1) + (y-y1)*(y-y1) + (z-z1)*(z-z1));
r2 = np.sqrt((x-x2)*(x-x2) + (y-y2)*(y-y2) + (z-z2)*(z-z2));
indice = np.array([not ((r1[i]<rcut) or (r2[i]<rcut) or (square[i]<rcutSq) ) for i in range(nbPoint)],dtype = bool);
ror1 = np.zeros(len(r1));
ror2 = np.zeros(len(r1));
ror1[indice] = (x2[indice]-x1[indice])*(x-x1[indice])+(y2[indice]-y1[indice])*(y-y1[indice])+(z2[indice]-z1[indice])*(z-z1[indice]);
ror2[indice] = (x2[indice]-x1[indice])*(x-x2[indice])+(y2[indice]-y1[indice])*(y-y2[indice])+(z2[indice]-z1[indice])*(z-z2[indice]);
coeff = np.zeros(len(r1));
coeff[indice] = 0.25/(m.pi*square[indice])*(ror1[indice]/r1[indice]-ror2[indice]/r2[indice]);
a = np.array([np.dot(r1r2x,coeff),np.dot(r1r2y,coeff),np.dot(r1r2z,coeff)]);
coeff[0] = 0.;
vi = np.array([np.dot(r1r2x,coeff),np.dot(r1r2y,coeff),np.dot(r1r2z,coeff)]);
return a,vi;
def ICMatrixV(vtail,cla,flow):
""" Prediction of aerodynamic characteristics of the vertical tail.
Assumed to be independant of the flow on the lifting surfaces to avoid
too strong coupling with vortex of the horizontal tail. If interactions
with HTP must be neglected to avoid infinite values, not necessary to
compute interaction because of the little influence of the wing on it.
Allows to have smaller matrix and reduces a lot the cpu costs.
Autor : Quentin borlon
Date : 28 october 2017
Function that predicts the aerodynamic coefficients for a given vtail.
Based on the vtail geometry and the sectional 2D aerodynamic datas.
INPUT:
clAlpha : vertical array with clAlphas(i) is the lift curve slope of the
panel from wing.y(i) to wing.y(i+1);
vtail : a structral object with as fields:
b : span
chord : vertical array with the chord at the root (1) any
discontinuity of taper ratio (2:end-1) and at the tip (end)
airfoil : a cell-array with each cell gives the airfoil naca number
representation, cell 1 correspond to first panel after root.
sweep : vertical array with wing.sweep(i) is the sweep angle of
the panel from wing.y(i) to wing.y(i+1) (rad)
deltasFlaps : vertical array with wing.deltasFlaps(i) is the
flaps defection of the panel from wing.y(i) to wing.y(i+1)
(deg)
r : number of spanwise panel along the vtail;
cFlaps_cLoc : vertical array with wing.cFlaps_cLocs(i) is the
local flaps to chord ratio
z : the spanwise location of the limits of the panels
discY : vertical array of the complete set of the spanwise location
airfoilIndex : vertical array with wing.airfoilIndex(i) is the index of
the airfoil (wing.airfoil) to use for the section at wing.y(i)
chordDistrib : vertical array with wing.chordDistrib(i) is the chord length of
the section at wing.y(i)
OUTPUT:
A : the influence coefficient matrix [n x n] such that A*{GAMMA/2} + {Q}*{normal} = 0
normal : a [3 x (wing.getR()/2+1)] matrix that provides the normal downward
of the panel."""
# Recover the numerical parameters
n = vtail.getR(); # spanwise discretisation number of panel
    mC = vtail.mC; # chordwise discretisation : number of chordwise checkpoints
mW = flow.mW;
beta = -flow.beta * m.pi/180;
aoa = flow.at;
# Recover the vtail parameters
c = vtail.getChordDist();
cf = vtail.getCF();
x = vtail.getX();
z = vtail.getZ();
    # Rudder, assumed to act as plain flaps
if cf != 0:
xT = np.unique(np.concatenate([(1.-cf)*0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xT);
else:
xT = np.unique(np.concatenate([0.5*(np.cos(np.linspace(m.pi,0.,mC))+1.),[0.25]]));
mC = len(xT);
yT = np.zeros([mC,len(vtail.getAF())],dtype = float);
for ii in range(len(vtail.getAF())):
        yT[:,ii]= camber(vtail.getAF(ii),xT); # camber distribution of airfoil ii
X = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
Y = np.zeros(n * (2 * (mC + mW)+1),dtype = float); # initialization
Z = np.zeros(n * (2 * (mC + mW)+1),dtype = float);
COLOCX=np.zeros((mC-1)*n);
COLOCY=np.zeros((mC-1)*n);
COLOCZ=np.zeros((mC-1)*n);
normal = np.zeros([3,(mC-1)*n]);
coef = 0.25+cla*0.25/m.pi;
ds = np.zeros((mC-1)*n); # vector of area of any panel
dS = np.zeros(n); # vector of area of a spanwise section
xvl = np.zeros(mC + mW,dtype = float);
yvl = np.zeros(mC + mW,dtype = float);
zvl = np.zeros(mC + mW,dtype = float);
xvt = np.zeros(mC + mW,dtype = float);
yvt = np.zeros(mC + mW,dtype = float);
zvt = np.zeros(mC + mW,dtype = float);
dydx = np.zeros(mW-1,dtype = float);
dzdx = np.zeros(mW-1,dtype = float);
for i in range(n):
camb = yT[:,vtail.getAFI(i)]
il = i;
cl = c[il];
xl = (xT - 0.25) * cl + x[il];
yl = camb * cl;
zl = z[il] * np.ones(mC);
if vtail.getDF(i) != 0.:
delta = vtail.getDF(i);
RotF = u.rotz(delta);
center = np.array([xl[-2],yl[-2],zl[-2]]);
point = np.array([xl[-1],yl[-1],zl[-1]])-center;
point = np.dot(RotF,point) + center;
xl[-1] = point[0];
yl[-1] = point[1];
zl[-1] = point[2];
xvl[:mC-1] = 0.75 * xl[:-1] + 0.25 * xl[1:];
yvl[:mC-1] = 0.75 * yl[:-1] + 0.25 * yl[1:];
zvl[:mC-1] = 0.75 * zl[:-1] + 0.25 * zl[1:];
xvl[mC-1] = xvl[mC-2] + (xl[-1]-xl[-2]);
yvl[mC-1] = yvl[mC-2] + (yl[-1]-yl[-2]);
zvl[mC-1] = zvl[mC-2] + (zl[-1]-zl[-2]);
        # End of chord vortex = beginning of wake vortex
xvl[mC:-1] = xvl[mC-1] + 2.5 * cl * (1.+np.array(range(mW-1),dtype = float))/mW;
xvl[-1] = 50. * vtail.b;
dydxl = (yl[mC-1]-yl[mC-2])/(xl[mC-1]-xl[mC-2]);
dydx = dydxl * np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC]))\
+ m.tan(beta) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
dzdx = m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvl[mC:-1] - xvl[mC]))/(xvl[-2] - xvl[mC])));
for ii in range(mW-1):
zvl[mC+ii] = zvl[mC+(ii-1)] + dzdx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
yvl[mC+ii] = yvl[mC+(ii-1)] + dydx[ii] * (xvl[mC+ii] - xvl[mC+(ii-1)]);
zvl[-1] = zvl[-2] + m.tan(aoa) * (xvl[-1] - xvl[-2]);
yvl[-1] = yvl[-2] + m.tan(beta) * (xvl[-1] - xvl[-2]);
it = i+1;
ct = c[it];
xt = (xT - 0.25) * ct + x[it];
yt = camb * ct;
zt = z[it] * np.ones(mC);
if vtail.getDF(i) != 0.:
delta = vtail.getDF(i);
RotF = u.rotz(-delta);
center = np.array([xt[-2],yt[-2],zt[-2]]);
point = np.array([xt[-1],yt[-1],zt[-1]])-center;
point = np.dot(RotF,point) + center;
xt[-1] = point[0];
yt[-1] = point[1];
zt[-1] = point[2];
xvt[:mC-1] = 0.75 * xt[:-1] + 0.25 * xt[1:];
yvt[:mC-1] = 0.75 * yt[:-1] + 0.25 * yt[1:];
zvt[:mC-1] = 0.75 * zt[:-1] + 0.25 * zt[1:];
xvt[mC-1] = xvt[mC-2] + (xt[-1]-xt[-2]);
yvt[mC-1] = yvt[mC-2] + (yt[-1]-yt[-2]);
zvt[mC-1] = zvt[mC-2] + (zt[-1]-zt[-2]);
        # End of chord vortex = beginning of wake vortex
xvt[mC:-1] = xvt[mC-1] + 2.5 * ct * (1.+np.array(range(mW-1),dtype = float))/mW;
xvt[-1] = 50. * vtail.b;
dydxt = (yt[mC-1]-yt[mC-2])/(xt[mC-1]-xt[mC-2]);
dydx = dydxt * np.exp(-3.*(np.array(xvt[mC:-1] - xvt[mC]))/(xvt[-2] - xvt[mC]))\
            + m.tan(beta) * (1.-np.exp(-3.*(np.array(xvt[mC:-1] - xvt[mC]))/(xvt[-2] - xvt[mC])));
        dzdx = m.tan(aoa) * (1.-np.exp(-3.*(np.array(xvt[mC:-1] - xvt[mC]))/(xvt[-2] - xvt[mC])));
for ii in range(mW-1):
zvt[mC+ii] = zvt[mC+(ii-1)] + dzdx[ii] * (xvt[mC+ii] - xvt[mC+(ii-1)]);
yvt[mC+ii] = yvt[mC+(ii-1)] + dydx[ii] * (xvt[mC+ii] - xvt[mC+(ii-1)]);
zvt[-1] = zvt[-2] + m.tan(aoa) * (xvt[-1] - xvt[-2]);
yvt[-1] = yvt[-2] + m.tan(beta) * (xvt[-1] - xvt[-2]);
setTable(X,2*(mC+mW)+1,i,np.concatenate([[xvl[0]],xvt,xvl[::-1]]));
setTable(Y,2*(mC+mW)+1,i,np.concatenate([[yvl[0]],yvt,yvl[::-1]]));
setTable(Z,2*(mC+mW)+1,i,np.concatenate([[zvl[0]],zvt,zvl[::-1]]));
for j in range(mC-1):
val = [xvl[j],xvt[j], 0.5* (xl[j+1] + xt[j+1]),0.5* (xl[j] + xt[j])];
COLOCX[i * (mC-1) + j] = val[3] * (1.-coef[i]) + val[2] * coef[i];
cpx1 = val[1] - val[0];
cpx2 = val[3] - val[2];
val = [yvl[j],yvt[j], 0.5* (yl[j+1] + yt[j+1]),0.5* (yl[j] + yt[j])];
COLOCY[i * (mC-1) + j] = val[3] * (1.-coef[i]) + val[2] * coef[i];
cpy1 = val[1] - val[0];
cpy2 = val[3] - val[2];
val = [zvl[j],zvt[j], 0.5* (zl[j+1] + zt[j+1]),0.5* (zl[j] + zt[j])];
COLOCZ[i * (mC-1) + j] = val[3] * (1.-coef[i]) + val[2] * coef[i];
cpz1 = val[1] - val[0];
cpz2 = val[3] - val[2];
cp= np.cross(np.array([cpx1,cpy1,cpz1]),np.array([cpx2,cpy2,cpz2]));
cpmag= m.sqrt(cp[1]*cp[1]+cp[2]*cp[2]+cp[0]*cp[0]);
ds[i * (mC-1) + j] = cpmag;
normal[:, i * (mC-1) + j] = cp/cpmag;
dS[i] = sum(ds[i * (mC-1):(i+1) * (mC-1)]);
    select = np.zeros([vtail.r,n * (mC-1)]); # pick out the intensity of the last chordwise vortex only
    select2 = np.zeros([n * (mC-1),vtail.r]); # every panel of the same spanwise section gets the same velocity triangle
    select3 = np.zeros([vtail.r,n * (mC-1)]); # area-weighted average over each spanwise section
for i in range(vtail.r):
select[i,(mC-2) + (mC-1)*i] = 1.;
select2[(mC-1)*i:(mC-1)*(i+1),i] = 1.;
select3[i,(mC-1)*i:(mC-1)*(i+1)] = ds[(mC-1)*i:(mC-1)*(i+1)]/dS[i];
##
Ao,Vxo,Vyo,Vzo = ICM_V(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,n,mC,mW);
A = np.linalg.inv(Ao);
Vx = np.dot(select3,Vxo);
Vy = np.dot(select3,Vyo);
Vz = np.dot(select3,Vzo);
return A,normal,Vx,Vy,Vz,select,select2;
def ICM_V(X,Y,Z,COLOCX,COLOCY,COLOCZ,normal,n,mC,mW):
A = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vx = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vy = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
Vz = np.zeros([n*(mC-1),n*(mC-1)],dtype = float);
for b in range(n * (mC - 1)):
j = 0;
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl_NL(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
for j in range(1,n):
pathX = getVal(X,2*(mW+mC)+1,j);
pathY = getVal(Y,2*(mW+mC)+1,j);
pathZ = getVal(Z,2*(mW+mC)+1,j);
a,vix,viy,viz = vortxl(COLOCX[b],COLOCY[b],COLOCZ[b],normal[:,b],pathX,pathY,pathZ,mC,mW);
A[b,j*(mC-1) : (j+1) *(mC-1)] = a;
Vx[b,j*(mC-1) : (j+1) *(mC-1)] = vix;
Vy[b,j*(mC-1) : (j+1) *(mC-1)] = viy;
Vz[b,j*(mC-1) : (j+1) *(mC-1)] = viz;
return A,Vx,Vy,Vz;
|
[
"Quentin@QuentinBorlon.local"
] |
Quentin@QuentinBorlon.local
|
4211b1fa093def536158860e2735e6301f133337
|
9abdf71e6fd5e97a317356ce3ce60cca940af8fd
|
/Problems/Piggy bank/task.py
|
0b7075060f3858c6f6853a3f70174adf713fc422
|
[] |
no_license
|
Omkar-M/Coffee-Machine
|
4184ac289c1bf36e855bb21c4115dbce4e492b6f
|
a21a0cb43a0ab52669619af88e1e82050029c8b5
|
refs/heads/master
| 2022-11-25T14:32:47.155087
| 2020-08-01T20:51:54
| 2020-08-01T20:51:54
| 274,185,317
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 562
|
py
|
class PiggyBank:
# create __init__ and add_money methods
def __init__(self, dollars, cents):
self.dollars = dollars
self.cents = cents
    def add_money(self, deposit_dollars, deposit_cents):
        self.dollars += deposit_dollars
        self.cents += deposit_cents
        if self.cents > 99:
            # carry whole dollars out of the cents total
            self.dollars += self.cents // 100
            self.cents %= 100
# bank = PiggyBank(1,1)
# bank.add_money(0,99)
# print(bank.dollars, bank.cents)
|
[
"62263578+Omkar-M@users.noreply.github.com"
] |
62263578+Omkar-M@users.noreply.github.com
|
c258d2b4a615c8ebf5ba30e66090c50c26b84a98
|
523af11028940092341a676270614e0becb53001
|
/LongTerm.py
|
de939fa73ab92c84d68a6c77686504d656cde818
|
[] |
no_license
|
Christy42/FUMBBLStats
|
c54090c53f4f11e472661c548f9bbaaa5eda78c4
|
aa4a6c3c16300e1e352311dbabf58195195bb5a5
|
refs/heads/master
| 2020-03-17T03:54:42.564806
| 2019-11-02T14:52:21
| 2019-11-02T14:52:21
| 133,255,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,001
|
py
|
import os
import requests
import yaml
import xml.etree.ElementTree as Et
from DataGrab import team_name
def get_matches(player_folder, team_folder, rerun_folder=None):
b = requests.get("https://fumbbl.com/xml:group?id=3449&op=matches")
text = Et.fromstring(b.text)
performances, team_games = matches_in_division(text, rerun_folder)
add_player_attribs(player_folder, performances)
add_team_performances(performances, team_folder, team_games)
next_page = text.find("nextPage").text
count = 0
while next_page:
print(next_page)
print(count)
b = requests.get("https://fumbbl.com/xml:group?id=3449&op=matches&paging={}".format(next_page))
text = Et.fromstring(b.text)
performances, team_games = matches_in_division(text, rerun_folder)
add_player_attribs(player_folder, performances)
add_team_performances(performances, team_folder, team_games)
try:
next_page = text.find("nextPage").text
except AttributeError:
next_page = None
count += 1
def matches_in_division(root_text, rerun_folder=None):
matches = root_text.find("matches")
teams_found = {}
performances = []
already_run = []
if rerun_folder:
with open(rerun_folder, "r") as rerun:
already_run = yaml.safe_load(rerun)
for match in matches:
if match.attrib["id"] in already_run:
# print("Not grabbing {} as it is already done or too late a round".format(match.attrib["id"]))
continue
already_run.append(match.attrib["id"])
for element in ["home", "away"]:
section = match.find(element)
team_id = section.attrib["id"]
if team_id not in teams_found:
teams_found[team_id] = {"id": team_name(team_id), "games": 0}
teams_found[team_id]["games"] += 1
name = teams_found[team_id]
team_perf = section.find("performances")
for child in team_perf:
individual = child.attrib
individual.update({"team": name, "team id": team_id})
performances.append(individual)
if rerun_folder:
with open(rerun_folder, "w") as file:
yaml.safe_dump(already_run, file)
return performances, teams_found
def add_player_attribs(player_folder, performances):
players = open_files(player_folder, "Player")
for element in performances:
ident = element["player"]
if ident not in players:
name, star, skills, position = get_name(ident)
ident = name if star else ident
players[ident] = {"team": "Star Player" if star else element["team"]["id"],
"name": name, "position name": position, "skills": skills, "team id": element["team id"]}\
if ident not in players else players[ident]
print(ident)
for stat in element:
if stat not in ["player", "team", "team id"]:
try:
players[ident][stat] = int(players[ident].get(stat, 0)) + int(element.get(stat, 0))
except ValueError:
players[ident][stat] = int(players[ident].get(stat, 0))
players[ident]["games"] = int(players[ident].get("games", 0)) + 1
print("YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY")
write_file(players, player_folder, "Player")
def open_files(folder, base):
dictionary = {}
for filename in os.listdir(folder):
if base in filename:
with open(folder + "//" + filename, "r") as file:
dictionary.update(yaml.safe_load(file))
return dictionary
def write_file(dictionary, folder, base):
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
count = 1
print_dict = []
file_number = 0
print(dictionary)
for element in dictionary:
if count % 500 == 0:
file_number += 1
try:
print_dict[file_number].update({element: dictionary[element]})
        except IndexError:
            # start a new chunk; include the current element so it is not lost
            print_dict.append({element: dictionary[element]})
count += 1
print("VVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV")
print(file_number)
for i in range(file_number + 1):
# print("printing")
with open(folder + "//" + base + str(i) + ".yaml", "w+") as file:
yaml.safe_dump(print_dict[i], file)
def add_team_performances(performances, team_file, team_games):
teams_accessed = []
with open(team_file, "r") as t_file:
teams = yaml.safe_load(t_file)
for player in performances:
if player["team id"] not in teams:
teams[player["team id"]] = {"name": player["team"]["id"]}
teams_accessed.append(player["team id"])
for stat in player:
if stat not in ["name", "team", "team id", "player"]:
try:
teams[player["team id"]][stat] = int(teams[player["team id"]].get(stat, 0)) + int(player[stat])
except ValueError:
teams[player["team id"]][stat] = int(teams[player["team id"]].get(stat, 0))
for team in set(teams_accessed):
teams[team]["games"] = teams[team].get("games", 0) + team_games[team]["games"]
with open(team_file, "w") as file:
yaml.safe_dump(teams, file)
def get_name(player_id):
print("https://fumbbl.com/api/player/get/" + str(player_id) + "/xml")
player_details = requests.get("https://fumbbl.com/api/player/get/" + str(player_id) + "/xml").text
root = Et.fromstring(player_details)
    star = int(root.find("number").text) >= 90
pos = root.find("position")
position = pos.find("name").text
base_skills = []
section = root.find("skills")
for child in section:
base_skills.append(child.text)
return root.find("name").text, star, base_skills, position
get_matches("LongTerm", "LongTerm//Team.yaml", "LongTerm//run_file.yaml")
|
[
"mark.christiansen@nuim.ie"
] |
mark.christiansen@nuim.ie
|
266e73c87f7eebadf9016230fac89ac05d834981
|
eac611ff1a3910aae25e06549e965b2743dd5b93
|
/Math/Probability.py
|
45fe3b4ab45b20abac6a8daab736f42be5b611e2
|
[] |
no_license
|
jizhi/jizhipy
|
30cb7032fb9ad7ee8e11498d468d2b125ac8cb42
|
b49777105a76b5ae03555a9f93f116454c8245a9
|
refs/heads/master
| 2020-06-05T14:51:51.165710
| 2019-06-18T15:00:49
| 2019-06-18T15:00:49
| 192,464,118
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,678
|
py
|
class Probability( object ) :
def Bins( self, array, nbins, weight=None, wmax2a=None, nsigma=None ) :
'''
nbins:
(1) ==list/ndarray with .size==3
** nbins, bmin, bmax = bins
nbins: number of bins
bmin, bmax: min and max of bins, NOT use the whole bin
(2) ==int_number:
** Then use weight and wmax2a
Give the total number of the bins, in this case, x.size=bins+1, xc.size=bins
nsigma:
float | None
When generate the bins, won't use the whole range of array, set nsigma, will use |array| <= nsigma*array.std()
weight:
** Use this only when bins==int_number
'G?', 'K?' | None | ndarray with size=bins
(1) ==None: each bin has the same weight => uniform bins
			(2) ==ndarray: give a weight to each bin
			(3) =='G?':
				'?' should be a value, for example, 'G1', 'G2.3', 'G5.4', 'G12': use a Gaussian weight, obtained from np.linspace(-?, +?, bins)
			=='K?':
				'?' should be a value, for example, 'K1', 'K2.3', 'K5.4', 'K12': use the modified Bessel function of the second kind, obtained from np.linspace(-?, +?, bins)
wmax2a:
Use it when weight is not None
float | None
			(1) ==float: the bin to which weight.max() is assigned, i.e. the bin containing the value wmax2a
'''
import numpy as np
from jizhipy.Basic import IsType
from jizhipy.Array import Invalid, Asarray
from jizhipy.Math import Gaussian
#---------------------------------------------
array = Asarray(array)
if (nsigma is not None) :
mean, sigma = array.mean(), array.std()
array = array[(mean-nsigma*sigma<=array)*(array<=mean+nsigma*sigma)]
amin, amax = array.min(), array.max()
#---------------------------------------------
if (Asarray(nbins).size==3) : nbins, bmin, bmax = nbins
else : bmin, bmax = amin, amax
#---------------------------------------------
# First uniform bins
bins = np.linspace(bmin, bmax, nbins+1)
bstep = bins[1] - bins[0]
#---------------------------------------------
# weight
if (weight is not None) :
if (IsType.isstr(weight)) :
w, v = str(weight[0]).lower(), abs(float(weight[1:]))
if (v == 0) : v = 1
x = np.linspace(-v, v, nbins)
if (w == 'k') :
import scipy.special as spsp
weight = spsp.k0(abs(x))
weight = Invalid(weight)
weight.data[weight.mask] = 2*weight.max()
else : # Gaussian
else : # Gaussian
weight = Gaussian.GaussianValue1(x, 0, 0.4)
#--------------------
# wmax2a
if (wmax2a is not None) :
nmax = int(round(np.where(weight==weight.max())[0].mean()))
nb = abs(bins - wmax2a)
nb = np.where(nb==nb.min())[0][0]
for i in range(bins.size-1) :
if (bins[i] <= wmax2a < bins[i+1] ) :
nb = i
break
d = abs(nmax - nb)
if (nmax < nb) : weight = np.append(weight[-d:], weight[:-d])
elif (nmax > nb) : weight = np.append(weight[d:], weight[:d])
#--------------------
weight = weight[:nbins]
if (weight.size < nbins) : weight = np.concatenate([weight]+(nbins-weight.size)*[weight[-1:]])
weight = weight.max() - weight + weight.min()
weight /= weight.sum()
weight = weight.cumsum()
#--------------------
c = bins[0] + (bmax-bmin) * weight
bins[1:-1] = c[:-1]
#--------------------
bins = list(bins)
n = 1
while(n < len(bins)) :
if (bins[n] - bins[n-1] < bstep/20.) :
bins = bins[:n] + bins[n+1:]
else : n += 1
bins = Asarray(bins)
#---------------------------------------------
return bins
def ProbabilityDensity( self, randomvariable, bins, weight=None, wmax2a=None, nsigma=6, density=True ) :
'''
Return the probability density or number counting of array.
Input array must be flatten()
bins:
(1) ==list/ndarray with .size>3:
** Then ignore brange, weight, wmax2a
use this as the edge of the bins
total number of the bins is bins.size-1 (x.size=bins.size, xc.size=bins.size-1)
(2) ==list/ndarray with .size==3
** nbins, bmin, bmax = bins
nbins: number of bins
bmin, bmax: lower and upper edge of the bins (need not span the whole array range)
(3) ==int_number:
** Then use weight and wmax2a
Give the total number of the bins, in this case, x.size=bins+1, xc.size=bins
weight:
** Use this only when bins==int_number
'G', 'K0' | None | ndarray with size=bins
(1) ==None: each bin has the same weight => uniform bins
(2) ==ndarray: give weights to each bins
(3) =='G': use Gaussian weight
=='K0': use modified Bessel functions of the second kind
wmax2a:
** Use this only when bins==int_number and weight is not None
float | None
(1) ==None: means weight[0]=>bins[0], weight[1]=>bins[1], weight[i]=>bins[i]
(2) ==float:
uniform bin b = np.linspace(array.min(), array.max(), bins+1)
value wmax2a is in nb-th bin: b[nb] <= wmax2a <= b[nb+1]
weight.max() => weight[nmax]
!!! Give weight[nmax] to the bin b[nb] (then reorder the weight array)
nsigma:
float | None (use all data)
When generating the bins, won't use the whole range of array: set nsigma to throw away the points more than nsigma standard deviations from the mean
density:
If True, return the probability density = counting / total number / bin width
If False, return the counting number of each bin
Return:
[xe, xc, y]
xe is the edge of the bins.
xc is the center of the bins.
y is the probability density (or the count, if density=False) of each bin.
'''
import numpy as np
from jizhipy.Process import Edge2Center
from jizhipy.Array import Asarray
#---------------------------------------------
# nsigma
# Throw away the points beyond the mean
try : nsigma = float(nsigma)
except (TypeError, ValueError) : nsigma = None
array = Asarray(randomvariable).flatten()
sigma, mean = array.std(), array.mean()
if (nsigma is not None) : array = array[(mean-nsigma*sigma<=array)*(array<=mean+nsigma*sigma)]
amin, amax = array.min(), array.max()
#---------------------------------------------
if (Asarray(bins).size <= 3) :
bins =self.Bins(array, bins, weight, wmax2a, None)
bins = Asarray(bins)
#---------------------------------------------
bins = bins[bins>=amin]
bins = bins[bins<=amax]
tf0, tf1 = False, False
if (abs(amin-bins[0]) > 1e-6) :
bins = np.append([amin], bins)
tf0 = True
if (abs(amax-bins[-1])> 1e-6) :
bins = np.append(bins, [amax])
tf1 = True
#---------------------------------------------
y, bins = np.histogram(array, bins=bins, density=density)
if (tf0) : y, bins = y[1:], bins[1:]
if (tf1) : y, bins = y[:-1], bins[:-1]
x = Edge2Center(bins)
return [bins, x, y]
def RandomVariable( self, shape, x, fx, norm=True ) :
'''
Invert operation of ProbabilityDensity()
Provide probability density, return random variable
shape:
The shape of generated random variable
fx, norm:
fx:
isfunc | isndarray
(1) isfunc: fx = def f(x), f(x) is the probability density function
(2) isndarray: fx.size = x.size
norm:
True | False
fx must be
1. fx >= 0
2. \int_{-\inf}^{+\inf} fx dx = 1
Only when norm=False is fx used as given; otherwise it is always normalized first.
x:
isndarray, must be 1D
Use fx and x to obtain the inverse function of the cumulative distribution function, x = F^{-1}(y)
return:
1D ndarray with shape, random variable
'''
import numpy as np
from jizhipy.Array import Asarray
from jizhipy.Basic import IsType, Raise
from jizhipy.Optimize import Interp1d
#---------------------------------------------
x = Asarray(x).flatten()
if (not IsType.isfunc(fx)) :
fx = Asarray(fx).flatten()
if (x.size != fx.size) : Raise(Exception, 'fx.size='+str(fx.size)+' != x.size='+str(x.size))
else : fx = fx(x)
#---------------------------------------------
# sort x from small to large
x = np.sort(x + 1j*fx)
fx, x = x.imag, x.real
#---------------------------------------------
dx = x[1:] - x[:-1]
dx = np.append(dx, dx[-1:])
#---------------------------------------------
# Normal fx
if (norm is not False) :
fxmin = fx.min()
if (fxmin < 0) : fx -= fxmin
fx /= (fx * dx).sum()
#---------------------------------------------
# Cumulative distribution function
fx = (fx * dx).cumsum()
#---------------------------------------------
# Inverse function
F_1 = Interp1d(fx, x, None)
#---------------------------------------------
# Uniform random with shape
x = np.random.random(shape)
#---------------------------------------------
# Random variable with f(x)
b = F_1(x)
return b
Probability = Probability()
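
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): draw samples
    # from an unnormalized standard-normal pdf with RandomVariable(), i.e.
    # inverse-transform sampling, then recover the sample density with
    # ProbabilityDensity(). Assumes numpy and the jizhipy helpers imported
    # inside the methods above are available.
    import numpy as np
    x = np.linspace(-5, 5, 1001)
    fx = np.exp(-x**2 / 2.)  # shape of N(0,1); norm=True rescales it
    samples = Probability.RandomVariable(100000, x, fx, norm=True)
    edges, centers, density = Probability.ProbabilityDensity(samples, 50)
    print(centers[np.argmax(density)])  # should be close to 0, the mode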
|
[
"huang2qizhi@qq.com"
] |
huang2qizhi@qq.com
|
d308359d2c35c01589212599a5fa4f4cdea19c8c
|
909928849a10b26b445d3f32ec157ca33f94b9e4
|
/models/__init__.py
|
36d0df9f7a23f6a471aeb68ced40035180c3441d
|
[] |
no_license
|
jorgeviz/rliable
|
1f634ab40ab2f933a50b6614465a6d88980957af
|
90086f93de9d3d4a7833247e131211b4bcfd61c8
|
refs/heads/master
| 2023-03-29T05:03:37.927586
| 2021-03-24T18:29:23
| 2021-03-24T18:29:23
| 307,858,823
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
from models.base_model import BaseModel
from models.dqn import DQN
from models.count_model import CountModel
models = {
"BaseModel": BaseModel,
"DQN": DQN,
"CountModel": CountModel
}
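
# Example of the config-driven lookup this registry enables (hypothetical
# config; constructor arguments depend on the individual model classes):
#
#   cfg = {"model": "DQN"}
#   ModelCls = models[cfg["model"]]
#   agent = ModelCls()  # plus whatever args the chosen model requires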
|
[
"javg44@hotmail.com"
] |
javg44@hotmail.com
|
f7c4f4f2a4201c2c82f769072380cedf2610cee7
|
15996cf938dd4c2e2aabed9b463ffe8cbf286d30
|
/decard/generate_targets.py
|
5dac7f1e3aa02a3f3d62a37c10f3a6b2d19081f1
|
[] |
no_license
|
jgolob/decard
|
c20f0b06020c1fb5020fb591745f3aa243715b65
|
30f4864177683adc88c741d3728d0bbecf156617
|
refs/heads/master
| 2021-07-16T22:46:13.532168
| 2020-10-13T17:36:36
| 2020-10-13T17:36:36
| 207,618,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,715
|
py
|
#!/usr/bin/env python
"""
This module generates specific targets for desired distribution, and a given set of reference sequences
INPUTS:
# distribution: In CSV format.
Genus, Fraction, STD, Species_n, Species_std, Species_slope, Species_intercept, Species_p
# Genera Directory:
A directory in which each genus has its own subdirectory.
Within that directory is a fasta file for each species within that genus
Each fasta file should have one or more representitive sequences
eg:
/path/to/genera_dir/Streptococcus/Streptococcus pyogenes.fasta
# Count:
Number of targets per community
# Number of communities:
How many communities to generate, with the given count and distribution provided.
# Prefix (opt): Prefix to add to the start of the community name
# Suffix (opt): Suffix to add to the end of the community name
OUTPUTs (for use in generate_amplicons):
# targets_csv: A CSV file with the following columns to store the generated community targets.
community_name, distribution_name, source_file, species, sequence_id, weight
"""
import argparse
from Bio import SeqIO, pairwise2
from Bio.SeqRecord import SeqRecord
import re
import os
import csv
import numpy as np
import random
import uuid
header_re = re.compile('description=\"(?P<description>[^\"]+)\" organism=\"(?P<organism>[^\"]+)\" taxonomy=\"(?P<taxonomy>[^\"]+)\" ncbi_tax_id=\"(?P<ncbi_tax_id>[^\"]+)\"')
def read_distro_goal(distribution_fn):
"""
Given a filename, read in the distribution goal from a properly formatted CSV
Should be:
Genus, Fraction, STD, Species_n, Species_std, Species_slope, Species_intercept, Species_p
Outputs a list of dicts
"""
goal = []
with open(distribution_fn,'rU') as distro_f:
reader = csv.DictReader(distro_f)
for row in reader:
for k in row:
try:
row[k]=float(row[k])
except:
pass
goal.append(row)
distro_f.close()
return goal
def communities_generateGoals(num, distribution_goal, distribution_name="", prefix="", suffix="",offset=0):
communities = []
for i in xrange(offset,offset+num):
community = {
'num': i,
'name': prefix+"CM"+str(i)+suffix,
'distribution_name': distribution_name,
}
print "_____ ", i, " ________"
community_fract = []
# For each goal fraction and std, generate a random value based on the normal distribution for what proportion the sample should be for this community
for genus_goal in distribution_goal:
# Use the typical fractional abundance of this genus, plus the std of this mean to give us a random fraction (abundance)
fract = np.max([0.0,np.random.normal(loc = genus_goal['Fraction'], scale = genus_goal['STD'])])
if fract > 0.0:
# Then decide how many species to pull (richness)
# If the log regression is good enough, use it
if float(genus_goal['Species_p']) <= 0.05:
    species = int(np.round(np.max([1, float(genus_goal['Species_slope']) * np.log(fract) + float(genus_goal['Species_intercept'])])))
# If the log model isn't great, see if we have a std deviation to use with the mean number to get our next estimate.
elif int(genus_goal['Species_std']) > 0:
species = int(np.round(np.max([1, np.random.normal(loc=genus_goal['Species_n'], scale=genus_goal['Species_std'])])))
else: # Just use the mean number
species = np.max([int(genus_goal['Species_n']),1])
community_fract.append({
'genus': genus_goal['Genus'],
'fraction': fract,
'species_n': species})
# Get the total proportion to be able to normalize
total_fract = np.sum([cf['fraction'] for cf in community_fract])
# Figure out the goal number of sequences per genus, using the proportions (normalized) and the goal total number of sequences
genus_seqs = [{'genus': cf['genus'], 'fraction': cf['fraction']/total_fract, 'species_n': cf['species_n'] } for cf in community_fract ]
# Get rid of any that end up with a zero count
genus_seqs = [gs for gs in genus_seqs if gs['fraction'] > 0]
# Sort for prettiness
genus_seqs.sort(key=lambda g: -g['fraction'])
# Print for niceness to our user
for gs in genus_seqs:
print gs['genus'], gs['fraction'], gs['species_n']
community['goal']= genus_seqs
communities.append(community)
return communities
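
# Shape of one generated community goal (illustrative values only):
#   {'num': 0, 'name': 'CM0', 'distribution_name': 'distro.csv',
#    'goal': [{'genus': 'Streptococcus', 'fraction': 0.42, 'species_n': 3},
#             ...]}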
def calculate_species_count(nth_species, total_num_species , total_count):
"""
Goal: Use a log model to get a count for the nth species, given a total num of species, and overall count
nth_species: 1 to total_num (starts at 1)
total_num_species: total num of species for total_Count
total_count: total num for this genus
In log10 space, we want to scale down to 1 - 10. Log10(1) = 0, Log10(10) = 1. We then take the difference between the scaled log values of the nth and (n-1)th species to get the count
The net result at the end due to rounding may be off. That's fine.
"""
""" First scale to [1,10], and get where we are on this scaled value
Why? We want our values to vary from 1-10 linearly
s = mX + b.
we want s = 1 when X = 0
so b = 1
we want s = 10 when X = MAX
so solving for m
10 = m(MAX) + 1
9 = m(MAX)
m = 9 / MAX
"""
m = 9 / float(total_num_species)
b = 1.0
s = m * nth_species + b
s0 = m*(nth_species-1) + b
# Next log transform our S
s_l = np.log10(s)
s0_l = np.log10(s0)
return total_count * (s_l - s0_l)
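
# Worked example (illustrative numbers, not from the original code): with
# total_num_species=4 and total_count=100, m = 9/4 = 2.25, so the scaled
# values s(n) = 2.25*n + 1 are 3.25, 5.5, 7.75, 10 and the per-species
# counts come out as 100*log10(3.25/1) ~ 51.2, 100*log10(5.5/3.25) ~ 22.8,
# 100*log10(7.75/5.5) ~ 14.9 and 100*log10(10/7.75) ~ 11.1; that is, a
# log-flattened rank-abundance curve that sums to ~100 up to rounding.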
def communities_pickSequences(communities, genera_dir):
# The list into which we will put our targeted sequences
sequences = []
for comm in communities:
for genus_goal in comm['goal']:
print "Loading sequences for ", genus_goal['genus']
if not os.path.isdir(genera_dir+"/"+genus_goal['genus']):
print "Missing "+genus_goal['genus']+"'s directory"
else:
# Get the filenames of each species fasta file in this genus
fasta_fns = os.listdir(genera_dir+"/"+genus_goal['genus'])
if len(fasta_fns) < 1:
print "No species available for ", genus_goal['genus']
else:
# Randomize the order of the filenames
random.shuffle(fasta_fns)
# Can't take more species than we have, so take the min of the two
species_n = min(len(fasta_fns), genus_goal['species_n'])
# Cut the list down to the wanted number of species
fasta_fns = fasta_fns[:species_n]
# Great, for each file representing a sequence.....
for n,fasta_fn in enumerate(fasta_fns):
# Figure out how many copies of this species we should have
species_count = calculate_species_count(n+1, species_n, genus_goal['fraction']*100)
# Parse the species fasta once and load all the strains /
# representatives into an array
species_srs = []
seqs = SeqIO.parse(genera_dir+'/'+genus_goal['genus']+'/'+fasta_fn, 'fasta')
for sr in seqs:
# load the records
species_srs.append(sr)
# Pick a random sequence for this species
sr = random.choice(species_srs)
# Add it to our list
sequences.append({
'community_name': comm['name'],
'distribution_name': comm['distribution_name'],
'source_file': os.path.abspath(genera_dir+'/'+genus_goal['genus']+'/'+fasta_fn),
'species': fasta_fn.replace('.fasta',''),
'sequence_id': sr.id,
'weight': species_count/100,
})
return sequences
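
# Each target row appended above, as later written to the output CSV
# (values illustrative only):
#   {'community_name': 'CM0', 'distribution_name': 'distro.csv',
#    'source_file': '/abs/path/Streptococcus/Streptococcus pyogenes.fasta',
#    'species': 'Streptococcus pyogenes', 'sequence_id': 'NR_042779.1',
#    'weight': 0.512}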
def main():
args_parser = argparse.ArgumentParser()
args_parser.add_argument('--genera_fasta', '-g', help='Directory where to find genera', required=True)
args_parser.add_argument('--distribution','-d', help='CSV file(s) with desired distribution(s), by genus', nargs='*', required=True)
args_parser.add_argument('--mock', '-m', help='Mock run. Do not modify the FASTA file and limit how many records we go after', action='store_true')
args_parser.add_argument('--number', '-n', help='How many communities to generate (per distribution)', nargs='*', default = [1])
args_parser.add_argument('--output','-o', help="Output file for targets for PCR step / generate amplicons", required=True)
args_parser.add_argument('--prefix','-p', help="Prefix to prepend to community names", default="")
args_parser.add_argument('--suffix','-s', help="Suffix to append to community names", default="")
args = args_parser.parse_args()
# First handle our distros
distribution_files = args.distribution
distribution_goals = [read_distro_goal(distro) for distro in distribution_files]
# Unpack and tidy up our num per distro
numbers = [int(num) for num in args.number]
if len(numbers) != len(distribution_goals):
if len(numbers) == 1:
numbers = numbers*len(distribution_goals)
else:
print "Please match number of distributions to number of communities per distribution"
return -1
# See if our genera dir exists (and could do some validation testing too if we wanted)
if not os.path.isdir(args.genera_fasta):
print "Directory for genus fasta files "+args.genera_fasta+" does not exist"
return -1
# Implicit else we're good to go
genera_dir = args.genera_fasta
# If we're not in mock mode, output to files
if not args.mock:
out_f = open(args.output,'w')
else: # We are mock, output to stdout
import sys
out_f = sys.stdout
# Set up writers.
target_writer = csv.DictWriter(out_f, ['community_name', 'distribution_name', 'source_file','species', 'sequence_id', 'weight'])
target_writer.writeheader()
communities = []
offset = 0
for i, (distribution_goal, number) in enumerate(zip(distribution_goals,numbers)):
distro_name = distribution_files[i]
communities = communities+communities_generateGoals(number, distribution_goal, distribution_name=distro_name, prefix=args.prefix, suffix=args.suffix, offset=offset)
offset+=number
sequences = communities_pickSequences(communities, genera_dir)
target_writer.writerows(sequences)
out_f.close()
if __name__ == "__main__":
main()
|
[
"j-dev@golob.org"
] |
j-dev@golob.org
|