blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2bbed7eea919f0c708c7562990fc6ce521973e8d
|
a6b2dfa36483571382569a5649e42456e6bd6542
|
/Pygame4/Course_Project/Game/Scenes/HighScoreScene.py
|
c1454b62036ca3ea20c1b8758e2fb21d100842d6
|
[] |
no_license
|
LauRivero150920/PygameTutorial
|
0c89bda28ff4c30ae352fc6649fae514e8b74e4a
|
2d4bb4a0c683f252f5e4aaace35e36fe2eced5b5
|
refs/heads/main
| 2023-07-31T17:47:40.617202
| 2021-09-23T06:52:22
| 2021-09-23T06:52:22
| 352,852,639
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
import pygame
from Scenes.Scene import Scene
from Highscore import *
from Shared import *
class HighscoreScene(Scene):
    """Scene that draws the high-score table and waits for F1 to restart."""

    def __init__(self, game):
        super().__init__(game)
        # Background sprite for the high-score screen.
        self.__highscoreSprite = pygame.image.load(GameConstants.SPRITE_HIGHSCORE)

    def render(self):
        """Blit the background, then list every (name, score) pair as text."""
        self.getGame().screen.blit(self.__highscoreSprite, (10, 10))
        self.clearText()
        col, row = 350, 100
        for entry in Highscore().getScores():
            self.addText(entry[0], col, row, size = 30)
            self.addText(str(entry[1]), col + 200, row, size = 30)
            row += 30
        self.addText("Press F1 to start a new game", col, row + 60, size = 30)
        super().render()

    def handleEvents(self, events):
        """Quit on window close; F1 resets the game and returns to playing."""
        super().handleEvents(events)
        for event in events:
            if event.type == pygame.QUIT:
                exit()
            if event.type == pygame.KEYDOWN and event.key == pygame.K_F1:
                self.getGame().reset()
                self.getGame().changeScene(GameConstants.PLAYING_SCENE)
|
[
"A01274144@itesm.mx"
] |
A01274144@itesm.mx
|
9a3d0e73bd72f7ffb40f57fed3cbc23b22de6a32
|
9a70bb77c7cd6d9930b81d3023ee6f9738175eff
|
/publish.py
|
bbb9a13376701326f6fae026103c443dbdc6c496
|
[] |
no_license
|
bdemers/mqtt-pwsweather-publish
|
8ab583024ff2c212111b8d6094a72042c121ae5e
|
efca766b31344ed42aaef9a2a7ac36a86521acd1
|
refs/heads/master
| 2023-03-06T21:41:28.856479
| 2020-11-23T15:19:56
| 2020-11-23T15:19:56
| 315,348,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,646
|
py
|
#!/usr/bin/python
"""
publish.py
Simple MQTT subscriber of weather data then publishing it to the WeatherUnderground API.
Uploads the current temperature, humidity, wind speed and wind direction from a given Personal Weather Station
"""
# IMPORTS
import urllib.request as urllib2
import urllib.parse
import json
import paho.mqtt.client as paho
import os
import logging
import sys
import datetime
# Log to STDOUT
logger = logging.getLogger("mqtt-wunderground")
logger.setLevel(logging.INFO)
consoleHandler = logging.StreamHandler()
logger.addHandler(consoleHandler)

# Component config; pws_id / pws_pass are overwritten from the environment below.
config = {}
config['pws_id'] = ""
config['pws_pass'] = ""

# Map incoming JSON keys to the PWSweather query-string parameter names.
sub_topics = {}
sub_topics['wind_dir_deg'] = "winddir"
sub_topics['wind_avg_mi_h'] = "windspeedmph"
sub_topics['humidity'] = "humidity"
sub_topics['temperature_F'] = "tempf"
sub_topics['time'] = "dateutc"
sub_topics['dewpoint'] = 'dewptf'

# Get MQTT servername/address
# Supports Docker environment variable format MQTT_URL = tcp://#.#.#.#:1883
MQTT_URL = os.environ.get('MQTT_URL')
if MQTT_URL is None:
    logger.info("MQTT_URL is not set, using default localhost:1883")
    config['broker_address'] = "localhost"
    config['broker_port'] = 1883
else:
    # Only the host part of the URL is used; the port is fixed at 1883 either way.
    config['broker_address'] = MQTT_URL.split("//")[1].split(":")[0]
    config['broker_port'] = 1883

# Get config topic (required; the process exits without it)
config['config_topic'] = os.environ.get('CONFIG_TOPIC')
if config['config_topic'] is None:
    logger.info("CONFIG_TOPIC is not set, exiting")
    raise sys.exit()

# Get Weather Underground PWS ID (required)
config['pws_id'] = os.environ.get('CONFIG_PWS_ID')
if config['pws_id'] is None:
    logger.info("CONFIG_PWS_ID is not set, exiting")
    raise sys.exit()

# Get Weather Underground PWS KEY (required)
config['pws_pass'] = os.environ.get('CONFIG_PWS_PASS')
if config['pws_pass'] is None:
    logger.info("CONFIG_PWS_PASS is not set, exiting")
    raise sys.exit()
# Create the callbacks for Mosquitto
def on_connect(client, userdata, flags, rc):
    """Paho connect callback: on success (rc == 0) subscribe to the config topic."""
    if rc != 0:
        return
    logger.info("Connected to broker " + str(config['broker_address'] + ":" + str(config['broker_port'])))
    # Subscribe to device config
    logger.info("Subscribing to device config at " + config['config_topic'])
    client.subscribe(config['config_topic'])
def on_subscribe(mosq, obj, mid, granted_qos):
    """Paho subscribe callback: log the broker's SUBACK (message id + granted QOS)."""
    ack_msg = "Subscribed with message ID " + str(mid) + " and QOS " + str(granted_qos) + " acknowledged by broker"
    logger.info(ack_msg)
def on_message(mosq, obj, msg):
    """Paho message callback: forward a weather JSON payload to PWSweather.

    Expects the payload on config['config_topic'] to be a JSON object holding
    at least 'temperature_F' and 'humidity'; every key listed in sub_topics is
    mapped to its PWSweather query-string parameter. Returns None on failure.
    """
    payload_as_string = msg.payload.decode("utf-8")
    logger.info("Received message: " + msg.topic + ": " + payload_as_string)
    if msg.topic != config['config_topic']:
        return
    parsed_json = json.loads(payload_as_string)
    # Calculate dew point (simple linear approximation from temp/humidity)
    parsed_json['dewpoint'] = parsed_json['temperature_F'] - ((100.0 - parsed_json['humidity']) / 2.788 )
    pws_url = "http://www.pwsweather.com/pwsupdate/pwsupdate.php?" + \
        "&ID=" + urllib.parse.quote(config['pws_id']) + \
        "&PASSWORD=" + urllib.parse.quote(config['pws_pass'])
    for key in parsed_json:
        if key in sub_topics:
            arg_name = sub_topics[key]
            value = urllib.parse.quote(str(parsed_json[key]))
            if "time" == key:
                # Re-format the ISO timestamp (e.g. 2020-11-15T21:00:10) into
                # the "YYYY-MM-DD HH:MM:SS" form the API expects for dateutc.
                time = datetime.datetime.fromisoformat(parsed_json[key])
                value = urllib.parse.quote_plus(time.strftime("%Y-%m-%d %H:%M:%S"))
            pws_url += ('&' + arg_name + '=' + value)
    try:
        # Context manager guarantees the response is closed on every path
        # (the original leaked it on non-URLError failures and misspelled
        # the variable 'resonse'); the body is ignored on purpose.
        with urllib2.urlopen(pws_url):
            pass
    except urllib2.URLError as e:
        logger.error('URLError: ' + str(pws_url) + ': ' + str(e.reason))
        return None
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit pass through.
        import traceback
        logger.error('Exception: ' + traceback.format_exc())
        return None
def on_publish(mosq, obj, mid):
    """Paho publish callback; intentionally a no-op (publishes are not logged)."""
    pass
# Create the Mosquitto client
mqttclient = paho.Client()

# Bind the Mosquitte events to our event handlers
mqttclient.on_connect = on_connect
mqttclient.on_subscribe = on_subscribe
mqttclient.on_message = on_message
mqttclient.on_publish = on_publish

# Connect to the Mosquitto broker
logger.info("Connecting to broker " + config['broker_address'] + ":" + str(config['broker_port']))
mqttclient.connect(config['broker_address'], config['broker_port'], 60)

# Start the Mosquitto loop in a non-blocking way (uses threading)
# NOTE(review): loop_forever() actually blocks the calling thread; the comment
# above appears stale (loop_start() would be the non-blocking variant).
mqttclient.loop_forever()
|
[
"bdemers@apache.org"
] |
bdemers@apache.org
|
3e8a78d8393d93f4f672eb9e62d45a1c0501be31
|
ea5c8f4250dcd2a148c06f0b9bee04fbb030d088
|
/prova-servidor.py
|
f7e830192a4f20df2fb7105241a83270e275a37b
|
[] |
no_license
|
pnunis/programacao2018
|
f5c17e1e18311486fc2ae3a2aaa47ee32e6dddba
|
71275f20bc936e00a390198eda5516c56d7e1683
|
refs/heads/master
| 2021-04-27T21:27:52.710379
| 2018-05-02T23:19:33
| 2018-05-02T23:19:33
| 122,401,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 511
|
py
|
import socket

# Bind a TCP server to localhost:22222 and wait for a single client.
HOST = socket.gethostbyname('localhost')
PORT = 22222

tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_server_socket.bind((HOST,PORT))
tcp_server_socket.listen()

client, addr = tcp_server_socket.accept()
print('Conexão de:', addr)

# Acknowledge loop: reply "Recebida" to each chunk until the peer closes the
# connection (recv returns b'').
while True:
    data = client.recv(1024)
    message = "Recebida"
    byte_msg = message.encode('utf-8')
    # NOTE(review): the acknowledgement is sent even for the empty read that
    # signals disconnect — confirm whether that final send is intended.
    client.send(byte_msg)
    if not data: break
    print("\n Mensagem recebido:", data)

client.close()
tcp_server_socket.close()
|
[
"noreply@github.com"
] |
pnunis.noreply@github.com
|
c2086683255ba3a12682adfadd6fe6869bf47c6e
|
38b0dcf8500cea90cd5e691d0fdebcdaf0de3d7e
|
/alerts_test.py
|
495601c86febe35ebb1ed71616f5fcefad226822
|
[] |
no_license
|
Cloudxtreme/monitors
|
b96f20f039a9726cc75cf454a2363a3ff66edf55
|
13a0e0b7c4e3daffbbce4e2adda0266af03fb5af
|
refs/heads/master
| 2021-05-28T02:16:59.429366
| 2015-01-09T19:54:47
| 2015-01-09T19:58:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,741
|
py
|
#!/usr/bin/env python
import ConfigParser
import logging
import tempfile
import unittest
import alerts
class ConfigureLoggingTest(unittest.TestCase):
    """Tests for the alerts module's logging-configuration helpers.

    Written for Python 2 (``iteritems``, ``ConfigParser`` module name,
    ``assertEquals``) — keep that in mind before modernizing.
    """

    @staticmethod
    def _config(**data):
        # Build a RawConfigParser holding a [logging] section populated from
        # the keyword arguments.
        config = ConfigParser.RawConfigParser()
        config.add_section('logging')
        for k, v in data.iteritems():
            config.set('logging', k, v)
        return config

    def test_get_logging_handler(self):
        # Each mode string maps to a distinct handler class; unknown modes raise.
        def assertHandler(mode, expected_class):
            with tempfile.NamedTemporaryFile() as f:
                config = self._config(mode=mode, file=f.name,
                                      syslog_addr='/dev/log')
                self.assertTrue(
                    isinstance(
                        alerts._get_logging_handler(config), expected_class))
        assertHandler('file', logging.FileHandler)
        assertHandler('stderr', logging.StreamHandler)
        # we can count on the alerts module importing logging.handlers
        assertHandler('syslog', logging.handlers.SysLogHandler)
        self.assertRaises(ValueError, assertHandler, 'asdf', None)

    def test_get_logging_formatter(self):
        # syslog mode gets the plain formatter; any other mode the stream one.
        f = alerts._get_logging_formatter(self._config(mode='syslog'))
        self.assertFalse(isinstance(f, alerts.StreamLoggingFormatter))
        f = alerts._get_logging_formatter(self._config(mode='not-syslog'))
        self.assertTrue(isinstance(f, alerts.StreamLoggingFormatter))

    def test_get_logging_level(self):
        # Defaults to INFO; an explicit level comes back verbatim (as a string).
        config = self._config()
        self.assertEquals(logging.INFO, alerts._get_logging_level(config))
        config = self._config(level='DEBUG')
        self.assertEquals('DEBUG', alerts._get_logging_level(config))

if __name__ == '__main__':
    unittest.main()
|
[
"logan@reddit.com"
] |
logan@reddit.com
|
573df7cfe64f1d9000784755f7df17cf47c9023d
|
7bfe63be710ac2541ced8692b27c52e988df65f0
|
/fruits_b2c/df_cart/models.py
|
cd93c91f65254e3d9f5e013ed9fdc4ccb16db93a
|
[] |
no_license
|
GitHubQinDong/django_b2c
|
abdd57a47ae7533478e47a3c384909d88794c0da
|
13219d0cf390d95d73298748f44d35bb9f560403
|
refs/heads/master
| 2021-01-25T14:04:55.566146
| 2017-07-15T09:06:29
| 2017-07-15T09:06:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
from django.db import models
# Create your models here.

# A user and goods are related many-to-many; the cart row is the intermediate
# link: one-to-many with users and one-to-many with goods.
class Cart(models.Model):
    """One cart entry: a quantity of one product for one user."""

    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 — confirm
    # the project's Django version before upgrading.
    user = models.ForeignKey('df_user.User')
    goods = models.ForeignKey('df_goods.GoodInfo')
    count = models.IntegerField()
|
[
"915522927@qq.com"
] |
915522927@qq.com
|
dbeab7665bf6cbaf5f4ccb0cfc2703f38f23512e
|
3027d2d9bf91747c1fa2d4ff6202029e6fab6c80
|
/1359/c/c.py
|
c16120e3495a24ab54dac985fadd19260eba173d
|
[] |
no_license
|
brenopoggiali/rounds-cf
|
54d3e4f54fccc34cce9b30db46b869a550019ee5
|
fb62e602220402231ee15855d42579ae9ed12071
|
refs/heads/master
| 2022-11-15T07:46:56.933526
| 2020-07-13T18:01:24
| 2020-07-13T18:01:24
| 268,101,483
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
from sys import stdin
import fractions
# Fast input: rebind the builtin to a buffered line reader.
input = stdin.readline


def i():
    """Read one raw line."""
    return input()


def ii():
    """Read one line as a single int."""
    return int(input())


def iis():
    """Read one line of whitespace-separated ints (lazy map)."""
    return map(int, input().split())


def liis():
    """Read one line of whitespace-separated ints as a list."""
    return list(map(int, input().split()))


def print_array(a):
    """Print the items of *a* space-separated on one line."""
    print(" ".join(map(str, a)))
def eq(x):
    """Exact average temperature after x hot pours and x-1 cold pours.

    Uses module globals h (hot), c (cold) and F (fractions.Fraction); the
    denominator 2x-1 is the total number of pours.
    """
    return F((h*x + c*(x-1)), (x+x-1))
def bin_ser(lb, ub, lv, uv, target):
    """Binary-search the pour count whose eq() value is closest to target.

    lb/ub bound the pour count; lv/uv mirror the eq() values at those bounds
    (they are updated but never read — kept for interface compatibility).
    The branch directions follow eq() being decreasing in x here: a value
    below target moves the upper bound down, above target moves the lower
    bound up. Returns the last midpoint examined (caller refines ±1).
    """
    elm_mid = lb
    while abs(lb-ub) > 1:
        elm_mid = (lb+ub)//2
        mid = eq(elm_mid)
        if mid < target:
            ub = elm_mid
            uv = mid
        elif mid > target:
            lb = elm_mid
            lv = mid
        else:
            # Exact hit: stop immediately.
            break
    return elm_mid
# Shorthand for exact rational arithmetic (avoids float comparison errors).
F = fractions.Fraction

t = ii()
for _ in range(t):  # range(t) is evaluated once, so rebinding t below is safe
    h, c, t = iis()
    if t <= (h+c)/2:
        # The average can never drop below (h+c)/2: two pours is optimal.
        print(2)
    elif t >= h:
        # Nothing beats a single hot pour.
        print(1)
    else:
        # Double the pour count until eq() falls below the target, then
        # binary-search the crossing point and compare its neighbors.
        i = 1
        while True:
            i *= 2
            calc = eq(i)
            if calc < t:
                mid = (bin_ser(1, i, h, calc, t))
                ops = [[mid-1, eq(mid-1)], [mid, eq(mid)], [mid+1, eq(mid+1)]]
                mini = float("inf")
                resp = float("inf")
                for i, j in ops:
                    if abs(t-j) < mini:
                        mini = abs(t-j)
                        resp = i
                # resp pours of hot => total pours is 2*resp - 1.
                print(resp*2-1)
                break
|
[
"brenopoggiali@gmail.com"
] |
brenopoggiali@gmail.com
|
8405aea4f75fe8129ff7801d16d85255dce935c7
|
85de8e8063d043de97c039b78a4bb963ea2e43c6
|
/daisychain/channel_dropbox/migrations/0001_initial.py
|
2b6dba45b5c19209feb9daa13f3142d50bc022b8
|
[
"MIT"
] |
permissive
|
daisychainme/daisychain
|
9ff498d051103e5ee5ada64fdc14db7359665551
|
245d0041f1efd2d6cc110f60aebf2e2dee98bcdb
|
refs/heads/master
| 2022-05-01T02:14:07.550629
| 2022-03-29T08:17:16
| 2022-03-29T08:17:16
| 69,340,137
| 5
| 0
|
MIT
| 2022-03-29T08:17:17
| 2016-09-27T09:22:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,283
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-08-17 09:06
# Generated by Django 1.9.6 on 2016-07-14 17:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the Dropbox channel.

    Creates DropboxAccount (OAuth access token + delta cursor, one per auth
    user) and DropboxUser (profile and quota data, linked to an account).
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='DropboxAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('access_token', models.CharField(max_length=255, verbose_name='Access Token')),
                ('cursor', models.CharField(max_length=255, verbose_name='Cursor')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Dropbox Account',
                'verbose_name_plural': 'Dropbox Accounts',
            },
        ),
        migrations.CreateModel(
            name='DropboxUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dropbox_userid', models.CharField(max_length=255, verbose_name='DropBox User ID')),
                ('display_name', models.CharField(max_length=100, verbose_name='DropBox Display Name')),
                ('email', models.CharField(max_length=100, verbose_name='email')),
                # NOTE(review): verbose_name 'email' on the photo URL looks like
                # a copy-paste slip; fixing it requires a follow-up migration,
                # so this applied migration is left untouched.
                ('profile_photo_url', models.CharField(max_length=255, null=True, verbose_name='email')),
                ('disk_used', models.DecimalField(decimal_places=4, max_digits=12, verbose_name='Used Disk Space')),
                ('disk_allocated', models.DecimalField(decimal_places=4, max_digits=12, verbose_name='Total Allocated Disk Usage')),
                ('dropbox_account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='channel_dropbox.DropboxAccount')),
            ],
            options={
                'verbose_name': 'Dropbox User',
                'verbose_name_plural': 'Dropbox Users',
            },
        ),
    ]
|
[
"lukas.spranger@fau.de"
] |
lukas.spranger@fau.de
|
b3c0285177073316dea91db140034537df72eb1f
|
b304a47e862eee982193f183d4dde270099ea651
|
/libs/module18.py
|
229820f02d1661ff17b3ef1387723b3000701c83
|
[] |
no_license
|
and3rson/osx-coverage-testcase
|
507c2cbd7927bcc519c14be48b6f28bedd7d0af0
|
245ef0cd0c19938cdcd43135985b03e87d40d6e2
|
refs/heads/master
| 2021-01-13T09:43:50.232051
| 2016-10-06T09:13:39
| 2016-10-06T09:13:39
| 70,139,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33
|
py
|
def method18():
    """Return the constant string 'bar' (coverage-test fixture)."""
    result = 'bar'
    return result
|
[
"anderson@Andrews-iMac.local"
] |
anderson@Andrews-iMac.local
|
a7eae49c44d40be0a18e0a75481551a44ddc4f23
|
ea05617b5d33a641bb60b735e936e8f0ba6e57a7
|
/unittests/test_tipwin.py
|
2994d0b2aa27724323853e4cc441959d723bd528
|
[] |
no_license
|
bbpatil/Phoenix
|
18716744f5a3f5dbd805520baf3edc14ebde9529
|
4d05434a6c9e9effb2ade8085e2bfa83775575ed
|
refs/heads/master
| 2022-02-23T21:40:34.510672
| 2016-06-12T05:26:06
| 2016-06-12T05:26:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
import unittest
import wtc
import wx
#---------------------------------------------------------------------------
class tipwin_Tests(wtc.WidgetTestCase):
    """Smoke-test wx.TipWindow construction, bounding-rect setup and teardown."""

    def test_tipwinCtor(self):
        # Create a tip window parented to the fixture frame.
        w = wx.TipWindow(self.frame, "This is a tip message")
        # Keep the tip alive while the mouse stays within the frame's rect.
        w.SetBoundingRect(self.frame.GetRect())
        self.waitFor(100)  # give the native window time to settle
        w.Show()
        self.waitFor(100)
        w.Close()

#---------------------------------------------------------------------------

if __name__ == '__main__':
    unittest.main()
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
a1acb9f510191d6a98922457994ca61fbc5fa67d
|
8f10319da47da9fb2bd2c0470ab3041fb09ae2c6
|
/python-for-Data-science/code.py
|
9b67a93ea2bdace74469ff404b276affd38f1ff3
|
[
"MIT"
] |
permissive
|
sunitha1999/ga-learner-dsb-repo
|
f0481499465b6422b07e19226b5a2bd2990bf072
|
80c9b23fd273b62dc54b4ef51dde3f54b987d02d
|
refs/heads/master
| 2022-05-24T07:04:54.842490
| 2020-04-27T17:53:02
| 2020-04-27T17:53:02
| 259,409,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
# --------------
# Exercise 1: build the combined roster, then add/remove members.
class_1 = ['Geoffrey Hinton', 'Andrew Ng', 'Sebastian Raschka', 'Yoshua Bengio']
class_2 = ['Hilary Mason', 'Carla Gentry', 'Corinna Cortes']

new_class = [*class_1, *class_2]
print(new_class)

new_class.append('Peter Warden')
print(new_class)

new_class.remove('Carla Gentry')
print(new_class)
# Code ends here

# --------------
# Exercise 2: total marks and percentage out of 500.
courses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}
total = sum(courses.values())
print(total)

percentage = total / 500 * 100
print(percentage)
# Code ends here

# --------------
# Exercise 3: the student with the highest mathematics score.
mathematics = {
    'Geoffrey Hinton': 78,
    'Andrew Ng': 95,
    'Sebastian Raschka': 65,
    'Yoshua Benjio': 50,
    'Hilary Mason': 70,
    'Corinna Cortes': 66,
    'Peter Warden': 75,
}
topper = max(mathematics, key=mathematics.get)
print(topper)
# Code ends here

# --------------
# Given string
topper = 'andrew ng'

# Exercise 4: swap first/last name and upper-case for the certificate.
first_name, last_name = topper.split()
full_name = last_name + " " + first_name
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
|
[
"sunitha1999@users.noreply.github.com"
] |
sunitha1999@users.noreply.github.com
|
cc0d17e7de7e1585d6de1a16555659ac4a8a3d6e
|
25d111148f08bb1d0b1c2e652f095733dde125b2
|
/GIP/HistoriaClinica/forms.py
|
ff191a78c87b51d050d0083b2df8f37994b0b73a
|
[] |
no_license
|
Naukas1/GIP_Final
|
b5990254cf53375ed5d5a973e280efeac74f32ec
|
65e880270090ae11e5fd177a0598e87b9608f9e6
|
refs/heads/master
| 2021-01-12T04:17:41.098975
| 2017-03-19T23:21:43
| 2017-03-19T23:21:43
| 77,568,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
from django import forms
from .models import HistoriaClinica, HistoriaClinicaDetalle
import datetime
class HistoriaClinica_Form(forms.ModelForm):
    """ModelForm for creating/editing a clinical-history record."""

    class Meta:
        model = HistoriaClinica
        # Fields exposed to the user, in display order.
        fields = [
            "Nombre",
            "Paciente",
            "Profesional",
            "Antecedentes",
            "Alergias",
            "Diagnostico",
        ]
class HistoriaClinicaDetalle_Form(forms.ModelForm):
    """ModelForm for one detail entry (description + treatment) of a history."""

    class Meta:
        model = HistoriaClinicaDetalle
        fields = [
            "Descripcion",
            "Tratamiento",
        ]
|
[
"thindholwen@gmail.com"
] |
thindholwen@gmail.com
|
5554b55cb0d5330cc443a9f4009854af8fe84d55
|
4dfc45597fa45ba4fcfada73f6bc718dabc6d1bb
|
/python_ex262.py
|
fd3d092a6242d774da23489bba78b027fe49acd4
|
[] |
no_license
|
tschoi6712/HelloProgrammer
|
3b5cd3ac733d7e34d746cd0fe2958eb713de527c
|
f814982a76eacfd6d88208444777f767b9c06d3b
|
refs/heads/master
| 2020-07-14T10:04:49.203106
| 2019-09-05T00:23:27
| 2019-09-05T00:23:27
| 206,437,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
class Dog():
    """A dog with a name, a breed, and an owner (typically a Person)."""

    def __init__(self, name, breed, owner):
        # Expose the constructor arguments as public attributes.
        self.name = name
        self.breed = breed
        self.owner = owner
class Person():
    """A person identified only by name."""

    def __init__(self, name):
        self.name = name
# Demo: the dog stores its owner, so the owner's name is reachable through
# the dog instance (prints "Mick Jagger").
mick = Person("Mick Jagger")
stan = Dog("Stanley",
           "Bulldog",
           mick)
print(stan.owner.name)
|
[
"tschoi6712@gmail.com"
] |
tschoi6712@gmail.com
|
94b9068d13399c1b0ee57a43c85bc1a4c82ba618
|
fad702beb35d587278010e570a923bc84a4dda4a
|
/code/pyseg/scripts/picking/pre_seg_star.py
|
69ce678ef129b2047c285373ab7548b2b14ed085
|
[
"Apache-2.0"
] |
permissive
|
anmartinezs/pyseg_system
|
f7769ec3dcaf243895ec1cf13ac6e1da1ab2a92a
|
1370bfedae2ad5e6cdd1dc08395eb9e95b4a8596
|
refs/heads/master
| 2023-02-23T06:23:10.087737
| 2023-01-30T13:24:36
| 2023-01-30T13:24:36
| 227,147,753
| 15
| 4
|
NOASSERTION
| 2023-02-10T17:18:20
| 2019-12-10T14:58:22
|
C
|
UTF-8
|
Python
| false
| false
| 12,883
|
py
|
"""
Pre-processing for star_graph_batch.py of un-oriented membranes from TomoSegMemTV output
Input: - STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: TomoSegMemTV density map output
+ _psSegImage: (optional) binary mask to focus the segmentation analysis
+ _mtMtubesCsv: (optional) a .csv file with microtubule center lines
- Setting for segmenting the membranes from TomoSegMemTV density map:
+ Density threshold
+ Size threshold
- Sub-volume splitting settings
Output: - A STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: sub-volumes
+ _psSegImage: Un-oriented membrane segmentations for each subvolume
+ Columns for localizing the sub-volumes within each original tomogram
"""
################# Package import
import gc
import os
import sys
import math
import time
import pyseg as ps
import scipy as sp
import skimage as sk
import numpy as np
###### Global variables

__author__ = 'Antonio Martinez-Sanchez'

# Segmentation label values: membrane voxels and their neighborhood shell.
MB_LBL, MB_NEIGH = 1, 2

########################################################################################
# PARAMETERS
########################################################################################

ROOT_PATH = '/fs/pool/pool-lucic2/in_situ_mitoo'

# Input STAR file
in_star = ROOT_PATH + '/mbo_seg/mb_seg_mitoo.star'

# Output directory
out_dir = ROOT_PATH + '/mbo_seg/pre'

# Subvolume splitting settings
sp_split = (2, 2, 1)  # splits per axis; None disables splitting
sp_off_voxels = 5 # vox overlap padding between neighboring subvolumes

# Membrane segmentation
sg_res = 1.368 # nm/voxel
sg_th = None # 8 (density threshold; None -> simple >0 binarization)
sg_sz = None # 3e3 (minimum component size in voxels, used only with sg_th)
sg_mb_thick = 3 # nm
sg_mb_neigh = 15 # nm

# CSV file pre-processing
cv_coords_cools = (1, 2, 3)  # CSV columns holding X, Y, Z sample coordinates
cv_id_col = 4  # CSV column holding the microtubule ID

# Microtubule settings
mt_rad = 30 # nm
mt_swap_xy = False
########################################################################################
# MAIN ROUTINE
########################################################################################
########## Print initial message

print('Pre-processing for SEG analysis of un-oriented membranes from TomoSegMemTV output.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tOutput directory: ' + str(out_dir))
print('\tInput STAR file: ' + str(in_star))
print('\tData resolution: ' + str(sg_res) + ' nm/vx')
if sg_th is not None:
    print('\tSegmentation settings: ')
    print('\t\t-Density threshold: ' + str(sg_th))
    print('\t\t-Size threshold: ' + str(sg_sz) + ' vx')
print('\tSub-volume splitting settings: ')
print('\t\t-Number of splits (X, Y, Z): ' + str(sp_split))
print('\t\t-Offset voxels: ' + str(sp_off_voxels))
print('\tMicrotubule settings:')
print('\t\t-Microtube luminal radius: ' + str(mt_rad) + ' nm')
print('\tCSV pre-processing: ')
print('\t\t-Columns for samples coordinates (X, Y, Z): ' + str(cv_coords_cools))
print('\t\t-Column for microtubule ID: ' + str(cv_id_col))
print('')

######### Process

print('Parsing input parameters...')
# Normalize parameter types once up front.
sp_res, mt_rad, sp_off_voxels = float(sg_res), float(mt_rad), int(sp_off_voxels)
out_stem = os.path.splitext(os.path.split(in_star)[1])[0]
conn_mask = np.ones(shape=(3,3,3))  # 26-connectivity structuring element
out_seg_dir = out_dir + '/segs'
if not os.path.isdir(out_seg_dir):
    os.makedirs(out_seg_dir)

print('Loading input STAR file...')
gl_star = ps.sub.Star()
try:
    gl_star.load(in_star)
except ps.pexceptions.PySegInputError as e:
    print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
    print('Terminated. (' + time.strftime("%c") + ')')
    sys.exit(-1)

# Output STAR file: one row per saved sub-volume.
star = ps.sub.Star()
star.add_column(key='_rlnMicrographName')
star.add_column(key='_rlnImageName')
star.add_column(key='_psSegImage')
star.add_column(key='_psSegRot')
star.add_column(key='_psSegTilt')
star.add_column(key='_psSegPsi')
star.add_column(key='_psSegOffX')
star.add_column(key='_psSegOffY')
star.add_column(key='_psSegOffZ')
print('Main Routine: tomograms loop')
tomo_id = 0
for row in range(gl_star.get_nrows()):

    in_ref = gl_star.get_element('_rlnMicrographName', row)
    print('\tProcessing tomogram: ' + in_ref)
    out_ref_stem = os.path.splitext(os.path.split(in_ref)[1])[0]
    in_mb = gl_star.get_element('_rlnImageName', row)
    print('\t\t-Loading membrane segmentation: ' + in_mb)
    tomo_mb = ps.disperse_io.load_tomo(in_mb)
    tomo_ref = ps.disperse_io.load_tomo(in_ref, mmap=True)
    # Default processing bounds: the whole tomogram.
    off_mask_min_x, off_mask_max_x = 0, tomo_ref.shape[0]
    off_mask_min_y, off_mask_max_y = 0, tomo_ref.shape[1]
    off_mask_min_z, off_mask_max_z = 0, tomo_ref.shape[2]

    # Optional: build a mask that blanks out microtubule lumina.
    if gl_star.has_column('_mtMtubesCsv'):
        in_csv = gl_star.get_element('_mtMtubesCsv', row)
        print('\tReading input CSV file: ' + in_csv)
        mt_dic = ps.globals.read_csv_mts(in_csv, cv_coords_cools, cv_id_col, swap_xy=mt_swap_xy)
        mts_points = list()
        for mt_id, mt_samps in zip(iter(mt_dic.keys()), iter(mt_dic.values())):
            mts_points += mt_samps
        mts_points = np.asarray(mts_points, dtype=np.float32) * (1./sg_res)
        print('\tSegmenting the microtubules...')
        mt_mask = ps.globals.points_to_mask(mts_points, tomo_mb.shape, inv=True)
        mt_mask = sp.ndimage.morphology.distance_transform_edt(mt_mask, sampling=sg_res, return_indices=False)
        mt_mask = mt_mask > mt_rad

    # Binarize the membrane map (simple >0 test or density threshold).
    if sg_th is None:
        tomo_mb = tomo_mb > 0
    else:
        tomo_mb = (tomo_mb >= sg_th).astype(dtype=int)
    if gl_star.has_column('_mtMtubesCsv'):
        tomo_mb *= mt_mask
        del mt_mask

    # Optional: restrict the analysis to a user-provided binary mask and shrink
    # the processing bounds to its bounding box (padded by sp_off_voxels).
    if gl_star.has_column('_psSegImage'):
        print('\tApplying the mask...')
        hold_mask = ps.disperse_io.load_tomo(gl_star.get_element('_psSegImage', row)) > 0
        tomo_mb *= hold_mask
        ids_mask = np.where(hold_mask)
        off_mask_min_x, off_mask_max_x = ids_mask[0].min()-sp_off_voxels, ids_mask[0].max()+sp_off_voxels
        if off_mask_min_x < 0:
            off_mask_min_x = 0
        if off_mask_max_x > hold_mask.shape[0]:
            off_mask_max_x = hold_mask.shape[0]
        off_mask_min_y, off_mask_max_y = ids_mask[1].min()-sp_off_voxels, ids_mask[1].max()+sp_off_voxels
        if off_mask_min_y < 0:
            off_mask_min_y = 0
        if off_mask_max_y > hold_mask.shape[1]:
            off_mask_max_y = hold_mask.shape[1]
        off_mask_min_z, off_mask_max_z = ids_mask[2].min()-sp_off_voxels, ids_mask[2].max()+sp_off_voxels
        if off_mask_min_z < 0:
            off_mask_min_z = 0
        if off_mask_max_z > hold_mask.shape[2]:
            off_mask_max_z = hold_mask.shape[2]
        del hold_mask
        del ids_mask

    # ps.disperse_io.save_numpy(tomo_mb, out_dir + '/hold.mrc')
    # Size filtering: drop connected components smaller than sg_sz voxels.
    if sg_th is not None:
        print('\tMembrane thresholding...')
        # tomo_mb, num_lbls = sp.ndimage.measurements.label(tomo_mb, structure=conn_mask)
        tomo_mb, num_lbls = sk.measure.label(tomo_mb, connectivity=3, return_num=True)
        tomo_sz = np.zeros(shape=tomo_mb.shape, dtype=np.int32)
        for lbl in range(1, num_lbls + 1):
            ids = tomo_mb == lbl
            feat_sz = ids.sum()
            if feat_sz >= sg_sz:
                tomo_sz[ids] = feat_sz
        tomo_mb = tomo_sz > 0
        del tomo_sz

    print('\tSegmenting the membranes...')
    # FIX: split_id must be initialized before BOTH branches below; the
    # original only set it inside the else-branch, so the sp_split-is-None
    # path raised NameError when composing the first output filename.
    split_id = 1
    if sp_split is None:
        svol_mb = tomo_mb[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
        svol = tomo_ref[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
        # Distance to the membrane -> thick membrane + neighborhood labels.
        svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res,
                                                                return_indices=False)
        svol_seg = np.zeros(shape=svol.shape, dtype=np.int8)
        svol_seg[svol_dst < sg_mb_neigh] = MB_NEIGH
        svol_seg[svol_dst < sg_mb_thick] = MB_LBL
        out_svol = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc'
        out_seg = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_split_' + str(split_id) + '_seg.mrc'
        ps.disperse_io.save_numpy(svol, out_svol)
        ps.disperse_io.save_numpy(svol_seg, out_seg)
        del svol_seg
        del svol_dst
        split_id += 1
        row_dic = dict()
        row_dic['_rlnMicrographName'] = in_ref
        row_dic['_rlnImageName'] = out_svol
        row_dic['_psSegImage'] = out_seg
        row_dic['_psSegRot'] = 0
        row_dic['_psSegTilt'] = 0
        row_dic['_psSegPsi'] = 0
        row_dic['_psSegOffX'] = 0
        row_dic['_psSegOffY'] = 0
        row_dic['_psSegOffZ'] = off_mask_min_z
        star.add_row(**row_dic)
    else:
        print('\tSplitting into subvolumes:')

        # Build overlapping 1-D interval lists per axis; consecutive splits
        # share sp_off_voxels of overlap so membranes at seams are not cut.
        if sp_split[0] > 1:
            offs_x = list()
            pad_x = int(math.ceil((off_mask_max_x-off_mask_min_x) / sp_split[0]))
            offs_x.append((off_mask_min_x, pad_x+sp_off_voxels))
            lock = False
            while not lock:
                hold = offs_x[-1][1] - sp_off_voxels + pad_x
                if hold >= off_mask_max_x:
                    offs_x.append((offs_x[-1][1] - sp_off_voxels, off_mask_max_x))
                    lock = True
                else:
                    offs_x.append((offs_x[-1][1]-sp_off_voxels, offs_x[-1][1]+pad_x+sp_off_voxels))
        else:
            offs_x = [(off_mask_min_x, off_mask_max_x),]
        if sp_split[1] > 1:
            offs_y = list()
            pad_y = int(math.ceil((off_mask_max_y-off_mask_min_y) / sp_split[1]))
            # FIX: the original seeded the Y intervals with off_mask_min_x.
            offs_y.append((off_mask_min_y, pad_y + sp_off_voxels))
            lock = False
            while not lock:
                hold = offs_y[-1][1] - sp_off_voxels + pad_y
                if hold >= off_mask_max_y:
                    offs_y.append((offs_y[-1][1] - sp_off_voxels, off_mask_max_y))
                    lock = True
                else:
                    offs_y.append((offs_y[-1][1] - sp_off_voxels, offs_y[-1][1] + pad_y + sp_off_voxels))
        else:
            # FIX: the original fell back to the X bounds here.
            offs_y = [(off_mask_min_y, off_mask_max_y),]
        if sp_split[2] > 1:
            offs_z = list()
            pad_z = int(math.ceil((off_mask_max_z-off_mask_min_z) / sp_split[2]))
            offs_z.append((off_mask_min_z, pad_z + sp_off_voxels))
            lock = False
            while not lock:
                hold = offs_z[-1][1] - sp_off_voxels + pad_z
                if hold >= off_mask_max_z:
                    offs_z.append((offs_z[-1][1] - sp_off_voxels, off_mask_max_z))
                    lock = True
                else:
                    offs_z.append((offs_z[-1][1] - sp_off_voxels, offs_z[-1][1] + pad_z + sp_off_voxels))
        else:
            offs_z = [(off_mask_min_z, off_mask_max_z),]

        # Cut, segment and save each sub-volume, recording its origin offsets.
        for off_x in offs_x:
            for off_y in offs_y:
                for off_z in offs_z:
                    print('\t\t-Splitting subvolume: [' + str(off_x) + ', ' + str(off_y) + ', ' + str(off_z) +']')
                    svol_mb = tomo_mb[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
                    svol = tomo_ref[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
                    svol_dst = sp.ndimage.morphology.distance_transform_edt(np.invert(svol_mb), sampling=sg_res, return_indices=False)
                    svol_seg = np.zeros(shape=svol.shape, dtype=np.int8)
                    svol_seg[svol_dst < sg_mb_neigh] = MB_NEIGH
                    svol_seg[svol_dst < sg_mb_thick] = MB_LBL
                    out_svol = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc'
                    out_seg = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '_mb.mrc'
                    ps.disperse_io.save_numpy(svol, out_svol)
                    ps.disperse_io.save_numpy(svol_seg, out_seg)
                    del svol_seg
                    del svol_dst
                    split_id += 1
                    row_dic = dict()
                    row_dic['_rlnMicrographName'] = in_ref
                    row_dic['_rlnImageName'] = out_svol
                    row_dic['_psSegImage'] = out_seg
                    row_dic['_psSegRot'] = 0
                    row_dic['_psSegTilt'] = 0
                    row_dic['_psSegPsi'] = 0
                    row_dic['_psSegOffX'] = off_x[0]
                    row_dic['_psSegOffY'] = off_y[0]
                    row_dic['_psSegOffZ'] = off_z[0]
                    star.add_row(**row_dic)

    # Prepare next iteration
    gc.collect()
    tomo_id += 1

out_star = out_dir + '/' + out_stem + '_pre.star'
print('\tStoring output STAR file in: ' + out_star)
star.store(out_star)
print('Terminated. (' + time.strftime("%c") + ')')
|
[
"an.martinez.s.sw@gmail.com"
] |
an.martinez.s.sw@gmail.com
|
657f735a01c037231f569992af3283926a4de5b9
|
9738c28fc1a900d628a7f0a7e7486986142f9eac
|
/test/level.py
|
73567f0f533a362d72301dc4f67d09d6f90921ee
|
[] |
no_license
|
Hyp-ed/workshop
|
bd16bb8d0c46c808676e18d46ac3fbc4a471176e
|
70dcb1376bf41c324031bb303c901162920cd339
|
refs/heads/master
| 2021-01-11T09:10:01.730992
| 2017-02-22T04:30:14
| 2017-02-22T04:30:14
| 81,372,433
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,194
|
py
|
#!/usr/bin/python
import smbus
import math
import pygame
from OpenGL.GL import *
from OpenGL.GLU import *
from pygame.locals import *
from time import time
import mpu6050
# Window size for the pygame/OpenGL visualization.
SCREEN_SIZE = (1200, 900)
#SCALAR = .5
#SCALAR2 = 0.2
#AVERAGE = 0

# Complementary-filter weights: gyro vs. accelerometer contributions sum to
# 1.0 (the gyro term is currently disabled).
GYRO_W = 0.0
ACCL_W = 1.0 - GYRO_W
def get_y_rotation(x, z):
    """Tilt about the Y axis, in degrees, from accelerometer x/z components."""
    return math.degrees(math.atan2(-x, z))
def get_x_rotation(y, z):
    """Tilt about the X axis, in degrees, from accelerometer y/z components."""
    return math.degrees(math.atan2(y, z))
def get_offset():
xsum = 0
ysum = 0
zsum = 0
n = 1000.0
print 'Calibrating gyro...'
t = time()
for i in xrange(0, int(n)):
(x, y, z) = mpu6050.read_gyro_data()
xsum += x
ysum += y
zsum += z
t = time() - t
print 'Calibrated on {} readings in {} seconds'.format(int(n), t)
return (xsum/n, ysum/n, zsum/n)
def get_gyro_scaled():
    """Read the gyro, subtract the module-level calibration offsets, and scale.

    Depends on module globals X_OFF/Y_OFF/Z_OFF set by the top-level
    get_offset() call. Indentation restored from the mangled paste.
    """
    (x, y, z) = mpu6050.read_gyro_data()
    return ((x - X_OFF) / mpu6050.GYRO_SCALE,
            (y - Y_OFF) / mpu6050.GYRO_SCALE,
            (z - Z_OFF) / mpu6050.GYRO_SCALE)
def get_accl_scaled():
    """Read the accelerometer and scale raw counts by the sensor's scale factor.

    Indentation restored from the mangled paste.
    """
    (x, y, z) = mpu6050.read_accl_data()
    return (x / mpu6050.ACCL_SCALE,
            y / mpu6050.ACCL_SCALE,
            z / mpu6050.ACCL_SCALE)
def resizeScreen(width, height):
    """Configure the GL viewport and a 45-degree perspective projection, then
    place the camera at (0, 1, -5) looking at the origin.

    Indentation restored from the mangled paste.
    """
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(width) / height, 0.001, 10.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(0.0, 1.0, -5.0,   # eye
              0.0, 0.0, 0.0,    # target
              0.0, 1.0, 0.0)    # up
def init():
    """One-time GL state setup: depth test, blending, polygon smoothing, and a
    single ambient light.

    Indentation restored from the mangled paste.
    """
    glEnable(GL_DEPTH_TEST)
    glClearColor(0.0, 0.0, 0.0, 0.0)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_BLEND)
    glEnable(GL_POLYGON_SMOOTH)
    glHint(GL_POLYGON_SMOOTH_HINT, GL_NICEST)
    glEnable(GL_COLOR_MATERIAL)
    glEnable(GL_LIGHTING)
    glEnable(GL_LIGHT0)
    glLightfv(GL_LIGHT0, GL_AMBIENT, (0.3, 0.3, 0.3, 1.0))


# Module-level gyro calibration, read as globals by get_gyro_scaled().
# NOTE(review): in the original paste this line follows init()'s body with
# indentation lost; it must be module-level (not inside init) for X_OFF/Y_OFF/
# Z_OFF to be visible to get_gyro_scaled() — confirm against the original file.
(X_OFF, Y_OFF, Z_OFF) = get_offset()
def run():
# NOTE(review): indentation was lost when this file was pasted into the dump;
# the loop/branch nesting below cannot be reconstructed with certainty, so the
# lines are preserved byte-identically and only annotated.
# Purpose: pygame/OpenGL main loop — draws a wireframe room plus a Cube whose
# orientation follows the MPU-6050, fusing accelerometer and gyro deltas with
# the ACCL_W/GYRO_W complementary-filter weights defined at module level.
#global gyro_total_x, gyro_total_y, last_x, last_y
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE | OPENGL | DOUBLEBUF)
resizeScreen(*SCREEN_SIZE)
init()
#clock = pygame.time.Clock()
cube = Cube((0.0, 0.0, 0.0), (.5, .5, .7))
(x_angle, y_angle, z_angle) = (0, 0, 0)
t0 = time()
r0 = get_gyro_scaled()
# Main loop: exits on window close or Escape.
while True:
for event in pygame.event.get():
if event.type == QUIT:
return
if event.type == KEYUP and event.key == K_ESCAPE:
return
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor((1.,1.,1.))
glLineWidth(1)
# Draw the reference "room": grid lines on floor, back wall, and side walls.
glBegin(GL_LINES)
for x in range(-20, 22, 2):
glVertex3f(x/10.,-1,-1)
glVertex3f(x/10.,-1,1)
for x in range(-20, 22, 2):
glVertex3f(x/10.,-1, 1)
glVertex3f(x/10., 1, 1)
for z in range(-10, 12, 2):
glVertex3f(-2, -1, z/10.)
glVertex3f( 2, -1, z/10.)
for z in range(-10, 12, 2):
glVertex3f(-2, -1, z/10.)
glVertex3f(-2, 1, z/10.)
for z in range(-10, 12, 2):
glVertex3f( 2, -1, z/10.)
glVertex3f( 2, 1, z/10.)
for y in range(-10, 12, 2):
glVertex3f(-2, y/10., 1)
glVertex3f( 2, y/10., 1)
for y in range(-10, 12, 2):
glVertex3f(-2, y/10., 1)
glVertex3f(-2, y/10., -1)
for y in range(-10, 12, 2):
glVertex3f(2, y/10., 1)
glVertex3f(2, y/10., -1)
#(angle, (x, y, z)) = get_rotation(accel_scaled_x, accel_scaled_y, accel_scaled_z)
# Sensor fusion: accelerometer gives absolute tilt deltas, gyro is integrated
# via trapezoidal rule ((r0 + r)/2 * dt); they are blended with ACCL_W/GYRO_W.
(x, y, z) = mpu6050.read_accl_data()
accl_dx = get_x_rotation(y, z) - x_angle
accl_dy = get_y_rotation(x, z) - y_angle
t = time()
r = get_gyro_scaled()
dt = t - t0
gyro_dx = (r0[0] + r[0])/2 * dt
gyro_dy = (r0[1] + r[1])/2 * dt
gyro_dz = (r0[2] + r[2])/2 * dt
t0 = t
r0 = r
#print 'gx={0:10.6f}| ax={1:10.6f}| gy={2:10.6f}| ay={3:10.6f}'.format(gyro_x, accl_x, gyro_y, accl_y)
x_angle += ACCL_W*accl_dx + GYRO_W*gyro_dx
y_angle += ACCL_W*accl_dy + GYRO_W*gyro_dy
z_angle += gyro_dz
print '{:10.6f} {:10.6f} {:10.6f}'.format(x_angle, y_angle, z_angle)
glEnd()
# Render the cube with the fused orientation (note y/z rotation axes swapped
# relative to naming — intentional axis remap for the sensor mounting).
glPushMatrix()
glRotate(x_angle, 1, 0, 0)
glRotate(y_angle, 0, 0, 1)
glRotate(z_angle, 0, 1, 0)
#glRotate(angle, x, z, y)
cube.render()
glPopMatrix()
pygame.display.flip()
class Cube(object):
    """A thin axis-aligned slab drawn as six colored quads with immediate-mode
    OpenGL. Geometry is shared via class attributes; indentation restored from
    the mangled paste.
    """

    def __init__(self, position, color):
        # `position` is stored but not used by render(); kept for callers.
        self.position = position
        self.color = color

    # Cube information (shared by all instances).
    num_faces = 6
    vertices = [(-1.0, -0.05, 0.5),
                (1.0, -0.05, 0.5),
                (1.0, 0.05, 0.5),
                (-1.0, 0.05, 0.5),
                (-1.0, -0.05, -0.5),
                (1.0, -0.05, -0.5),
                (1.0, 0.05, -0.5),
                (-1.0, 0.05, -0.5)]
    normals = [(0.0, 0.0, +1.0),   # front
               (0.0, 0.0, -1.0),   # back
               (+1.0, 0.0, 0.0),   # right
               (-1.0, 0.0, 0.0),   # left
               (0.0, +1.0, 0.0),   # top
               (0.0, -1.0, 0.0)]   # bottom
    vertex_indices = [(0, 1, 2, 3),   # front
                      (4, 5, 6, 7),   # back
                      (1, 5, 6, 2),   # right
                      (0, 4, 7, 3),   # left
                      (3, 2, 6, 7),   # top
                      (0, 1, 5, 4)]   # bottom

    def render(self):
        """Draw all six faces; alternating faces use pure R/G/B so orientation
        is visible, the rest use self.color."""
        glColor(self.color)
        colors = [(1, 0, 0), self.color, (0, 1, 0), self.color, (0, 0, 1), self.color]
        vertices = self.vertices
        # Draw all 6 faces of the cube
        glBegin(GL_QUADS)
        for face_no in xrange(self.num_faces):
            glNormal3dv(self.normals[face_no])
            v1, v2, v3, v4 = self.vertex_indices[face_no]
            glColor(colors[face_no])
            glVertex(vertices[v1])
            glVertex(vertices[v2])
            glVertex(vertices[v3])
            glVertex(vertices[v4])
        glEnd()
# Script entry point (indentation restored from the mangled paste).
if __name__ == "__main__":
    run()
|
[
"s1556515@sms.ed.ac.uk"
] |
s1556515@sms.ed.ac.uk
|
0f30ca85f7153e54c25ea9d9cd17ebd8c3b941ee
|
2f395afe2278e2248dc9970f28948d8adbac1c77
|
/_builds/scons/tools/powershell.py
|
22105fc6a67d197b1173c2b870971332b501503d
|
[] |
no_license
|
eliezerq/CycleTime
|
f225190aa40e6a267fcf9b962de257664c51b3db
|
700b50b00dcd9b7b01999f4e9d74d2efad39795a
|
refs/heads/main
| 2023-07-13T16:15:07.879500
| 2021-08-25T02:09:24
| 2021-08-25T02:09:24
| 399,654,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 763
|
py
|
import os
from SCons.Script import *
from SCons.Util import is_Sequence, Selector
from SCons.Variables import BoolVariable
# Shared SCons Action/Builder for running PowerShell scripts; the command and
# status strings are construction variables installed by generate() below.
PowershellAction = Action('${PSHELL_CMD}', '${PSHELL_STR}')
PowershellBuilder = Builder(
    action=PowershellAction,
    multi=1,                 # allow multiple calls to aggregate sources
    source_suffix='.ps1',
)
def generate(env):
    """SCons tool hook: install PowerShell settings and the 'PowerShell' builder
    into the construction environment.

    Indentation restored from the mangled paste.
    """
    env["POWERSHELL"] = env.WhereIs('POWERSHELL.EXE')
    env["PSHELL_OPTS"] = '-NoLogo -NonInteractive -NoProfile'
    env["PSHELL_CMD"] = '${POWERSHELL} ${PSHELL_OPTS} -command "& { ${SOURCE.path} ${ARGS} >${OUTPUT.path} }"'
    env["PSHELL_STR"] = '= Running powershell script ${SOURCE.name} ='
    # NOTE(review): InstallBuilder is not a stock Environment method — presumably
    # added by a sibling tool in this project; confirm it exists before loading.
    env.InstallBuilder('PowerShell', PowershellBuilder)
def exists(env):
    """SCons tool hook: truthy (the executable path) when POWERSHELL.EXE can be
    located on this machine, falsy otherwise."""
    return env.WhereIs('POWERSHELL.EXE')
|
[
"noreply@github.com"
] |
eliezerq.noreply@github.com
|
7799593dc1300616402385bffeef1d506f97a0aa
|
2be519c35d3bc1e6b7ef4384e0d7467c7172313b
|
/ROI/ROI_evaluate.py
|
20cf1ab8e9f42ea74f7c6e94cfc4b89687f71779
|
[] |
no_license
|
cescigl/IPython2
|
65b173575179cbc6625f537feb4287ed89817e3b
|
46dcf6a351991cc3bfe3f1cb2a440afe39a0be4b
|
refs/heads/master
| 2020-06-18T04:29:00.727065
| 2017-07-17T10:38:02
| 2017-07-17T10:38:02
| 74,948,571
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,148
|
py
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import datetime
import MySQLdb
import time
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.metrics import mean_absolute_error, median_absolute_error
class baseModel(object):
"""Base class holding a lazily-opened MySQL connection.

(Original docstring was Chinese: "base class initializes the database link".)
NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
This is Python 2 code (`except Exception, e` syntax, print statements).
"""
def __init__(self):
super(baseModel, self).__init__()
self.db = None
# NOTE(review): credentials are hard-coded in source control — should be moved
# to configuration / environment variables.
self.host = "test-dataverse-web.c0poh9vgjxya.rds.cn-north-1.amazonaws.com.cn"
self.username = "datauser"
self.passwd = "ebOUeWSin3hAAKKD"
self.database = "dataverse"
def conn(self):
"""Open the MySQL connection on first use and return it (cached on self.db)."""
try:
if self.db == None:
self.db = MySQLdb.connect(self.host,self.username,self.passwd,self.database)
return self.db
# On failure only a message is printed and None is returned (best-effort).
except Exception, e:
print "open db error"
def close(self):
"""Close the database connection if one was opened; swallow close errors."""
if self.db is None:
return True
try:
self.db.close()
except Exception, e:
print "close db error"
class Evaluate(object):
"""Score prediction tables (retain / impressions / clicks / revenue / purchase)
over the window [7 days ago, 3 days ago], writing r2 / MAE / median-AE per day
to log/evaluate.log as pipe-separated rows.

NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
Python 2 code (print statements).
"""
def __init__(self):
super(Evaluate, self).__init__()
self.today = datetime.date.today().strftime("%Y%m%d")
self.date_3day_ago = (datetime.date.today() - datetime.timedelta(days=3)).strftime("%Y%m%d")
self.date_7day_ago = (datetime.date.today() - datetime.timedelta(days=7)).strftime("%Y%m%d")
self.makeTimeList(self.date_7day_ago, self.date_3day_ago)
self.db = baseModel().conn()
self.cursor = self.db.cursor()
self.f=open('log/evaluate.log','a')
# One SQL query per metric; each restricts to a fixed product-id whitelist and
# filters out tiny/invalid rows (actual > 10, prediction > 0).
self.sql_retain = "select date,retain,retain_predict from ROI_Retained_Predict_GroupBy_c where date>=" + self.date_7day_ago + " and date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and retain>10 and retain_predict>0;"
self.sql_spec_retain = "select date,spec_retain,spec_retain_predict from ROI_Retained_Predict_GroupBy_c where date>=" +self.date_7day_ago + " and date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and spec_retain>10 and spec_retain_predict>0;"
self.sql_imp_num = "select revenue_date,imp_num,imp_num_predict from ROI_News_Predict_GroupBy_c where revenue_date>=" + self.date_7day_ago + " and revenue_date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and imp_num>10 and imp_num_predict>0;"
self.sql_click_num = "select revenue_date,click_num,click_num_predict from ROI_News_Predict_GroupBy_c where revenue_date>=" + self.date_7day_ago + " and revenue_date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and click_num>10 and click_num_predict>0;"
self.sql_imp_revenue = "select revenue_date,imp_revenue,imp_revenue_predict from ROI_News_Predict_GroupBy_c where revenue_date>=" + self.date_7day_ago + " and revenue_date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and imp_revenue>10 and imp_revenue_predict>0;"
self.sql_click_revenue = "select revenue_date,click_revenue,click_revenue_predict from ROI_News_Predict_GroupBy_c where revenue_date>=" + self.date_7day_ago + " and revenue_date<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and click_revenue>10 and click_revenue_predict>0;"
self.sql_purchase = "select revenue_day,purchase,purchase_predict from ROI_Purchase_Predict_GroupBy_c where revenue_day>=" + self.date_7day_ago + " and revenue_day<=" + self.date_3day_ago + " and pdtid in (600001,600004,600007,600008,600018,600020,600022,600025,600027,600029,600030) and purchase>10 and purchase_predict>0;"
def makeTimeList(self, begin, end):
# Build the inclusive list of YYYYMMDD strings from `begin` to `end`.
self.timeList = [begin]
plus = 1
while(True):
date = datetime.datetime.strptime(str(begin),"%Y%m%d") + datetime.timedelta(days=plus)
string = date.strftime("%Y%m%d")
plus = plus + 1
self.timeList.append(string)
if string == end:
break
print self.timeList
def getData(self, sql):
# Run the query and return the result set as a 2-D numpy array of strings.
self.cursor.execute(sql)
data = self.cursor.fetchall()
data = np.array(data)
return data
def saveFile(self, l):
# Append one pipe-separated record (list elements joined with '|') to the log.
result = ''
for i in range(len(l)-1):
result = result + str(l[i]) + '|'
result = result + str(l[len(l)-1]) + '\n'
self.f.write(result)
def evaluate(self, sql, key):
# For each day in the window, compare column 1 (actual) against column 2
# (prediction) and log r2, mean-AE and median-AE under the metric name `key`.
data = self.getData(sql)
for times in self.timeList:
loc = np.where(data[:,0]==times)
tmp = data[loc]
#print tmp
y_true = tmp[:,1].astype('float32')
y_pred = tmp[:,2].astype('float32')
r2 = r2_score(y_true, y_pred)
mean_se = mean_squared_error(y_true, y_pred)
mean_ae = mean_absolute_error(y_true, y_pred)
median_ae = median_absolute_error(y_true, y_pred)
print key,times,r2,mean_ae,median_ae
l=[key,times,r2,mean_ae,median_ae]
self.saveFile(l)
def close(self):
# Release the log file handle and DB cursor (the connection itself is not closed).
self.f.close()
self.cursor.close()
def main(self):
# Run every metric evaluation, then clean up.
self.evaluate(self.sql_retain, 'Retain')
self.evaluate(self.sql_spec_retain, 'Spec_Retain')
self.evaluate(self.sql_imp_num, 'imp_num')
self.evaluate(self.sql_click_num, 'click_num')
self.evaluate(self.sql_imp_revenue, 'imp_revenue')
self.evaluate(self.sql_click_revenue, 'click_revenue')
self.evaluate(self.sql_purchase, 'purchase')
self.close()
# Script entry point (indentation restored from the mangled paste).
if __name__ == '__main__':
    eva = Evaluate()
    eva.main()
|
[
"cescigl@163.com"
] |
cescigl@163.com
|
f889f4a1cff3bed27ae632a801fbd3016fcc7bec
|
3d70d9205a85cd3ffb01cce0a62442e1a8cb5a49
|
/venv/Scripts/pip-script.py
|
3c8025f10a9c4e4ec0e1db4a4cb75e6614689e7f
|
[] |
no_license
|
nulijiushimeili/third_library_train
|
ff5789aaf725d22e883190d7b60200594441712e
|
37803431a6442e268b9b88a34c386406d8bbbf14
|
refs/heads/master
| 2020-03-18T05:57:48.356403
| 2018-06-23T17:06:46
| 2018-06-23T17:06:46
| 134,369,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#!D:\mycode1\program\third_library_train\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" or ".exe" from argv[0] so pip reports its
    # canonical program name, then hand control to the pip 9.0.1 console entry
    # point. (Auto-generated easy-install wrapper; indentation restored.)
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
    )
|
[
"dewei123@foxmail.com"
] |
dewei123@foxmail.com
|
6d3ffe24bc943feff5ee89b39da221468771c2de
|
f1ff691de9554d9b770837f5326055ee2b8487c3
|
/ex31/ex31.py
|
50b665eaf748b86bb56b732fe28762acee874c6a
|
[] |
no_license
|
jypark711/jypark
|
eb6da4621602b2a137d1c6e357af77701302c9a5
|
933a4bcc643eb283bda55993d6c1cb876e18b893
|
refs/heads/master
| 2021-05-02T14:24:57.892788
| 2017-04-10T01:14:43
| 2017-04-10T01:14:43
| 54,092,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
# -*- coding: utf-8 -*- #한글패치
# -*- coding: cp949 -*-
# Interactive text adventure (Learn Python the Hard Way ex31).
# NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
# NOTE(review): the raw_input shim targets Python 3, but the bare `print"..."`
# statements below are Python 2 only — the shim is ineffective as written.
if "raw_input" not in dir(__builtins__):
raw_input = input
#builtins?
print("You enter a dark room with two doors. Do you go through door #1 or door #2?")
door = raw_input(">")
if door =="1":
print("There's a giant bear here eating a cheese cake. What do you do?")
print"1. Take the cake."
print"2. Scream at the bear"
bear = raw_input(">")
if bear == "1":
print("The bear eats you face off. Good job ")
elif bear == "2":
print("The bear eats you legs off. Good job")
else:
print("Well, doing %s is probably better. Bear runs away." % bear)
elif door == "2":
print("You share into the endless abyss at cthulhu`s reina.")
print("1. Blueberries.")
print("2. Yeollow jaket clothespins.")
print("3. Understanding revolvers yelling melodies.")
insanity = raw_input(">")
if insanity == "1" or insanity == "2":
print("Your body survives powered by a mind of jello. Good job!")
else:
print("The insanity rots your eyes into a pool of munk. Good job! ")
else:
print("You stumble around and fall on a knife and die. Good job!")
|
[
"CAD Client"
] |
CAD Client
|
ea3f529765b0d8e3cf6944c2691dc186da2a6ee5
|
4b62c01a15feac5f643a1613b01bfdbb96c088b8
|
/app/timelapser/management/commands/take_picture.py
|
1a61b30726fc77f8be7e7a9d58438c24f5937e59
|
[] |
no_license
|
NanoDano/Pi-Timelapser
|
9ee7ca31b856ec6c33af3d7bf2d0956d7a2e1232
|
623a2bffd575ad304f117e105cfb52d2b0116444
|
refs/heads/master
| 2022-11-26T17:14:13.749560
| 2020-08-11T05:00:31
| 2020-08-11T05:00:31
| 270,156,121
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import logging
from os import system, makedirs
from os.path import join
from django.core.management import BaseCommand
from django.utils.datetime_safe import datetime
from timelapser.models import Photo
from app.settings import MEDIA_ROOT
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Django management command: capture one photo with the Pi camera via
    `raspistill` into MEDIA_ROOT/<YYYY-MM-DD>/ and record it as a Photo row.

    Raises a generic Exception when raspistill exits non-zero.
    Indentation restored from the mangled paste.
    """
    help = 'Take photo with Pi camera'

    def handle(self, *args, **options):
        logger.info(self.style.SUCCESS('Taking photo with Pi Camera'))
        trigger_time = datetime.now()  # Most important time is what time it was triggered
        day = trigger_time.strftime("%Y-%m-%d")
        full_image_dir = join(MEDIA_ROOT, day)  # YYYY-MM-DD
        makedirs(full_image_dir, exist_ok=True)  # Ensure daily folder exists
        image_filename = f'image-{trigger_time.strftime("%Y-%m-%d-%H-%M-%S")}.jpg'  # YYYY-MM-DD-HH-MM-SS
        output_filename = join(
            full_image_dir,
            image_filename,
        )
        logger.info(self.style.SUCCESS(f'Taking picture to {output_filename}'))
        # Take the photo
        code = system(f'raspistill -o "{output_filename}" --annotate 12 --quality 100')
        if code != 0:
            # Fix: the original logged the failure through logger.info; a failed
            # capture belongs at ERROR level so monitoring can see it.
            logger.error(self.style.ERROR('Error taking picture with raspistill.'))
            raise Exception('Failure during `raspistill`. Does it exist?')
        # Store the photo reference in database (path is relative to MEDIA_ROOT)
        Photo.objects.create(time_taken=trigger_time, image_file=join(day, image_filename)).save()
        logger.info(self.style.SUCCESS(f'Took photo {output_filename}'))
|
[
"nanodano@devdungeon.com"
] |
nanodano@devdungeon.com
|
a6a6cbd5ad983c4867ac5037ae760a57a2cd18aa
|
2f03a08645c91722a28c27577121e3d69d77e8d2
|
/egpo_utils/human_in_the_loop_env.py
|
35fb78a8a8a0eaed1efa91b034c46c0284a32bb6
|
[
"MIT"
] |
permissive
|
MetaVai/EGPO
|
6eada2b0f3daaa68fac427d3b0c70a398d5205eb
|
dfb4fab891a6e5ceb3d7028a190a1a74f8b75eda
|
refs/heads/main
| 2023-08-31T19:12:58.482165
| 2021-10-23T08:08:50
| 2021-10-23T08:08:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,200
|
py
|
from metadrive.envs.safe_metadrive_env import SafeMetaDriveEnv
from metadrive.policy.manual_control_policy import TakeoverPolicy
from metadrive.engine.core.onscreen_message import ScreenMessage
ScreenMessage.SCALE = 0.1
class HumanInTheLoopEnv(SafeMetaDriveEnv):
"""
This Env depends on the new version of MetaDrive
NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
Adds human-takeover bookkeeping on top of SafeMetaDriveEnv: per-step `takeover`
flag, cumulative takeover cost, and on-screen HUD when rendering.
"""
def default_config(self):
# Extend the base config with a single fixed map and the joystick TakeoverPolicy.
config = super(HumanInTheLoopEnv, self).default_config()
config.update(
{
"environment_num": 1,
"start_seed": 10,
"map": "Cr",
"cost_to_reward": True,
"manual_control": True,
"controller": "joystick",
"agent_policy": TakeoverPolicy
},
allow_add_new_key=True
)
return config
def reset(self, *args, **kwargs):
# Clear takeover state at episode start before delegating to the base reset.
self.t_o = False
self.total_takeover_cost = 0
return super(HumanInTheLoopEnv, self).reset(*args, **kwargs)
def _get_step_return(self, actions, step_infos):
# Augment step info with takeover flags/costs; a cost of 1 is charged on every
# step the human has control, and the total counts takeover *onsets* only.
o, r, d, step_infos = super(HumanInTheLoopEnv, self)._get_step_return(actions, step_infos)
controller = self.engine.get_policy(self.vehicle.id)
last_t_o = self.t_o
self.t_o = controller.takeover if hasattr(controller, "takeover") else False
step_infos["takeover"] = self.t_o
if step_infos["takeover"] and not last_t_o:
self.total_takeover_cost += 1
step_infos["takeover_cost"] = 1 if step_infos["takeover"] else 0
step_infos["total_takeover_cost"] = self.total_takeover_cost
step_infos["native_cost"] = step_infos["cost"]
step_infos["total_native_cost"] = self.episode_cost
return o, r, d, step_infos
def step(self, actions):
# Standard step; when rendering, overlay the cost/takeover HUD text.
ret = super(HumanInTheLoopEnv, self).step(actions)
if self.config["use_render"]:
super(HumanInTheLoopEnv, self).render(text={
"Total Cost": self.episode_cost,
"Total Takeover Cost": self.total_takeover_cost,
"Takeover": self.t_o
})
return ret
# Manual smoke test: drive with random actions, printing crash/done costs.
# NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
if __name__ == "__main__":
env = HumanInTheLoopEnv(
{
"manual_control": False,
"use_render": True,
}
)
o = env.reset()
total_cost = 0
for i in range(1, 100000):
o, r, d, info = env.step(env.action_space.sample())
total_cost += info["cost"]
# env.render(
# text={
# "cost": total_cost,
# "seed": env.current_seed,
# "reward": r,
# "total_cost": info["total_cost"],
# "total_takeover_cost": info["total_takeover_cost"],
# "takeover": info["takeover"]
# }
# )
if info["crash_vehicle"]:
print("crash_vehicle:cost {}, reward {}".format(info["cost"], r))
if info["crash_object"]:
print("crash_object:cost {}, reward {}".format(info["cost"], r))
if d:
total_cost = 0
print("done_cost:{}".format(info["cost"]), "done_reward;{}".format(r))
print("Reset")
env.reset()
env.close()
|
[
"noreply@github.com"
] |
MetaVai.noreply@github.com
|
b7499060c0a79c00f38dceeb7d49f74d65caeb92
|
46ede75d991a5199bd0a9aa3e459a3f7b803e326
|
/src/hadoop/users/run_job.py
|
777f8590eac7e1d15e44625e615db7d1ebabf385
|
[] |
no_license
|
PiotrPilip/ec2018
|
001e7e9e5ac72dc1ddfab8ec78c5021872a866be
|
8ca00b5e333acac4966fc6415ecbea74704fe274
|
refs/heads/master
| 2020-03-09T02:01:16.311929
| 2018-04-07T13:43:12
| 2018-04-07T13:43:12
| 128,530,285
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import subprocess
from configparser import ConfigParser
import sys
# Launch a Hadoop Streaming job (mapper.py / reducer.py) over the users dataset.
# Paths to the hadoop binary and streaming jar come from ../../../main.conf;
# the single CLI argument numbers the output directory.
cfg = ConfigParser()
cfg.read('../../../main.conf')
# NOTE(review): hard-coded absolute data path — presumably machine-specific; verify.
data_path='/home/piotr/hackathon/ec-challenge-2018/data/users/'
number=sys.argv[1]
command = [
cfg['HADOOP']['hadoop_path'],
'jar',
cfg['HADOOP']['streamer_path'],
'-mapper', 'mapper.py',
'-reducer', 'reducer.py',
'-input', data_path+'/input/*',
'-output', data_path+'/output'+str(number)
]
print(command)
subprocess.run(command)
|
[
"piotr.pilip@gmail.com"
] |
piotr.pilip@gmail.com
|
2c847ac862a19504844ac990e08a08d707f2148c
|
a27adc4fd97e6d0ebbbb6e308d38789fee02a135
|
/model_lstm_selfattention.py
|
ebeee4992d52e5afb5d3116667983456ce1d9406
|
[] |
no_license
|
NUST-Machine-Intelligence-Laboratory/GASA
|
057d498dd99a81e5d10e8668d328325eda8df2e7
|
0e66894e63f980b60916803b3a25096bcb5cfa1f
|
refs/heads/main
| 2023-07-30T15:36:07.377226
| 2021-09-25T16:17:14
| 2021-09-25T16:17:14
| 410,225,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,629
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
import argparse
import torch
import os
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.optim import lr_scheduler
import torch.backends.cudnn as cudnn
class LSTMClassifier(nn.Module):
"""Bi-LSTM text classifier with learned (MLP-based) self-attention pooling.

Pipeline: embedding -> dropout -> (bi)LSTM -> sum the two directions ->
attention-weighted pooling over time -> linear stack -> class logits.
NOTE(review): indentation was lost in this paste; lines preserved byte-identically.
NOTE(review): `vocab_size`/`emb_dim` arguments are immediately overwritten from
emb_vectors.shape, so pretrained vectors are effectively required.
"""
def __init__(self, vocab_size=50000, emb_dim=100, emb_vectors=None,
emb_dropout=0.3,
lstm_dim=2048, lstm_n_layer=2, lstm_dropout=0.3,
bidirectional=True, lstm_combine='add',
n_linear=2, linear_dropout=0.5, n_classes=200,
crit=nn.CrossEntropyLoss()):
super().__init__()
vocab_size, emb_dim = emb_vectors.shape
n_dirs = bidirectional + 1
# Per-direction hidden size: halved only when directions are concatenated.
lstm_dir_dim = lstm_dim // n_dirs if lstm_combine == 'concat' else lstm_dim
self.lstm_n_layer = lstm_n_layer
self.n_dirs = n_dirs
self.lstm_dir_dim = lstm_dir_dim
self.lstm_dim = lstm_dim
self.lstm_combine = lstm_combine
self.embedding_layer = nn.Embedding(*emb_vectors.shape)
self.embedding_layer.from_pretrained(emb_vectors, padding_idx=1)
self.embedding_dropout = nn.Dropout(p=emb_dropout)
self.lstm = nn.LSTM(emb_dim, lstm_dir_dim,
num_layers=lstm_n_layer,
bidirectional=bidirectional,
batch_first=True)
if lstm_n_layer > 1: self.lstm.dropout = lstm_dropout
self.lstm_dropout = nn.Dropout(p=lstm_dropout)
self.att_w = nn.Parameter(torch.randn(1, lstm_dim, 1))
self.linear_layers = [nn.Linear(lstm_dim, lstm_dim) for _ in
range(n_linear - 1)]
self.linear_layers = nn.ModuleList(self.linear_layers)
self.linear_dropout = nn.Dropout(p=linear_dropout)
self.label = nn.Linear(lstm_dim, n_classes)
self.crit = crit
# Small MLP that scores each timestep for attention pooling.
self.mylinear = nn.Sequential(
nn.Linear(2048, 128),
nn.ReLU(True),
nn.Linear(128, 1)
)
self.simplelinear = nn.Linear(128, 1)
# Keep constructor arguments for checkpointing / re-instantiation.
self.opts = {
'vocab_size': vocab_size,
'emb_dim': emb_dim,
'emb_dropout': emb_dropout,
'emb_vectors': emb_vectors,
'lstm_dim': lstm_dim,
'lstm_n_layer': lstm_n_layer,
'lstm_dropout': lstm_dropout,
'lstm_combine': lstm_combine,
'n_linear': n_linear,
'linear_dropout': linear_dropout,
'n_classes': n_classes,
'crit': crit,
}
def attention_net(self, lstm_output, final_state):
"""
Now we will incorporate Attention mechanism in our LSTM model. In this new model, we will use attention to compute soft alignment score corresponding
between each of the hidden_state and the last hidden_state of the LSTM. We will be using torch.bmm for the batch matrix multiplication.
Arguments
---------
lstm_output : Final output of the LSTM which contains hidden layer outputs for each sequence.
final_state : Final time-step hidden state (h_n) of the LSTM
---------
Returns : It performs attention mechanism by first computing weights for each of the sequence present in lstm_output and and then finally computing the
new hidden state.
Tensor Size :
hidden.size() = (batch_size, hidden_size)
attn_weights.size() = (batch_size, num_seq)
soft_attn_weights.size() = (batch_size, num_seq)
new_hidden_state.size() = (batch_size, hidden_size)
"""
# NOTE(review): `final_state` is unused — weights come from the MLP, not from
# similarity with the final hidden state as the docstring suggests.
attn_weights = self.mylinear(lstm_output.view(-1, self.lstm_dim))
attn_weights = F.softmax(attn_weights.view(lstm_output.size(0), -1), dim=1).unsqueeze(2)
finall_output = torch.bmm(lstm_output.transpose(1, 2),attn_weights).squeeze(2)
return finall_output
def forward_self_attention(self, input):
# Embed -> LSTM -> sum forward/backward outputs -> attention pool -> MLP -> logits.
batch_size,seq_len= input.shape
inp = self.embedding_layer(input)
inp = self.embedding_dropout(inp)
lstm_output, (final_h, final_c) = self.lstm(inp)
final_h = final_h.permute(1, 0, 2)
final_h = final_h.contiguous().view(batch_size, -1)
lstm_output = lstm_output.view(batch_size, seq_len, 2, self.lstm_dir_dim)
lstm_output = lstm_output.sum(dim=2)
attn_output = self.attention_net(lstm_output, final_h)
output = self.linear_dropout(attn_output)
for layer in self.linear_layers:
output = layer(output)
output = self.linear_dropout(output)
output = F.relu(output)
logits = self.label(output)
return logits
def loss(self, input,target):
# Cross-entropy over flattened logits/targets; also returns the logits.
logits = self.forward_self_attention(input)
logits_flat = logits.view(-1, logits.size(-1))
target_flat = target.view(-1)
loss = self.crit(logits_flat, target_flat)
return loss, logits_flat
def predict(self, input):
# Inference helper: raw class logits (no softmax).
logits = self.forward_self_attention(input)
return logits
def loss_n_acc(self, input, target):
# Loss plus the raw count of correct argmax predictions (not a ratio).
logits = self.forward_self_attention(input)
logits_flat = logits.view(-1, logits.size(-1))
target_flat = target.view(-1)
loss = self.crit(logits_flat, target_flat)
pred_flat = logits_flat.max(dim=-1)[1]
acc = (pred_flat == target_flat).sum()
return loss, acc.item()
|
[
"noreply@github.com"
] |
NUST-Machine-Intelligence-Laboratory.noreply@github.com
|
6dbdb9df23c1dcfcf17c78c471872da97f3068e0
|
a66a0152c7b37e7aa74ab7fff38704fb45fe3b9c
|
/util/test.py
|
4c5b59c49b7e39e42f64a95084693c909b9602ce
|
[] |
no_license
|
guoyy2017/python_util
|
9c7a1351a0fd20fe187ac39972832abffbce3bad
|
ec90baacd7ca79420a598b701ae960d9337772b8
|
refs/heads/master
| 2021-06-25T23:47:10.056534
| 2020-10-09T01:01:45
| 2020-10-09T01:01:45
| 129,709,653
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/13 上午9:59
# @Author : maidou
# @Site :
# @File : test.py
# @Software: PyCharm
# import random
# from tenacity import retry,stop_after_attempt,stop_after_delay,wait_random
#
#
# @retry(stop=stop_after_delay(2), wait=wait_random(2))
# def do_something_unreliable():
# if random.randint(0, 10) > 1:
# print 'error'
# raise IOError("Broken sauce, everything is hosed!!!111one")
# else:
# return "Awesome sauce!"
#
# print(do_something_unreliable())
import requests,time,hashlib
# Ad-hoc Python 2 script: build a signed SMS-test request and fire it at an
# internal endpoint. Signature = md5(modeId + ts + appKey + appSecret).
# NOTE(review): the secret is hard-coded and md5 is a weak digest — acceptable
# only because this targets an internal test service.
appKey = "TESTAPP"
custId = "20"
modeId = "20"
mobile = "13451909511"
ts = int(time.time())
appSecret = "123456"
h = hashlib.md5()
h.update("%s%s%s%s" % (modeId, ts, appKey, appSecret))
sign = h.hexdigest()
url = "http://172.16.1.14:8888/testVsms?appKey=%s&custId=%s&modeId=%s&mobile=%s&ts=%s&sign=%s" % (appKey, custId, modeId, mobile, ts, sign);
print url
resp = requests.get(url=url)
print resp.text
|
[
"zenglu@trioly.com"
] |
zenglu@trioly.com
|
21282bcc3899ca7438693cb8385d7f12b1b3344e
|
359eda321e365e824598da338bbbd09aeb2d43ff
|
/app/routes.py
|
3f98d5a0ad3e76a00ebf25fe40842e9f92d423cb
|
[] |
no_license
|
SamritiSharma2902/microblog
|
83d9f52669c90872148dbde45883287f56ad2467
|
cdfc477ec14f90c51828d6fc82f9096f0d492e6c
|
refs/heads/master
| 2020-06-11T16:21:59.325021
| 2019-06-27T04:26:55
| 2019-06-27T04:26:55
| 194,021,932
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from app import app
@app.route('/')
@app.route('/index')
def index():
    """Serve the site root (and /index): a minimal static HTML greeting page.

    Indentation restored from the mangled paste.
    """
    return '''
<html>
<head><title>Home</title></head>
<body>hello everyone</body>
</html>'''
|
[
"noreply@github.com"
] |
SamritiSharma2902.noreply@github.com
|
7b8ffa80ccfccb5ef54996687b994643f54378e1
|
6967706d09732e0d7127a3947ef9c0e0054c876f
|
/Webscraper/__init__.py
|
f9d8143f6a908e95aacc4d470b46460e7029a7b7
|
[] |
no_license
|
IoT-master/SuryaTracker
|
3ba6b707a25ef5a3d49580637ad00988967fdc12
|
7f365bcc545ee20f32316c2402f4e26859378467
|
refs/heads/master
| 2023-07-19T11:43:43.039750
| 2021-09-18T01:38:39
| 2021-09-18T01:38:39
| 405,240,828
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,620
|
py
|
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
# TODO: Buildout FirefoxProfile
# from selenium.webdriver import FirefoxProfile
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException, ElementNotInteractableException
from pathlib import Path
import os
from abc import ABC
from PIL import Image
class UnrecognizedOSError(NotImplementedError):
    """Raised when the host operating system is not one the scraper supports."""
    pass
class ElementNotFound(Exception):
    """Raised when an expected page element cannot be located in the DOM."""
    pass
class SimpleLocator:
    """Callable wait-condition for WebDriverWait: truthy once the element at
    `locator` exists and is displayed; False while it is absent.

    Indentation restored from the mangled paste.
    """

    def __init__(self, locator):
        # (By, value) pair, e.g. (By.CSS_SELECTOR, "#submit")
        self.locator = locator

    def __call__(self, browser):
        try:
            return browser.find_element(*self.locator).is_displayed()
        except NoSuchElementException:
            return False
class Clickable:
    """Callable wait-condition for WebDriverWait: truthy once the element at
    `locator` is enabled; False while it is stale or not interactable.

    Indentation restored from the mangled paste.
    """

    def __init__(self, locator):
        # (By, value) pair, e.g. (By.CSS_SELECTOR, "#submit")
        self.locator = locator

    def __call__(self, browser):
        try:
            return browser.find_element(*self.locator).is_enabled()
        except StaleElementReferenceException:
            return False
        except ElementNotInteractableException:
            return False
class SeleniumAddons(ABC):
def __init__(self) -> None:
super().__init__()
self.browser = WebDriver()
def get_tag(self, element_object):
return element_object.tag_name
def get_attribute(self, element_object, attribute):
return element_object.get_attribute(attribute)
def remove_elem(self, element_object):
self.browser.execute_script("arguments[0].remove()", element_object)
def highlight_element(self, element_object, border='1', border_color='red', bg_color='yellow'):
s = f"background: {bg_color}; border: {border}px solid {border_color};"
driver = element_object._parent
driver.execute_script(
"arguments[0].setAttribute('style', arguments[1]);", element_object, s)
def drag_and_drop(self, source_element_object, destination_element_object):
ActionChains(self.browser).drag_and_drop(
source_element_object, destination_element_object).perform()
def scroll_and_moves_mouse_to(self, element_object):
ActionChains(self.browser).move_to_element(element_object).perform()
def scroll_to(self, lines_down, lines_right=0):
self.browser.execute_script(
f"window.scrollTo({lines_right}, {lines_down})")
def scroll_to_element_location(self, element_object):
coord = element_object.location
self.browser.execute_script(
"window.scrollTo(arguments[0], arguments[1]);", coord['x'], coord['y'])
def scroll_into_view(self, element_object):
self.browser.execute_script(
"arguments[0].scrollIntoView();", element_object)
def remove_element_from_dom(self, element_object):
self.browser.execute_script("var element = arguments[0]; element.parentNode.removeChild(element);", element_object)
def screenshot_and_crop(self, element_object, filename="cropped_image.png"):
location = element_object.location
size = element_object.size
x = location['x']
y = location['y']
w = x + size['width']
h = y + size['height']
self.browser.save_screenshot(filename)
full_image = Image.open(filename)
cropped_image = full_image.crop((x, y, w, h))
cropped_image.save(filename)
def is_present(self, element_object):
try:
if element_object.is_displayed():
return True
except:
return False
def update_browser(self):
for handle in self.browser.window_handles:
self.browser.switch_to.window(handle)
    def wait_for_possible_element(self, partial_dom, locator, wait_time=10):
        """Block until the project-defined SimpleLocator condition holds."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(SimpleLocator(locator))
    def wait_until_css_element_object_found(self, partial_dom, css_param, wait_time=10):
        """Block until an element matching *css_param* is visible."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located(
            (By.CSS_SELECTOR, css_param)))
    def wait_until_css_elements_object_found(self, partial_dom, css_param_list, wait_time=10):
        """Block until every selector in *css_param_list* has a visible match."""
        # NOTE: wait_time applies per selector, not to the list as a whole.
        wait = WebDriverWait(partial_dom, wait_time)
        for css_param in css_param_list:
            wait.until(EC.visibility_of_element_located(
                (By.CSS_SELECTOR, css_param)))
    def wait_until_css_element_object_is_clickable(self, partial_dom, css_param, wait_time=10):
        """Block until the element matching *css_param* is clickable (project Clickable condition)."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(Clickable((By.CSS_SELECTOR, css_param)))
    def wait_until_name_element_object_found(self, partial_dom, name_param, wait_time=10):
        """Block until an element with name attribute *name_param* is visible."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located((By.NAME, name_param)))
    def wait_until_partial_link_text_element_object_found(self, partial_dom, partial_link_text, wait_time=10):
        """Block until a link containing *partial_link_text* is visible."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located(
            (By.PARTIAL_LINK_TEXT, partial_link_text)))
    def wait_until_class_name_element_object_found(self, partial_dom, class_name, wait_time=10):
        """Block until an element with CSS class *class_name* is visible."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located(
            (By.CLASS_NAME, class_name)))
    def wait_until_id_element_object_found(self, partial_dom, id_object, wait_time=10):
        """Block until an element with id *id_object* is visible."""
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located((By.ID, id_object)))
    def wait_until_partial_link_text_object_found(self, partial_dom, id_object, wait_time=10):
        """Block until a link containing the given text is visible.

        NOTE(review): parameter is named *id_object* but is used as partial
        link text; near-duplicate of
        wait_until_partial_link_text_element_object_found — confirm intent.
        """
        wait = WebDriverWait(partial_dom, wait_time)
        wait.until(EC.visibility_of_element_located(
            (By.PARTIAL_LINK_TEXT, id_object)))
def multi_select_in_list(self, element_objects, labels):
for option in element_objects:
if option.text in labels:
option.click()
def select_in_list(self, element_objects, labels):
for option in element_objects:
if option.text in labels:
option.click()
break
    def __enter__(self):
        """Context-manager entry: yield the helper itself."""
        return self
    def __exit__(self, *args):
        """Context-manager exit: always quit the underlying browser."""
        print('Closing browser instance')
        self.browser.quit()
class CustomChrome(SeleniumAddons):
    """Chrome driver wrapper with per-OS bundled chromedriver discovery."""
    def __init__(self, incognito=True, path_to_chrome=None, headless=False, disable_gpu=False, window_size=False, disable_extensions=True) -> None:
        options = ChromeOptions()
        # https://stackoverflow.com/questions/64927909/failed-to-read-descriptor-from-node-connection-a-device-attached-to-the-system
        options.add_experimental_option('excludeSwitches', ['enable-logging'])
        if disable_extensions:
            options.add_argument("disable-extensions")
        if incognito:
            options.add_argument("incognito")
        if headless:
            options.add_argument("headless")
        if disable_gpu:
            options.add_argument("disable-gpu")
        if window_size:
            options.add_argument('window-size=1200x1200')
        if path_to_chrome is None:
            # BUG FIX: os.name is 'posix' on BOTH macOS and Linux, so the
            # original `os.name == 'darwin'` branch was unreachable and
            # macOS always picked the Linux driver. sys.platform
            # distinguishes them ('darwin' vs 'linux').
            import sys
            if os.name == 'nt':
                path_to_chrome = str(
                    Path('./ChromeDrivers/Windows/chromedriver.exe').absolute())
            elif sys.platform == 'darwin':
                path_to_chrome = str(
                    Path('./ChromeDrivers/Mac/chromedriver').absolute())
            elif os.name == 'posix':
                path_to_chrome = str(
                    Path('./ChromeDrivers/Linux/chromedriver').absolute())
            else:
                raise UnrecognizedOSError(
                    'Unable to recognize Operating System')
        self.browser = Chrome(path_to_chrome, options=options)
class CustomBrave(SeleniumAddons):
    """Brave browser wrapper driven through chromedriver."""
    def __init__(self, incognito=True, headless=False, disable_gpu=False) -> None:
        options = ChromeOptions()
        options.add_argument("disable-extensions")
        if incognito:
            options.add_argument("incognito")
        if headless:
            options.add_argument("headless")
        if disable_gpu:
            options.add_argument("disable-gpu")
        # BUG FIX: os.name is 'posix' on BOTH macOS and Linux, so the
        # original 'darwin' branch never ran, and the macOS Brave binary
        # path was being assigned in the Linux ('posix') branch. Use
        # sys.platform to tell them apart.
        import sys
        if os.name == 'nt':
            path_to_chrome = str(
                Path('./ChromeDrivers/Windows/chromedriver.exe').absolute())
            options.binary_location = str(
                Path('/Program Files/BraveSoftware/Brave-Browser/Application/brave.exe'))
        elif sys.platform == 'darwin':
            path_to_chrome = str(
                Path('./ChromeDrivers/Mac/chromedriver').absolute())
            options.binary_location = "/Applications/Brave Browser.app/Contents/MacOS/Brave Browser"
        elif os.name == 'posix':
            # NOTE(review): no Linux Brave binary path was configured in the
            # original either — relies on chromedriver's default lookup.
            path_to_chrome = str(
                Path('./ChromeDrivers/Linux/chromedriver').absolute())
        else:
            raise UnrecognizedOSError('Unable to recognize Operating System')
        self.browser = Chrome(path_to_chrome, options=options)
class CustomFirefox(SeleniumAddons):
    """Firefox driver wrapper; currently only implemented for Windows."""
    def __init__(self, geckodriver_path=None, incognito=True, headless=False, service_log_path=None) -> None:
        options = FirefoxOptions()
        if incognito:
            options.add_argument("--incognito")
        if headless:
            options.add_argument("--headless")
        # NOTE(review): os.name is 'posix' on macOS AND Linux, so both fall
        # into the not-implemented branch below; sys.platform would be
        # needed to distinguish them once those platforms are supported.
        if os.name == 'nt':
            if geckodriver_path is None:
                geckodriver_path = str(
                    Path('./FirefoxDrivers/Windows/geckodriver.exe').absolute())
            if service_log_path is None:
                service_log_path = str(
                    Path('./FirefoxDrivers/Windows/gecko.log').absolute())
        elif os.name == 'posix':
            # TODO: Test out this case
            raise UnrecognizedOSError(
                'Selenium for Firefox not yet impliemented')
        else:
            raise UnrecognizedOSError('Unable to recogized Operating System')
        self.browser = Firefox(executable_path=geckodriver_path,
                               options=options, service_log_path=service_log_path)
|
[
"IoT-master@users.noreply.github.com"
] |
IoT-master@users.noreply.github.com
|
27d022b940a95dfd27b4d8c3e0624874b64f09df
|
d619cc9b1b845a38dd7b8aa2a7e25c8e88e5e689
|
/servicex_for_trexfitter/__init__.py
|
61ac62a86a8e267aaaa79b3b0e3be95b807d909e
|
[
"BSD-3-Clause"
] |
permissive
|
kyungeonchoi/ServiceXforTRExFitter
|
bc7f9cbeb9b53e1ce9b69902874c9d3631efa1c9
|
9963e0d3232a284d5abaecc974d4645dbb6c48e4
|
refs/heads/master
| 2021-12-15T22:22:09.166158
| 2021-12-13T07:32:42
| 2021-12-13T07:32:42
| 239,620,027
| 3
| 0
|
BSD-3-Clause
| 2021-12-13T07:32:43
| 2020-02-10T21:45:49
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
#!/usr/bin/env python
# Package entry point: re-export the public class and pin the version.
from .servicex_for_trexfitter import ServiceXTRExFitter
__all__ = ['ServiceXTRExFitter', ]
__version__ = '1.1.0'
|
[
"kyungeonchoi@utexas.edu"
] |
kyungeonchoi@utexas.edu
|
8bf19dfd6e13414fdef0ececea1ea99b8cc8585e
|
cc196cf8223211760d18bbbb5ebf100a3357acf0
|
/blog/models.py
|
da036d969d9d1027cdb9af1739ba1b3eb38b18cc
|
[] |
no_license
|
yangsoonkyu/portfolio1
|
7c232929451f592a162f7aa1c0301315e851ccd2
|
a7816e91604dac2a0e267f2366121e0f8ecfe94a
|
refs/heads/master
| 2020-03-26T05:37:53.262179
| 2018-11-28T06:29:44
| 2018-11-28T06:29:44
| 144,566,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
from django.db import models
# Create your models here.
from django.urls import reverse
from django.utils import timezone
class Post(models.Model):
    """A blog post with an author, optional publish timestamp, and title/body."""
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default= timezone.now)
    # published_date stays NULL until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    def publish(self):
        """Stamp the post as published now and persist it."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        """Canonical URL for this post (blog:post_detail by primary key)."""
        return reverse('blog:post_detail', args=[str(self.id)])
    class Meta:
        # Newest posts first.
        ordering = ['-id']
|
[
"t_j4331@naver.com"
] |
t_j4331@naver.com
|
9e776300b56dbcbf2a88c2990eded7d76a85d56a
|
8d9b194b8a973e4d66fa74fb700e8027e18fd147
|
/__init__.py
|
3726c980b93ee4bb737e5f996c8d4f761bd0df16
|
[] |
no_license
|
horton2009/neural_ANN_python
|
e20c2ac3179cd604702466c6132268963693f40d
|
67cc3a380b7fd469cba3d0765f8947f5e9c8e3aa
|
refs/heads/master
| 2016-09-06T02:38:10.502561
| 2014-03-14T01:50:04
| 2014-03-14T01:50:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
"""PyLibNeural import."""
from neural import *
|
[
"horton.hu.5@gmail.com"
] |
horton.hu.5@gmail.com
|
62c4e091b12511373a8830d65994e2532da4fae5
|
c7e2c9b8222ed2d76d0a689dd958691096b7cbdd
|
/tube4droid/__init__.py
|
2b4f26ab47fde493281b89861593f99e9b94f99d
|
[
"Apache-2.0"
] |
permissive
|
digsim/tube4droid
|
b3682a6cd1cc32ca2d7323e3f95053d25d971116
|
294ea717a7ef0a639780afbc4b73c6eb17d31d91
|
refs/heads/master
| 2021-01-17T06:02:54.409667
| 2017-01-12T19:21:59
| 2017-01-12T19:21:59
| 50,057,447
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
import pkg_resources
# Derive __version__ from the installed distribution metadata so it is
# defined in exactly one place (setup.py / setup.cfg).
__version__ = pkg_resources.get_distribution("tube4droid").version
|
[
"andreas.ruppen@gmail.com"
] |
andreas.ruppen@gmail.com
|
1172bb2301104443cb4977f752f66a816a1d7c1a
|
97ca3a885a4eff415043d65c9330bea70e2477d9
|
/unique/level/tools/carve.py
|
409d13f149cd3d6c8287385ec1fe4aa92262bb90
|
[] |
no_license
|
nyeogmi/nyeoglike
|
d5b22e814549aaed3c9c3f7eacda5ddfcb37ff15
|
eebd8c0efc761a635943f12e49312298cdf33835
|
refs/heads/main
| 2023-02-16T16:43:43.643462
| 2021-01-03T00:15:11
| 2021-01-03T00:15:11
| 316,885,156
| 1
| 0
| null | 2020-12-04T03:04:45
| 2020-11-29T05:46:26
|
Python
|
UTF-8
|
Python
| false
| false
| 18,415
|
py
|
from typing import Set
from .carve_op import *
from .interior_designer import InteriorDesigner
from .recs import *
# TODO: Support random rotation/mirroring of this
# TODO: Veto if a room becomes too small
class Carve(object):
    """Level-carving state machine: rooms, tiles, hints, and an undo log.

    Every mutation goes through the operation log so a veto can roll the
    map back to a known-good point.
    """
    def __init__(self, grid: Grid):
        self._grid = grid
        self._rooms = FastGensym()
        # tile ownership: room -> set of occupied cells
        self._room_tiles: OneToMany[RoomHandle, V2] = OneToMany()
        self._room_types: Dict[RoomHandle, RoomType] = {}
        self._room_frozen = set()
        self._links: List[Link] = []
        self._hints: Dict[Hint, Set[V2]] = {}
        # append-only log of CarveOps, popped on veto rollback
        self._operation_log = []
    def _add_hint(self, v2: V2, hint: Hint):
        """Tag cell *v2* with *hint* (creates the hint set on first use)."""
        self._hints[hint] = self._hints.get(hint, set())
        self._hints[hint].add(v2)
    def to_interior(self) -> InteriorDesigner:
        """Hand the carved layout off to the interior-decoration phase."""
        return InteriorDesigner(self._room_tiles, self._room_types, self._hints)
    def permute_at_random(self):
        """Randomly mirror (per axis) and/or transpose the whole layout in place."""
        _room_tiles_2: OneToMany[RoomHandle, V2] = OneToMany()
        _hints_2: Dict[Hint, Set[V2]] = {}
        # TODO: Is one rotation more likely than another w/ this?
        mul_x = random.choice([-1, 1])
        mul_y = random.choice([-1, 1])
        swap = random.choice([True, False])
        for rh, v2 in self._room_tiles.all():
            v2 = V2.new(mul_x * v2.x, mul_y * v2.y)
            if swap:
                v2 = V2.new(v2.y, v2.x)
            _room_tiles_2.add(rh, v2)
        for hint, set_ in self._hints.items():
            set2 = set()
            for v2 in set_:
                # same transform must be applied to hints as to tiles
                v2 = V2.new(mul_x * v2.x, mul_y * v2.y)
                if swap:
                    v2 = V2.new(v2.y, v2.x)
                set2.add(v2)
            _hints_2[hint] = set2
        self._room_tiles = _room_tiles_2
        self._hints = _hints_2
    @contextmanager
    def veto_point(self):
        """Context manager: on Veto inside the block, undo back to entry state.

        Yields a VetoBox whose _vetoed flag records whether a rollback happened.
        """
        pt = len(self._operation_log)
        box = VetoBox()
        try:
            yield box
            box._vetoed = False
        except Veto as v:
            # roll the log back to where it was when the context was entered
            while len(self._operation_log) > pt:
                self._undo_op(self._operation_log.pop())
            box._vetoed = True
    def veto(self):
        """Abort the current carving attempt (caught by veto_point)."""
        raise Veto
    def _create_room(self, room_type: RoomType) -> RoomHandle:
        """Allocate a new room handle (logged, undoable)."""
        return self._do_log(CreateRoom(room_type))
    def _carve_point(self, v2: V2, new_owner: Optional[RoomHandle]):
        """Assign cell *v2* to *new_owner* (None clears it); logged for undo."""
        old_owner = self._room_tiles.get_a(v2)
        self._do_log(CarveTile(position=v2, old_owner=old_owner, new_owner=new_owner))
    def freeze(self, rh: RoomHandle):
        """Mark room *rh* immutable; further carving over it triggers a veto."""
        if rh in self._room_frozen:
            return
        self._do_log(FreezeRoom(rh))
    def link_rooms(self, link_type: LinkType, rh0: RoomHandle, rh1: RoomHandle):
        """Record a pending connection (door/counter/...) between two rooms."""
        self._do_log(LinkRooms(link_type, rh0, rh1))
    def _do_log(self, operation: CarveOp):
        """Append *operation* to the undo log, then apply it."""
        self._operation_log.append(operation)
        return self._do_op(operation)
    def _do_op(self, operation: CarveOp):
        """Apply one logged operation to the live state."""
        if isinstance(operation, CreateRoom):
            rh = RoomHandle(self._rooms.gen())
            self._room_types[rh] = operation.room_type
            return rh
        elif isinstance(operation, CarveTile):
            if operation.new_owner is None:
                self._room_tiles.remove_b(operation.position)
            else:
                self._room_tiles.add(operation.new_owner, operation.position)
        elif isinstance(operation, FreezeRoom):
            self._room_frozen.add(operation.room)
        elif isinstance(operation, LinkRooms):
            self._links.append(
                Link(operation.link_type, operation.room0, operation.room1)
            )
        else:
            raise AssertionError("what is {}?".format(operation))
    def _undo_op(self, operation: CarveOp):
        """Reverse one operation; must mirror _do_op exactly, case by case."""
        if isinstance(operation, CreateRoom):
            # FastGensym.ungen returns the most recently generated id
            rh = self._rooms.ungen()
            del self._room_types[RoomHandle(rh)]
        elif isinstance(operation, CarveTile):
            if operation.old_owner is None:
                self._room_tiles.remove_b(operation.position)
            else:
                self._room_tiles.add(operation.old_owner, operation.position)
        elif isinstance(operation, FreezeRoom):
            self._room_frozen.discard(operation.room)
        elif isinstance(operation, LinkRooms):
            self._links.pop()
        else:
            raise AssertionError("what is {}?".format(operation))
    def carve(
        self, r: R2, room_type: RoomType, ignore: List[RoomHandle] = None
    ) -> RoomHandle:
        """Carve rectangle *r* as a new room, clearing a 1-cell border around it.

        Vetoes if a frozen room is touched or any overwritten room is
        "ruined" (shrunk too much). Rooms in *ignore* are exempt.
        """
        ignore = ignore or []
        assert isinstance(ignore, List)
        assert isinstance(r, R2)
        h = self._create_room(room_type)
        affected_rooms = {}
        # iterate r plus a one-tile margin; the margin is cleared, not claimed
        for v in r.expand(V2.new(1, 1)):
            existing_room = self._room_tiles.get_a(v)
            if existing_room in ignore:
                continue
            if existing_room in self._room_frozen:
                self.veto()
            affected_rooms[existing_room] = len(list(self._room_tiles.get_bs(v)))
            if v in r:
                self._carve_point(v, h)
            else:
                self._carve_point(v, None)
        for r, previous_area in affected_rooms.items():
            new_area = len(list(self._room_tiles.get_bs(r)))
            if self._ruined(r, previous_area, new_area):
                self.veto()
        return h
    def _ruined(self, r, previous_area, new_area):
        """True if overwriting shrank room *r* below acceptable size."""
        if r in self._room_frozen:
            return True
        if new_area < 0.5 * previous_area:
            return True
        if (previous_area > 6) and (new_area < 6):
            return True
        # TODO: Check for no longer contiguous?
        # TODO: Check for wonky shape
        return False
    def expand_densely(self, r: RoomHandle):
        """Grow room *r* outward to fill nearby unclaimed gaps until stable."""
        claimed = lambda tile: self._room_tiles.get_a(tile) is not None
        claimed_not_me = lambda tile: self._room_tiles.get_a(tile) not in [None, r]
        claimed_me = lambda tile: self._room_tiles.get_a(tile) == r
        change_made = True
        while change_made:
            to_add = set()
            # TODO: Make sure all the neighbors of the tile we wanna get are either unclaimed or claimed by the current room,
            # so we don't edge into a closet or something. I'm p. sure bugs related to this are happening.
            for d in [V2.new(-1, 0), V2.new(0, -1), V2.new(1, 0), V2.new(0, 1)]:
                for t in self._room_tiles.get_bs(r):
                    t1 = claimed(t + d)
                    if t1:
                        continue
                    for t2 in (t + d).neighbors():
                        if claimed_not_me(t2):
                            continue
                        # bridge a one-tile gap back to ourselves
                        if claimed_me(t + d + d):
                            to_add.add(t + d)
                            continue
                        t2 = claimed(t + d + d)
                        if t2:
                            continue
                        t3 = claimed(t + d + d + d)
                        t4 = claimed(t + d + d + d + d)
                        t5 = claimed(t + d + d + d + d + d)
                        # expand only toward something claimed within 3-5 tiles
                        if (t5 and not any([t4, t3])) or (t4 and not any([t3])) or t3:
                            to_add.add(t + d)
            change_made = len(to_add) > 0
            for t in to_add:
                self._carve_point(t, r)
    def erode(self, r: RoomHandle, iterations):
        """Shave isolated protrusions off room *r*, *iterations* passes."""
        claimed = lambda tile: self._room_tiles.get_a(tile) is not None
        directions1 = [V2.new(-1, 0), V2.new(0, -1), V2.new(1, 0), V2.new(0, 1)]
        # pair each direction with the next (perpendicular) one
        directions2 = directions1[1:] + directions1[:1]
        for i in range(iterations):
            to_remove = set()
            for d1, d2 in zip(directions1, directions2):
                for t in self._room_tiles.get_bs(r):
                    tu1 = claimed(t + d1)
                    tu2 = claimed(t + d1 + d1)
                    tl1 = claimed(t + d2)
                    tl2 = claimed(t + d2 + d2)
                    if not (tu1 or tu2 or tl1 or tl2):
                        to_remove.add(t)
            for t in to_remove:
                self._carve_point(t, None)
    def erode_1tile_wonk(self, r: RoomHandle):  # removes one-tile bacon strips
        """Remove tiles of *r* with no claimed neighbor on either side of an axis."""
        claimed = lambda tile: self._room_tiles.get_a(tile) is not None
        to_remove = set()
        directions = [V2.new(-1, 0), V2.new(0, -1)]
        for d in directions:
            for t in self._room_tiles.get_bs(r):
                tl = claimed(t + d)
                tr = claimed(t - d)
                if not (tl or tr):
                    to_remove.add(t)
        for t in to_remove:
            self._carve_point(t, None)
    def ident_rooms(self, room_type: RoomType) -> List[RoomHandle]:
        """Return all rooms of the given type."""
        found = []
        for room in self._room_tiles.all_as():
            if self._room_types[room] != room_type:
                continue
            found.append(room)
        return found
    def build_links(self):
        """Realize every recorded room link as a door, counter, or merge.

        Door candidates are the empty border cells of room0 that face room1
        two tiles away; vetoes if a link has no candidate at all.
        """
        claimed = lambda owner, tile: self._room_tiles.get_a(tile) == owner
        not_claimed = lambda tile: claimed(None, tile)
        links_hallway = self._create_room(RoomType.Hallway)
        for link in self._links:
            if link.link_type == LinkType.Ignore:
                continue
            room0 = link.room0
            room1 = link.room1
            dw = lambda label, direction: [
                (label, t + direction)
                for t in self._room_tiles.get_bs(room0)
                if not_claimed(t + direction) and claimed(room1, t + direction * 2)
            ]
            door_worthy = set(
                dw("horiz", V2.new(0, 1))
                + dw("horiz", V2.new(0, -1))
                + dw("vert", V2.new(1, 0))
                + dw("vert", V2.new(-1, 0))
            )
            door_worthy_vecs = set(i[1] for i in door_worthy)
            if len(door_worthy) == 0:
                self.veto()
            # Find the door segment
            if link.link_type in [LinkType.Counter, LinkType.Door]:
                if any(
                    self._room_types[r] in [RoomType.EntryZone, RoomType.Closet]
                    for r in [room0, room1]
                ):
                    # must be centered
                    # all of the xs will be the same or all the ys, so it doesn't matter
                    sorted_spots = sorted(door_worthy, key=lambda i: (i[1].x, i[1].y))
                    door = sorted_spots[len(sorted_spots) // 2][1]
                else:
                    # otherwise prefer grid-aligned spots (see _door_score)
                    scores = [self._door_score(*v) for v in door_worthy]
                    max_score = max(scores)
                    spots_best = [
                        site
                        for site, score in zip(door_worthy, scores)
                        if score == max_score
                    ]
                    door = random.choice(list(spots_best))[1]
            if link.link_type == LinkType.Door:
                self._carve_point(door, links_hallway)
            elif link.link_type == LinkType.Counter:
                # the non-door border cells become the counter itself
                self._carve_point(door, room0)
                for v in door_worthy_vecs:
                    if v == door:
                        continue
                    self._carve_point(v, room0)
                    self._add_hint(v, Hint.Counter)
                    for n in v.ortho_neighbors():
                        if n in door_worthy_vecs:
                            continue
                        self._add_hint(n, Hint.Counterside)
            elif link.link_type == LinkType.Complete:
                for i in door_worthy:
                    # TODO: Go through neighbors and look for leftover "pillars" to get rid of.
                    # See the screenshot I sent to Bhijn as an example
                    self._carve_point(i[1], room0)  # add to room0
            else:
                raise AssertionError(
                    "unrecognized link type: {}".format(link.link_type)
                )
    def _door_score(self, label: str, v2: V2):
        """Score 1 for cells aligned with the grid's preferred door offsets."""
        if label == "horiz" and v2.x % self._grid.x == self._grid.cx:
            return 1
        if label == "vert" and v2.y % self._grid.y == self._grid.cy:
            return 1
        return 0
    # == snake support
    def snake(self, room: RoomHandle, direction: "Cardinal") -> "Snake":
        """Start a Snake builder growing from *room* toward *direction*."""
        from .snake import Snake
        return Snake(self, room, direction)
    def tunnel_east(
        self,
        room_handle: RoomHandle,
        size: V2,
        room_type: RoomType,
        min_contact=None,
        use_ignore=False,
        rule: Rule = Rule.RNG,
    ) -> RoomHandle:
        """Carve a *size* room east of *room_handle*'s east edge."""
        assert isinstance(room_handle, RoomHandle)
        tiles = list(self._room_tiles.get_bs(room_handle))
        if len(tiles) == 0:
            self.veto()
        # candidate anchors: the room's easternmost column
        max_x = max(t.x for t in tiles)
        rhs_tiles = [t for t in tiles if t.x == max_x]
        if min_contact is None:
            min_contact = size.y if use_ignore else 1
        return self._tunnel(
            room_handle,
            size,
            room_type,
            min_contact,
            use_ignore,
            rhs_tiles,
            V2.new(1, 0),
            rule,
        )
    def tunnel_south(
        self,
        room_handle: RoomHandle,
        size: V2,
        room_type: RoomType,
        min_contact=None,
        use_ignore=False,
        rule: Rule = Rule.RNG,
    ) -> RoomHandle:
        """Carve a *size* room south of *room_handle*'s south edge."""
        assert isinstance(room_handle, RoomHandle)
        tiles = list(self._room_tiles.get_bs(room_handle))
        if len(tiles) == 0:
            self.veto()
        max_y = max(t.y for t in tiles)
        bot_tiles = [t for t in tiles if t.y == max_y]
        if min_contact is None:
            min_contact = size.x if use_ignore else 1
        return self._tunnel(
            room_handle,
            size,
            room_type,
            min_contact,
            use_ignore,
            bot_tiles,
            V2.new(0, 1),
            rule,
        )
    def tunnel_west(
        self,
        room_handle: RoomHandle,
        size: V2,
        room_type: RoomType,
        min_contact=None,
        use_ignore=False,
        rule: Rule = Rule.RNG,
    ) -> RoomHandle:
        """Carve a *size* room west of *room_handle*'s west edge."""
        assert isinstance(room_handle, RoomHandle)
        tiles = list(self._room_tiles.get_bs(room_handle))
        if len(tiles) == 0:
            self.veto()
        min_x = min(t.x for t in tiles)
        # shift anchors so the new room's right edge abuts the old room
        lhs_tiles = [t - V2.new(size.x - 1, 0) for t in tiles if t.x == min_x]
        if min_contact is None:
            min_contact = size.y if use_ignore else 1
        return self._tunnel(
            room_handle,
            size,
            room_type,
            min_contact,
            use_ignore,
            lhs_tiles,
            V2.new(-1, 0),
            rule,
        )
    def tunnel_north(
        self,
        room_handle: RoomHandle,
        size: V2,
        room_type: RoomType,
        min_contact=None,
        use_ignore=False,
        rule: Rule = Rule.RNG,
    ) -> RoomHandle:
        """Carve a *size* room north of *room_handle*'s north edge."""
        assert isinstance(room_handle, RoomHandle)
        tiles = list(self._room_tiles.get_bs(room_handle))
        if len(tiles) == 0:
            self.veto()
        min_y = min(t.y for t in tiles)
        lhs_tiles = [t - V2.new(0, size.y - 1) for t in tiles if t.y == min_y]
        if min_contact is None:
            min_contact = size.x if use_ignore else 1
        return self._tunnel(
            room_handle,
            size,
            room_type,
            min_contact,
            use_ignore,
            lhs_tiles,
            V2.new(0, -1),
            rule,
        )
    def _tunnel(
        self,
        room_handle: RoomHandle,
        size: V2,
        room_type: RoomType,
        min_contact: int,
        use_ignore: bool,
        tiles: List[V2],
        direction: V2,
        rule: Rule,
    ):
        """Shared tunnel implementation: pick a candidate site off *tiles* in
        *direction* with enough contact, then carve it (veto if none)."""
        sites = []
        for t in tiles:
            if use_ignore:
                # adjacent placement: new room touches the old one directly
                sites.append((t + direction).sized(size))
            else:
                # leave a one-tile gap for the connecting wall
                sites.append((t + direction * 2).sized(size))
        # TODO: Only look at a representative set of tiles from the site
        sites = [
            site
            for site in sites
            if len(
                [
                    t
                    for t in site
                    if self._has_contact(
                        room_handle, site, t, 1 if use_ignore else 2, -direction
                    )
                ]
            )
            >= min_contact
        ]
        if len(sites) == 0:
            self.veto()
        return self.carve(
            self._choose(sites, rule),
            room_type,
            ignore=[room_handle] if use_ignore else [],
        )
    def _choose(self, sites: List[R2], rule: Rule):
        """Pick one site: uniformly (RNG) or maximizing adjacency (Dense)."""
        if rule == Rule.RNG:
            return random.choice(sites)
        if rule == Rule.Dense:
            scores = [
                len([t for t in site if self._has_contact(None, site, t, 2)])
                for site in sites
            ]
            max_score = max(scores)
            sites_with_max = [
                site for site, score in zip(sites, scores) if score == max_score
            ]
            return random.choice(sites_with_max)
        raise AssertionError("unknown rule: %s" % rule)
    def _has_contact(
        self,
        room_handle: Optional[RoomHandle],
        site: R2,
        tile: V2,
        distance: int,
        direction: Optional[V2] = None,
    ) -> bool:
        """True if *tile* touches *room_handle* (or any room, when None) at
        exactly *distance* with only empty tiles in between."""
        assert direction is None or isinstance(direction, V2)
        for dir in (
            [direction]
            if direction is not None
            else [
                V2.new(1, 0),
                V2.new(0, 1),
                V2.new(-1, 0),
                V2.new(0, -1),
            ]
        ):
            t = tile
            if room_handle is None:
                # contact w/ any room
                if self._room_tiles.get_a(t + dir * distance) is not None:
                    continue
            else:
                # contact period
                if self._room_tiles.get_a(t + dir * distance) != room_handle:
                    continue
            for i in range(distance - 1):
                t = t + dir
                if t in site or self._room_tiles.get_a(t) is not None:
                    break
            else:
                # ends in the right place and all the tiles on the way are empty
                return True
from typing import TYPE_CHECKING
# Import Snake only for type annotations to avoid a circular import at runtime.
if TYPE_CHECKING:
    from .snake import Snake
|
[
"59642025+nyeogmi@users.noreply.github.com"
] |
59642025+nyeogmi@users.noreply.github.com
|
1c89798fbe88f42b6a46f8f656a09907da3bef01
|
d4097c14850bda6ee05ddc7ff3a45ecbe3112b0d
|
/5.py
|
d90fcd642d0d2092670eac2d9ba104606306003e
|
[] |
no_license
|
om-100/assignment-
|
410b82e83747db04d19cbddf55c62579ff92f44c
|
d35b8bad1b4e4b823d982f78ff5c3a2d415391a7
|
refs/heads/master
| 2022-11-12T11:19:53.292393
| 2020-06-27T03:04:35
| 2020-06-27T03:04:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
# Question number 5
def adding(str):
    """Return *str* + 'ing', or + 'ly' if it already ends in 'ing'.

    Strings shorter than 3 characters are returned unchanged.
    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    interface compatibility with existing callers.
    """
    length = len(str)
    if length > 2:
        # FIX: the original tested str[3:] == 'ing', which only matched
        # 6-character words ending in 'ing'; use a suffix check instead.
        if str[-3:] == 'ing':
            str = str + 'ly'
        else:
            str = str + 'ing'
    return str
print(adding('ab'))
print(adding('abc'))
print(adding('string'))
|
[
"chelsea.om4@gmail.com"
] |
chelsea.om4@gmail.com
|
e0abd7a599b221a5c0edf5d93cd6b4d2fc66c7f5
|
b0a6159969977a024697da5c21cad1906f7ea092
|
/dynabic-python-platform/api/samples/TestApi3.py
|
841f300fd52e3847594dee5dd509dc85a57a39d4
|
[] |
no_license
|
dynabic/dynabic-python
|
244bd2305afe0ac5582aaf77064635a0481015b6
|
0b5e71525b1f95751e23b44f637ee91724d44681
|
refs/heads/master
| 2020-04-22T10:11:15.026362
| 2012-07-09T11:01:22
| 2012-07-09T11:01:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 853
|
py
|
import sys
import os
import hmac
from api.CustomersAPI import CustomersAPI
from api.APIClient3 import APIClient3
from model.CustomerRequest import CustomerRequest
def getCustomer(customerId):
    """Fetch one customer by id and print its serialized form.

    Relies on the module-level *apiClient* created in the __main__ block.
    """
    response = CustomersAPI(apiClient).GetCustomer(customerId)
    print(apiClient.serialize(response))
def addCustomer():
    """Create a sample customer via the API and print the serialized response."""
    postData = CustomerRequest()
    postData.first_name = "John"
    postData.last_name = "Doe"
    postData.email = "hopefullysome@nonexisting.email"
    response = CustomersAPI(apiClient).AddCustomer("test", postData)
    print(apiClient.serialize(response))
if __name__ == '__main__':
    # Sample credentials for the Dynabic staging environment.
    privateKey = "19c7a0d97d2d4413aba5"
    clientKey = "19c7a0d97d2d4413aba5";
    apiServer = "http://stage-api.dynabic.com/billing";
    apiClient = APIClient3(privateKey, clientKey, apiServer)
    getCustomer("14")
    # addCustomer()
|
[
"github@dynabic.com"
] |
github@dynabic.com
|
fd7743331d63bab2a08005fdb42eb57b53948913
|
3d1765aa3a853faa91c8365a6d5536267c389132
|
/tonmoyplanet/tonmoy/migrations/0017_auto_20210627_0829.py
|
876766421caa0d94b2a401bc3f6f9970314b427d
|
[] |
no_license
|
waliul21/django-portfolio
|
55fa35aa00d8a4577c00d7abd5c060e77e58ec84
|
413aeadaa17bbefe267415baefd22f38287752cc
|
refs/heads/main
| 2023-06-09T14:53:14.159295
| 2021-06-27T11:23:18
| 2021-06-27T11:23:18
| 380,666,144
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,846
|
py
|
# Generated by Django 3.2.4 on 2021-06-27 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: reshape the Contact model (rename email->address,
    drop present_location, add social-link URL fields and timestamps)."""
    dependencies = [
        ('tonmoy', '0016_education'),
    ]
    operations = [
        migrations.RenameField(
            model_name='contact',
            old_name='email',
            new_name='address',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='present_location',
        ),
        migrations.AddField(
            model_name='contact',
            name='author_name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AddField(
            model_name='contact',
            name='facebook',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='github',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='gmail',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='instra',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='linkdin',
            field=models.URLField(blank=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='youtube',
            field=models.URLField(blank=True),
        ),
        migrations.AlterField(
            model_name='contact',
            name='phone',
            field=models.IntegerField(blank=True),
        ),
    ]
|
[
"waliul2621@gmail.com"
] |
waliul2621@gmail.com
|
b702b71dd27dffac2adfc140bfd108ccf26e42a6
|
e6b45b6cc01a921c3cc510f1a5fff3074dd6b2dd
|
/myExamples/Test/totest_save2mat.py
|
ef085d7e275bf3f6dbb316e39afdb629ed64f72a
|
[] |
no_license
|
yoczhang/FEALPyExamples
|
3bd339bd5f4576630f767a758da9590a1c068410
|
44d9acbecb528374bc67bba50c62711384228d39
|
refs/heads/master
| 2023-07-24T21:35:50.633572
| 2023-07-05T02:28:13
| 2023-07-05T02:28:13
| 208,667,003
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/anaconda3/envs/FEALPy/bin python3.8
# -*- coding: utf-8 -*-
# ---
# @Software: PyCharm
# @File: totest_save2mat.py
# @Author: Yongchao Zhang
# @Institution: Northwest University, Xi'an, Shaanxi, China
# @E-mail: yoczhang@126.com, yoczhang@nwu.edu.cn
# @Site:
# @Time: Sep 06, 2022
# ---
from scipy.io import loadmat, savemat
import numpy as np
# Demo: save a mix of tuple- and list-wrapped arrays to a MATLAB .mat file.
surf_x = np.arange(24).reshape(4, 6)
surf_y = surf_x * 0.1
surf_u = surf_x + surf_y
line_x = np.arange(12)
line_u = line_x * 0.2
# savemat stacks each sequence of arrays into one variable per key.
savemat('LS.mat', {'line': (line_x, line_u), 'surf': [surf_x, surf_y, surf_u]})
# savemat('LS.mat', {'loss': [0.003, 0.342222, 0.005559]}, appendmat=True)
print('end of the file')
|
[
"yoczhang@126.com"
] |
yoczhang@126.com
|
b26a690893b73109ce8f579c4d67447cbb9ff060
|
29efa6309209df49cf06618b52b60fa76fc7ee96
|
/blog/forms.py
|
5e626e3ab63631499599db78f315dfabf1dbef64
|
[] |
no_license
|
priyankab001/portforlio-blog
|
516505b56d616ad20c204cda3fdd4b2d5a1280ab
|
62048b652eab4819f632a3a314bc9a2fc7a79cb9
|
refs/heads/master
| 2022-02-18T09:14:37.366631
| 2019-09-16T15:30:13
| 2019-09-16T15:30:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
from django import forms
class CommentForm(forms.Form):
    """Blog comment form: author name plus comment body, Bootstrap-styled."""
    author = forms.CharField(max_length=60,
        widget=forms.TextInput(attrs={
        "class":"form-control",
        "placeholder":"Your Name"
        }
    ))
    body = forms.CharField(widget = forms.Textarea(
        attrs={
            "class":"form-control",
            "placeholder":"Leave a comment!"
        }
    ))
|
[
"msdaram@Anjans-MacBook-Pro.local"
] |
msdaram@Anjans-MacBook-Pro.local
|
582bb49f10f1dc35c944db64716dfcf8cc606416
|
2863d6fa5a9b1daba18345a5f87bbe0d89e1a39d
|
/render_functions.py
|
4bdc9213726c2d198d5f43be320b958ce370158d
|
[
"MIT"
] |
permissive
|
TheNicGard/DungeonStar
|
3f76ddfc7da44554d887d23f7d52f80cde3ec6f1
|
525aeb53217166d2ce83e4e91a3b8c1b102f0dcb
|
refs/heads/master
| 2020-06-18T01:21:58.829883
| 2020-02-23T10:04:15
| 2020-02-23T10:04:15
| 196,119,789
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,196
|
py
|
import tcod as libtcod
from enum import Enum
from game_states import GameStates
from math import sqrt
from menus import inventory_menu, level_up_menu, character_screen, help_screen, format_weight, confirmation_menu
from plot_gen import get_name
from rpg_mechanics import display_ability
import textwrap
class RenderOrder(Enum):
    """Draw layering for map entities; higher values are drawn on top."""
    TRAP = 1
    CORPSE = 2
    STAIRS = 3
    DOOR = 4
    SIGN = 5
    GOLD = 6
    ITEM = 7
    ACTOR = 8
def get_names_under_mouse(mouse, entities, fov_map):
    """Return a capitalized, comma-separated list of the names of all
    entities under the mouse cursor, if that tile is in the field of view."""
    cursor_x, cursor_y = mouse.cx, mouse.cy
    found = []
    for entity in entities:
        if entity.x == cursor_x and entity.y == cursor_y and fov_map.fov[cursor_y][cursor_x]:
            found.append(entity.get_name)
    return ', '.join(found).capitalize()
def render_bar(panel, x, y, total_width, value, maximum, bar_color, back_color):
    """Draw a value/maximum stat bar (e.g. HP) with a centered text label."""
    bar_width = int(float(value) / maximum * total_width)
    # background track, then the filled portion on top
    libtcod.console_set_default_background(panel, back_color)
    libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_SCREEN)
    libtcod.console_set_default_background(panel, bar_color)
    if bar_width > 0:
        libtcod.console_rect(panel, x, y, bar_width, 1, False, libtcod.BKGND_SCREEN)
    libtcod.console_set_default_foreground(panel, libtcod.white)
    # clear stale glyphs before printing the "value/maximum" label
    for tmp_x in range(total_width):
        libtcod.console_put_char(panel, x + tmp_x, y, ' ', libtcod.BKGND_NONE)
    libtcod.console_print_ex(panel, int(x + total_width / 2), y, libtcod.BKGND_NONE, libtcod.CENTER,
                             '{0}/{1}'.format(value, maximum))
    libtcod.console_rect(panel, x, y, total_width, 1, False, libtcod.BKGND_NONE)
def get_health_color(hp_ratio):
    """Map a health ratio in [0, 1] to an [r, g, b] list.

    Full health is green, half is yellow, empty is red; the red and green
    channels ramp linearly (slope 511) on either side of 0.5.
    """
    red = 255 if hp_ratio <= 0.5 else max(0, int((-511 * hp_ratio) + 511))
    green = 255 if hp_ratio >= 0.5 else max(0, int(511 * hp_ratio))
    return [red, green, 0]
def render_status_panel(panel, x, y, width, height, player, game_state, entities, game_map, fov_map, turn, color_accessibility, cursor):
    """Draw the sidebar: player HP bar, dungeon level/turn/hunger, the
    LOOK_AT cursor position, and a distance-sorted list of visible entities
    with per-entity health indicators."""
    # clear the panel region, then paint the background
    for tmp_x in range(width):
        for tmp_y in range(height):
            libtcod.console_put_char(panel, x + tmp_x, y + tmp_y, ' ', libtcod.BKGND_NONE)
    libtcod.console_set_default_background(panel, libtcod.darkest_grey)
    libtcod.console_rect(panel, x, y, width, height, False, libtcod.BKGND_SET)
    libtcod.console_set_default_foreground(panel, libtcod.white)
    libtcod.console_print_ex(panel, int(x + width / 2), y + 1, libtcod.BKGND_NONE, libtcod.CENTER, player.name)
    libtcod.console_print_ex(panel, x + 1, y + 3, libtcod.BKGND_NONE, libtcod.LEFT, "HP")
    render_bar(panel, x + 4, y + 3, width - 5, player.fighter.hp, player.fighter.max_hp,
               libtcod.light_red, libtcod.darker_red)
    libtcod.console_set_default_foreground(panel, libtcod.white)
    libtcod.console_print_ex(panel, x + 1, height - 2, libtcod.BKGND_NONE, libtcod.LEFT,
                             'Dungeon level {0}'.format(game_map.dungeon_level))
    libtcod.console_print_ex(panel, x + 1, height - 3, libtcod.BKGND_NONE, libtcod.LEFT,
                             'Turn {0}'.format(turn))
    if player.hunger.status is not None:
        libtcod.console_print_ex(panel, x + 1, height - 4, libtcod.BKGND_NONE, libtcod.LEFT,
                                 '{0}'.format(player.hunger.status))
    if game_state == GameStates.LOOK_AT:
        libtcod.console_print_ex(panel, x + 1, height - 5, libtcod.BKGND_NONE, libtcod.LEFT,
                                 '({0}, {1})'.format(cursor.x, cursor.y))
    see_invisible = player.fighter.is_effect("see_invisible")
    entities_in_fov = entity_in_fov_list(entities, game_map, fov_map, see_invisible)
    # nearest entities first
    entities_in_fov.sort(key = lambda e: sqrt((player.x - e.x) ** 2 + (player.y - e.y) ** 2))
    index = 0
    for e in entities_in_fov:
        # Entity char
        libtcod.console_set_default_foreground(panel, e.get_color)
        libtcod.console_put_char(panel, x + 1, y + 6 + index, e.get_char, libtcod.BKGND_NONE)
        # Entity health
        health_ratio = e.fighter.hp / e.fighter.max_hp
        if not color_accessibility:
            libtcod.console_set_default_foreground(panel, get_health_color(health_ratio))
            libtcod.console_put_char(panel, x + 3, y + 6 + index, chr(219), libtcod.BKGND_NONE)
        else:
            # colorblind-friendly mode: vary glyph density instead of color
            health_char = ' '
            if health_ratio > 0.75:
                health_char = chr(219)
            elif health_ratio > 0.50:
                health_char = chr(178)
            elif health_ratio > 0.25:
                health_char = chr(177)
            else:
                health_char = chr(176)
            libtcod.console_set_default_foreground(panel, libtcod.white)
            libtcod.console_put_char(panel, x + 3, y + 6 + index, health_char, libtcod.BKGND_NONE)
        # Entity name
        libtcod.console_set_default_foreground(panel, libtcod.white)
        libtcod.console_print_ex(panel, x + 5, y + 6 + index, libtcod.BKGND_NONE, libtcod.LEFT,
                                 e.get_name)
        if e.ai.__class__.__name__ == "NeutralMonster":
            libtcod.console_print_ex(panel, x + 5, y + 7 + index, libtcod.BKGND_NONE, libtcod.LEFT,
                                     "(neutral)")
        index += 3
        # max number of entities who can comfortably fit on screen
        if index >= 3 * 12:
            break
    libtcod.console_set_default_background(panel, libtcod.black)
def entity_in_fov_list(entities, game_map, fov_map, see_invisible):
    """Return fighters with AI on explored, in-FOV tiles; actively invisible
    entities (turns_remaining > 0) are filtered out.

    NOTE(review): the *see_invisible* parameter is accepted but never used —
    presumably it should bypass the invisibility filter; confirm intent.
    """
    entities_in_fov = []
    for entity in entities:
        if fov_map.fov[entity.y][entity.x] and game_map.tiles[entity.x][entity.y].explored:
            if entity.fighter and entity.ai:
                # include entities whose invisibility has expired
                if entity.fighter.effects.get("invisible") and entity.fighter.effects.get("invisible").turns_remaining <= 0:
                    entities_in_fov.append(entity)
                else:
                    entities_in_fov.append(entity)
        # can't get always_visible status
    return entities_in_fov
def render_tile(con, game_state, game_map, fov_map, cursor, x, y, colors, config):
    """Render the map tile at (x, y) onto console *con*.

    Two rendering modes, selected by config["CLASSIC_COLOR"]:
      * classic: glyphs ('#' for walls/windows, '.' for floor) drawn with a
        colored foreground;
      * modern: solid background colors, no glyphs.
    Visible tiles are marked explored as a side effect; explored but
    out-of-view tiles use the darker palette.
    """
    visible = fov_map.fov[y][x]
    wall = game_map.tiles[x][y].block_sight
    window = game_map.tiles[x][y].window
    if config.get("CLASSIC_COLOR"):
        if visible:
            if wall:
                libtcod.console_set_default_foreground(con, colors.get('classic_light_wall'))
                libtcod.console_put_char(con, x, y, '#', libtcod.BKGND_NONE)
            elif window:
                libtcod.console_set_default_foreground(con, colors.get('classic_light_window'))
                libtcod.console_put_char(con, x, y, '#', libtcod.BKGND_NONE)
            else:
                libtcod.console_set_default_foreground(con, colors.get('classic_light_ground'))
                libtcod.console_put_char(con, x, y, '.', libtcod.BKGND_NONE)
            game_map.tiles[x][y].explored = True
        elif game_map.tiles[x][y].explored:
            if wall:
                libtcod.console_set_default_foreground(con, colors.get('classic_dark_wall'))
                libtcod.console_put_char(con, x, y, '#', libtcod.BKGND_NONE)
            # Bug fix: this was `if window:` -- its `else` branch then repainted
            # every explored dark *wall* with the blank dark-ground glyph,
            # erasing remembered walls.  Now mirrors the visible branch above.
            elif window:
                libtcod.console_set_default_foreground(con, colors.get('classic_dark_window'))
                libtcod.console_put_char(con, x, y, '#', libtcod.BKGND_NONE)
            else:
                # Remembered floor is drawn blank (' '), presumably so only
                # walls/windows persist in map memory -- confirm intent.
                libtcod.console_set_default_foreground(con, colors.get('classic_dark_ground'))
                libtcod.console_put_char(con, x, y, ' ', libtcod.BKGND_NONE)
    else:
        if visible:
            if wall:
                libtcod.console_set_char_background(con, x, y, colors.get('light_wall'), libtcod.BKGND_SET)
            elif window:
                libtcod.console_set_char_background(con, x, y, colors.get('light_window'), libtcod.BKGND_SET)
            else:
                libtcod.console_set_char_background(con, x, y, colors.get('light_ground'), libtcod.BKGND_SET)
            game_map.tiles[x][y].explored = True
        elif game_map.tiles[x][y].explored:
            if wall:
                libtcod.console_set_char_background(con, x, y, colors.get('dark_wall'), libtcod.BKGND_SET)
            elif window:
                libtcod.console_set_char_background(con, x, y, colors.get('dark_window'), libtcod.BKGND_SET)
            else:
                libtcod.console_set_char_background(con, x, y, colors.get('dark_ground'), libtcod.BKGND_SET)
def render_tile_in_fov(con, game_state, game_map, fov_map, x, y):
    """Debug overlay renderer: paint each tile's background by FOV status.

    Visible tiles: sepia for walls, darker grey for windows, white for floor.
    Out-of-view tiles: black.
    """
    tile = game_map.tiles[x][y]
    if not fov_map.fov[y][x]:
        color = libtcod.black
    elif tile.block_sight:
        color = libtcod.sepia
    elif tile.window:
        color = libtcod.darker_grey
    else:
        color = libtcod.white
    libtcod.console_set_char_background(con, x, y, color, libtcod.BKGND_SET)
def render_all(con, panel, status_screen, entities, player, game_map, fov_map, fov_recompute,
               turn, message_log, screen_width, screen_height, panel_height, panel_y,
               mouse, colors, game_state, cursor, config, status_screen_width, status_screen_height,
               identities):
    """Top-level per-frame renderer.

    Draws (in order) the map tiles, entities, look-at cursor, message log,
    status panel and any modal menu implied by *game_state*, blitting each
    off-screen console (con / panel / status_screen) to the root console.
    Marks visible tiles explored as a side effect (via render_tile).
    """
    # MAP TILES -- full repaint in FOV-debug mode, or when the FOV changed.
    if config.get("DEBUG_SHOW_FOV"):
        for y in range(game_map.height):
            for x in range(game_map.width):
                render_tile_in_fov(con, game_state, game_map, fov_map, x, y)
    elif fov_recompute:
        for y in range(game_map.height):
            for x in range(game_map.width):
                render_tile(con, game_state, game_map, fov_map, False, x, y, colors, config)
    # ENTITIES -- sorted so lower render_order values are drawn first
    # (and therefore overdrawn by higher ones).
    entities_in_render_order = sorted(entities, key=lambda x: x.render_order.value)
    # Player detection effects that widen what may be drawn.
    see_ai = player.fighter.is_effect("detect_aura")
    see_items = player.fighter.is_effect("detect_items")
    see_invisible = player.fighter.is_effect("see_invisible")
    if config.get("DEBUG_SHOW_FOV"):
        for entity in entities_in_render_order:
            draw_entity_in_fov(con, entity, fov_map)
    else:
        for entity in entities_in_render_order:
            if entity.animation:
                draw_animated_entity(con, entity, fov_map, game_map, identities, see_ai, see_items, see_invisible)
            else:
                draw_entity(con, entity, fov_map, game_map, identities, see_ai, see_items, see_invisible)
    # CURSOR -- only shown while the player is in look-at mode.
    if game_state == GameStates.LOOK_AT:
        cursor.animation.tick()
        libtcod.console_set_default_foreground(con, libtcod.white)
        libtcod.console_put_char(con, cursor.x, cursor.y, cursor.animation.get_char, libtcod.BKGND_NONE)
    libtcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)
    libtcod.console_set_default_background(panel, libtcod.black)
    libtcod.console_clear(panel)
    # MESSAGE LOG -- one line per message, starting at row 1.
    y = 1
    for message in message_log.messages:
        libtcod.console_set_default_foreground(panel, message.color)
        libtcod.console_print_ex(panel, message_log.x, y, libtcod.BKGND_NONE, libtcod.LEFT, message.text)
        y += 1
    # Disabled mouse-hover name display; kept for reference.
    """
    libtcod.console_set_default_foreground(panel, libtcod.light_grey)
    libtcod.console_print_ex(panel, 1, 0, libtcod.BKGND_NONE, libtcod.LEFT,
                             get_names_under_mouse(mouse, entities, fov_map))
    """
    libtcod.console_blit(panel, 0, 0, screen_width, panel_height, 0, 0, panel_y)
    ### STATUS PANEL ###
    libtcod.console_set_default_background(status_screen, libtcod.black)
    libtcod.console_clear(status_screen)
    render_status_panel(status_screen, 0, 0, status_screen_width, status_screen_height, player, game_state, entities, game_map, fov_map, turn, False, cursor)
    # NOTE(review): console-to-console blit has been flaky here historically
    # ("buggy af" per the original author) -- verify offsets if layout changes.
    status_screen.blit(con, screen_width - status_screen_width, 0, 0, 0, status_screen_width, status_screen_height)
    # MENUS -- modal screens drawn over everything else.
    if game_state in (GameStates.SHOW_INVENTORY, GameStates.DROP_INVENTORY,
                      GameStates.IDENTIFY_INVENTORY, GameStates.CHARGE_INVENTORY,
                      GameStates.ENCHANT_INVENTORY):
        if game_state == GameStates.SHOW_INVENTORY:
            inventory_title = 'Inventory ({0}/{1})\n'.format(format_weight(player.inventory.current_weight, 1), format_weight(player.inventory.capacity, 1))
        elif game_state == GameStates.DROP_INVENTORY:
            inventory_title = 'Press the key next to an item to drop it, or Esc to cancel.\n'
        elif game_state == GameStates.IDENTIFY_INVENTORY:
            inventory_title = 'Press the key next to an item to identify it, or Esc to cancel.\n'
        elif game_state == GameStates.CHARGE_INVENTORY:
            inventory_title = 'Press the key next to an item to charge it, or Esc to cancel.\n'
        elif game_state == GameStates.ENCHANT_INVENTORY:
            inventory_title = 'Press the key next to an item to enchant it, or Esc to cancel.\n'
        inventory_menu(con, inventory_title, player, 50, screen_width, screen_height)
    elif game_state == GameStates.LEVEL_UP:
        level_up_menu(con, 'Level up! Choose a stat to raise:', player, 40,
                      screen_width, screen_height)
    elif game_state == GameStates.CHARACTER_SCREEN:
        character_screen(player, 30, game_map.width, game_map.height)
    elif game_state == GameStates.HELP_SCREEN:
        help_screen(45, screen_width, screen_height)
# Character-creation screen layout: column header -> list of row labels.
# Consumed by the character-creation renderer in this module.
creation_menu = {
    "Ability scores": ["Strength", "Dexterity", "Constitution", "Intelligence", "Wisdom", "Charisma"],
    "Inspiration:": ["Self", "Love", "Peace", "Prosperity", "The Arts", "The Stars"]
}
def render_character_creation(con, panel, screen_width, screen_height, menu_cursor, stat_diffs, points_available, stat_boosts, plot):
    """Draw the two-column character-creation screen and blit it to root.

    Left column: point-buy ability scores (base 8 + stat_diffs[index], with a
    +2/+1 preview from the selected inspiration).  Right column: inspiration
    choices.  menu_cursor.index is (column, row); stat_boosts[row] holds the
    two abilities (1-based) boosted by that inspiration -- presumably
    (primary, secondary); confirm against the caller.
    Also prints the remaining points, key hints, and the intro plot text.
    """
    libtcod.console_clear(con)
    libtcod.console_set_default_foreground(con, libtcod.white)
    libtcod.console_print_ex(con, 1, 1, libtcod.BKGND_NONE, libtcod.LEFT, "{0} is born...".format(plot.protagonist.name))
    # NOTE: dense hand-tuned layout code below; coordinates are magic numbers.
    header_index = 1
    menu_index = 0
    table_margin = 12
    current_num = menu_cursor.index[1]
    #iterates over each column in the menu
    for h, m in creation_menu.items():
        index = 0
        libtcod.console_set_default_foreground(con, libtcod.white)
        libtcod.console_print_ex(con, header_index, index + 3, libtcod.BKGND_NONE, libtcod.LEFT, h)
        # iterates over each item in a column of the menu
        for item in m:
            # draw highlighting rectangle and change text color
            if menu_index == menu_cursor.index[0] and index == menu_cursor.index[1]:
                libtcod.console_set_default_foreground(con, libtcod.black)
                draw_background_rect(con, header_index, 4 + menu_cursor.index[1], len(h) + table_margin - 5, 1, libtcod.white)
            else:
                libtcod.console_set_default_foreground(con, libtcod.white)
            # first column (ability scores)
            if menu_index == 0:
                # cyan highlight when this ability is boosted by the currently
                # hovered inspiration (cursor in second column)
                if index + 1 in stat_boosts[current_num] and menu_cursor.index[0] == 1:
                    libtcod.console_set_default_foreground(con, libtcod.cyan)
                else:
                    libtcod.console_set_default_foreground(con, libtcod.white)
                if menu_index == menu_cursor.index[0] and index == menu_cursor.index[1]:
                    libtcod.console_set_default_foreground(con, libtcod.black)
                libtcod.console_print_ex(con, header_index, index + 4, libtcod.BKGND_NONE, libtcod.LEFT, item + ":")
                # right-aligned score value, including the +2/+1 boost preview
                if index + 1 in stat_boosts[current_num] and menu_cursor.index[0] == 1:
                    if stat_boosts[current_num][0] == index + 1:
                        libtcod.console_print_ex(con, len(h) + table_margin - 5, index + 4, libtcod.BKGND_NONE, libtcod.RIGHT, display_ability(8 + stat_diffs[index] + 2))
                        libtcod.console_print_ex(con, len(h) + table_margin - 2, index + 4, libtcod.BKGND_NONE, libtcod.RIGHT, "+2")
                    elif stat_boosts[current_num][1] == index + 1:
                        libtcod.console_print_ex(con, len(h) + table_margin - 5, index + 4, libtcod.BKGND_NONE, libtcod.RIGHT, display_ability(8 + stat_diffs[index] + 1))
                        libtcod.console_print_ex(con, len(h) + table_margin - 2, index + 4, libtcod.BKGND_NONE, libtcod.RIGHT, "+1")
                else:
                    libtcod.console_print_ex(con, len(h) + table_margin - 5, index + 4, libtcod.BKGND_NONE, libtcod.RIGHT, display_ability(8 + stat_diffs[index]))
            # second column (inspirations)
            else:
                libtcod.console_print_ex(con, header_index, index + 4, libtcod.BKGND_NONE, libtcod.LEFT, item)
            index += 1
        header_index += len(h) + table_margin
        menu_index += 1
    libtcod.console_set_default_foreground(con, libtcod.white)
    libtcod.console_print_ex(con, 1, 11, libtcod.BKGND_NONE, libtcod.LEFT, "{0} points available".format(points_available))
    libtcod.console_print_ex(con, 1, 13, libtcod.BKGND_NONE, libtcod.LEFT, "+/- to add/subtract points")
    libtcod.console_print_ex(con, 1, 14, libtcod.BKGND_NONE, libtcod.LEFT, "Enter to accept changes")
    # PLOT -- intro story text below the menu
    y = 16
    for line in plot.lines:
        libtcod.console_print_ex(con, 1, y, libtcod.BKGND_NONE, libtcod.LEFT, line)
        y += 1
    libtcod.console_blit(con, 0, 0, screen_width, screen_height, 0, 0, 0)
def draw_background_rect(con, x, y, w, h, color):
    """Fill a w-by-h rectangle of cell backgrounds, top-left at (x, y)."""
    for col in range(w):
        for row in range(h):
            libtcod.console_set_char_background(con, x + col, y + row, color, libtcod.BKGND_SET)
def clear_all(con, entities, cursor):
    """Erase every entity glyph, then the look-at cursor, from the console."""
    for drawable in list(entities) + [cursor]:
        clear_entity(con, drawable)
def draw_entity(con, entity, fov_map, game_map, identities, see_ai, see_items, see_invisible):
    """Draw one entity glyph, honouring FOV, map memory, revealed traps and
    the player's detection effects (detect aura / detect items / see invisible).
    """
    in_fov = fov_map.fov[entity.y][entity.x]
    remembered = ((entity.stairs or entity.door or entity.sign)
                  and game_map.tiles[entity.x][entity.y].explored)
    revealed_trap = entity.trap and entity.trap.revealed
    if in_fov or remembered or revealed_trap:
        libtcod.console_set_default_foreground(con, entity.get_color)
        if entity.fighter and entity.ai:
            # Invisible fighters are skipped unless the player sees invisible.
            if not entity.fighter.is_effect("invisible") or see_invisible:
                libtcod.console_put_char(con, entity.x, entity.y, entity.get_char, libtcod.BKGND_NONE)
        else:
            libtcod.console_put_char(con, entity.x, entity.y, entity.get_char, libtcod.BKGND_NONE)
    elif see_ai and entity.ai:
        # "detect aura": creatures are revealed even outside the FOV.
        libtcod.console_set_default_foreground(con, entity.get_color)
        libtcod.console_put_char(con, entity.x, entity.y, entity.get_char, libtcod.BKGND_NONE)
    elif see_items and entity.item:
        # "detect items": items are revealed even outside the FOV.
        libtcod.console_set_default_foreground(con, entity.get_color)
        libtcod.console_put_char(con, entity.x, entity.y, entity.get_char, libtcod.BKGND_NONE)
def draw_animated_entity(con, entity, fov_map, game_map, identities, see_ai, see_items, see_invisible):
    """Animated counterpart of draw_entity: identical visibility rules, but
    glyph/color come from entity.animation, which is ticked on every call."""
    in_fov = fov_map.fov[entity.y][entity.x]
    remembered = ((entity.stairs or entity.door or entity.sign)
                  and game_map.tiles[entity.x][entity.y].explored)
    revealed_trap = entity.trap and entity.trap.revealed
    if in_fov or remembered or revealed_trap:
        libtcod.console_set_default_foreground(con, entity.animation.get_color)
        if entity.fighter and entity.ai:
            # Invisible fighters are skipped unless the player sees invisible.
            if not entity.fighter.is_effect("invisible") or see_invisible:
                libtcod.console_put_char(con, entity.x, entity.y, entity.animation.get_char, libtcod.BKGND_NONE)
        else:
            libtcod.console_put_char(con, entity.x, entity.y, entity.animation.get_char, libtcod.BKGND_NONE)
    elif see_ai and entity.ai:
        libtcod.console_set_default_foreground(con, entity.animation.get_color)
        libtcod.console_put_char(con, entity.x, entity.y, entity.animation.get_char, libtcod.BKGND_NONE)
    elif see_items and entity.item:
        libtcod.console_set_default_foreground(con, entity.animation.get_color)
        libtcod.console_put_char(con, entity.x, entity.y, entity.animation.get_char, libtcod.BKGND_NONE)
    # Advance the animation whether or not the entity was drawn this frame.
    entity.animation.tick()
def clear_entity(con, entity):
    """Erase the entity's glyph by drawing a space at its map position."""
    libtcod.console_put_char(con, entity.x, entity.y, ' ', libtcod.BKGND_NONE)
def draw_entity_in_fov(con, entity, fov_map):
    """Debug renderer: '@' (blue) for the player and '!' (green) for other
    entities inside the FOV; '?' (red) for entities outside it."""
    if not fov_map.fov[entity.y][entity.x]:
        color, glyph = libtcod.dark_red, "?"
    elif entity.id == "player":
        color, glyph = libtcod.dark_blue, "@"
    else:
        color, glyph = libtcod.dark_green, "!"
    libtcod.console_set_default_foreground(con, color)
    libtcod.console_put_char(con, entity.x, entity.y, glyph, libtcod.BKGND_NONE)
|
[
"nicolasg699@gmail.com"
] |
nicolasg699@gmail.com
|
15fb4253a3e13829a69ddf5e80deefc1a51677ef
|
c0cbe24a8a6fd86e692f6dc832980482b5826a17
|
/django_homework/wsgi.py
|
27efa4b6bf131f0227f3e3c629833bacee6f78a9
|
[] |
no_license
|
NickLennonLiu/sast-django
|
cb6ae241d2095a8e7d005cb7913c32e9ef5de40b
|
c5d0505e865649473e381e86b490f813333db8cf
|
refs/heads/master
| 2022-11-29T14:23:47.992931
| 2020-08-09T07:12:02
| 2020-08-09T07:12:02
| 284,487,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
"""
WSGI config for django_homework project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_homework.settings')
application = get_wsgi_application()
|
[
"1172236820@qq.com"
] |
1172236820@qq.com
|
18b8be2a966cf73487a056f20a5b10ebfc0a41c6
|
75c5da67adb4c799ab18fff0ada3b550278f0f97
|
/shift_model_pjb.py
|
6af9a6dca522cdc384dc850578bd01d7cd0e9b6d
|
[] |
no_license
|
peterbekins/Optimization
|
03a351a2cb24e9a16966914dd23c1186d77c60ef
|
73a9519b4b3406b6788a311dd73bd89bf745339e
|
refs/heads/main
| 2023-01-21T18:12:31.731969
| 2020-12-01T21:09:48
| 2020-12-01T21:09:48
| 316,029,318
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,270
|
py
|
# Resident shift scheduling with CP-SAT: assign 4 residents to 3 shifts/day
# over 4 weeks, honouring rest rules and vacations, while minimising total
# preference cost (per the plot legend below, a rating of 3 marks an
# unrequested shift, so lower ratings are preferred).
from ortools.sat.python import cp_model
import numpy as np
import matplotlib.pyplot as plt
import time
np.random.seed(441)  # fixed seed so the random requests are reproducible
# timer to measure computational time
time_start = time.perf_counter()
# Problem dimensions: 3 shifts/day * 7 days = 21 shifts per week.
residents = 4
weeks = 4
shifts = weeks * 21
shift_requests = []
# initialize shift requests with random variable for now
# this algorithm has each resident rate each shift 1-3
# I choose without replacement to ensure that the requests are similar
# each week, each resident will have 7 shifts rated 3, etc.
week_ratings = [3,3,3,3,3,3,3,2,2,2,2,2,2,2,1,1,1,1,1,1,1]
for r in range(residents):
    week_requests = []
    for w in range(weeks):
        week_requests.append(np.random.choice(week_ratings, 21, replace=False))
    shift_requests.append(np.hstack(week_requests))
# vacation requests, 1 = available to work 0 = on vacation
v = []
v.append(np.tile(1, shifts))
v.append(np.tile(1, shifts))
v.append(np.tile(1, shifts))
# Resident 4 is unavailable for the entire second week.
v.append([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
          0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
          1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
          1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
# Set min/max for resident per shift
min_per_shift = 1
max_per_shift = 3
# Per-resident totals over the whole period (i.e. averaged per week).
max_per_week = weeks * 10
min_per_week = weeks * 6
# Create the model
shift_model = cp_model.CpModel()
# 1. Decision Variables
# a. x_rs is binary, 1 = resident r is scheduled for shift s
x = {}
for r in range(residents):
    for s in range(shifts):
        x[(r,s)] = shift_model.NewBoolVar('x_r%s%i' % (r, s))
# b. y_rs is binary, indicator variable used to turn on constraint for sequential shifts
y = {}
for r in range(residents):
    for s in range(shifts):
        y[(r,s)] = shift_model.NewBoolVar('y_r%s%i' % (r, s))
# c. z_rwd is binary, indicator variable turns on if resident r is scheduled on week w day d
# (w steps by 21 = shifts/week, d steps by 3 = shifts/day)
z = {}
for r in range(residents):
    for w in range(0, shifts, 21):
        for d in range(w, w + 21, 3):
            z[(r,w,d)] = shift_model.NewBoolVar('z_r%iw%id%i' % (r,w,d))
# 2. Constraints
# a. Min and max residents per shift
for s in range(shifts):
    shift_model.Add(sum(x[(r,s)] for r in range(residents)) >= min_per_shift)
    shift_model.Add(sum(x[(r,s)] for r in range(residents)) <= max_per_shift)
# b. Max shifts per week capped at 10 per resident but averaged over 4 weeks
for r in range(residents):
    shift_model.Add(sum(x[(r,s)] for s in range(shifts)) <= max_per_week)
    shift_model.Add(sum(x[(r,s)] for s in range(shifts)) >= min_per_week)
# c. Resident must have 16 hours off (two shifts) after three consecutive shifts
for r in range(residents):
    for s in range(shifts-4):
        pattern_1 = x[(r,s)] + x[(r,s+1)] + x[(r,s+2)]
        # Big-M indicator linking: y[(r,s)] == 1 exactly when shifts s..s+2
        # are all worked.  Bug fix: the first constraint previously ADDED the
        # big-M term (3 > pattern_1 + y*1000), which made y == 1 infeasible
        # and outlawed any three consecutive shifts outright instead of
        # flagging them.  Subtracting matches the intent (and the pattern
        # used correctly in constraint d below).
        shift_model.Add(3 > pattern_1 - y[(r,s)]*1000)
        shift_model.Add(3 <= pattern_1 + (1-y[(r,s)])*1000)
        # If y[(s)] == 1, these constraints force the next two shifts off
        # (the 16 hours of rest).
        shift_model.Add(y[(r,s)] + x[(r,s+3)] <= 1)
        shift_model.Add(y[(r,s)] + x[(r,s+4)] <= 1)
    # This also checks the edge so 4 shifts can't be scheduled at the end of the period
    edge = x[(r, shifts-1)] + x[(r, shifts-2)]+ x[(r, shifts-3)] + x[(r, shifts-4)]
    shift_model.Add(edge < 4)
# d. Resident must have one full day off per week
for r in range(residents):
    for w in range(0, shifts, 21):
        weekly_total = 0
        for d in range(w, w+21, 3):
            daily_total = x[(r, d)] + x[(r, d+1)] + x[(r,d+2)]
            # Big-M indicator: z == 1 iff resident r works any shift on day d.
            shift_model.Add(daily_total >= 1 - (1 - z[(r,w,d)]) * 100)
            shift_model.Add(daily_total < 1 + z[(r,w,d)] * 100)
            weekly_total += z[(r,w,d)]
        # can work max 6 days a week
        shift_model.Add(weekly_total <= 6)
# e. Block vacation: a resident cannot be scheduled while on vacation.
for r in range(residents):
    for s in range(shifts):
        shift_model.Add(x[(r,s)] <= v[r][s])
# 3. Objective function
# Minimise the total request rating over assigned shifts.  Per the plot
# legend below, a rating of 3 marks an unrequested shift, so minimising the
# sum maximises how many assigned shifts were actually requested.
# Need to look into balancing across residents as well
shift_model.Minimize(
    sum(shift_requests[r][s] * x[(r,s)]
        for r in range(residents)
        for s in range(shifts)))
# 4. Solver
solver = cp_model.CpSolver()
printer = cp_model.ObjectiveSolutionPrinter()
status = solver.SolveWithSolutionCallback(shift_model, printer)
# Bug fix: CpSolver returns OPTIMAL when it proves optimality; the original
# check accepted only FEASIBLE and printed "no solution" for optimal results.
if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print("Feasible!")
    print("Solution = ", solver.ObjectiveValue())
else:
    print("no solution")
# 5. Grid plot showing the preference matrix (one row per resident,
# one column per shift; darker cell = higher rating number).
plt.figure(figsize=(8,3))
plt.imshow(shift_requests, cmap="Oranges",aspect = 3)
# vertical lines at the week boundaries (21 shifts per week)
plt.axvline(20.5, color='black')
plt.axvline(41.5, color='black')
plt.axvline(62.5, color='black')
# add borders
for res in range(residents):
    for shift in range(shifts):
        r = plt.Rectangle((shift-0.5,res-0.5), 1,1, facecolor="none", edgecolor="white", linewidth=1)
        plt.gca().add_patch(r)
# print each rating number inside its cell
for i in range(residents):
    for j in range(shifts):
        text = plt.text(j, i, shift_requests[i][j],
                        ha="center", va="center", color="w", fontsize=6)
week_set = [10.5,31.5,52.5,73.5]
plt.tick_params(axis='both', bottom=False)
plt.xticks(week_set,['Week 1', 'Week 2','Week 3','Week 4'],fontsize=10)
plt.yticks([0,1,2,3],['R1','R2','R3','R4'],fontsize=8)
plt.tick_params(axis = "both", which = "both", bottom = False, left = False)
# 6. Grid Plot of the solved schedule, plus per-resident totals.
shift_matrix = []
value_matrix = []
for r in range(residents):
    shift_result = []
    value = 0
    tot_shifts = 0
    for s in range(shifts):
        if solver.Value(x[(r, s)]) == 1:
            value = value + shift_requests[r][s]
            tot_shifts = tot_shifts + 1
            if shift_requests[r][s] < 3:
                shift_result.append((128,128,128)) # dark gray for shift on
            else:
                shift_result.append((204,51,0)) # reddish for on but didn't request
        else:
            shift_result.append((224,224,224)) # light gray for shift off
    shift_matrix.append(shift_result)
    value_matrix.append((r,tot_shifts, value))
plt.figure(figsize=(8,3))
plt.imshow(shift_matrix, aspect = 3)
plt.axvline(20.5, color='black')
plt.axvline(41.5, color='black')
plt.axvline(62.5, color='black')
# add borders
for res in range(residents):
    for shift in range(shifts):
        r = plt.Rectangle((shift-0.5,res-0.5), 1,1, facecolor="none", edgecolor="white", linewidth=1)
        plt.gca().add_patch(r)
week_set = [10.5,31.5,52.5,73.5]
plt.tick_params(axis='both', bottom=False)
plt.xticks(week_set,['Week 1', 'Week 2','Week 3','Week 4'],fontsize=10)
plt.yticks([0,1,2,3],['R1','R2','R3','R4'],fontsize=8)
plt.tick_params(axis = "both", which = "both", bottom = False, left = False)
# 7. Some Summary Diagnostics
sum_value = 0
for row in value_matrix:
    print("Resident ", row[0], "works ", row[1], " shifts at a value of ", row[2])
    sum_value = sum_value + row[2]
print("total value was ", sum_value)
# Output for computational time
time_elapsed = (time.perf_counter() - time_start)
print(weeks, " weeks takes ", time_elapsed)
|
[
"noreply@github.com"
] |
peterbekins.noreply@github.com
|
448219443353fceb95f9381404e76fd448ac9321
|
34fb4074d38efc5f828abd8048fb80c676078f08
|
/stock_price_prediction.py
|
68b0636ef516c0f199478559516a64811c99cded
|
[] |
no_license
|
rhys1998/Stock-Price-Prediction
|
8c93d7c09eeb2d4906c073101f90eba366dbf730
|
f39e609cfc65abade79bace80903d018758e4f5d
|
refs/heads/master
| 2021-01-26T04:38:23.640540
| 2020-02-26T17:03:33
| 2020-02-26T17:03:33
| 243,311,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
# -*- coding: utf-8 -*-
"""stock_price_prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KGTFvzstSWfv6MNQGqTk27o3gcvUZTk8
"""
# This program uses an artificial recurrent neural network (Long Short-Term
# Memory) to predict a share price from the previous 60 closing prices.
# import libraries
import math
import pandas_datareader as web
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense,LSTM
import matplotlib.pyplot as plt
from datetime import date
today = date.today()
# get the stock quote (MSFT daily data from Yahoo Finance up to today)
df= web.DataReader('msft',data_source='yahoo', start='2012-01-01' , end= today)
# show the data (notebook cell output)
df
# get the number of rows and columns
df.shape
# visualize the closing price
plt.figure(figsize=(20,10))
plt.title('Closing price')
plt.plot(df['Close'])
# NOTE(review): this label is on the x axis but describes the y values --
# probably meant plt.ylabel / 'Date'; confirm before changing the output.
plt.xlabel('Closing Price USD ($) ',fontsize=18)
plt.show()
# create a dataframe with only the 'Close' column
data = df.filter(['Close'])
# convert the data frame into a numpy array
dataset=data.values
# number of rows to train on: 80/20 train/test split
training_data_len = math.ceil( len(dataset) * .8)
training_data_len
# scale the data to [0, 1] for the LSTM
scaler = MinMaxScaler(feature_range=(0,1))
scaled_data = scaler.fit_transform(dataset)
scaled_data
# create the training dataset
# create the scaled training dataset
train_data = scaled_data[0:training_data_len, :]
# split the data into x_train and y_train:
# sliding window where each sample is the previous 60 closes and the
# label is the next close
x_train = []
y_train = []
for i in range(60, len(train_data)):
    x_train.append(train_data[i-60:i, 0])
    y_train.append(train_data[i, 0])
    if i<=61:
        # debug: print the first couple of windows
        print(x_train)
        print(y_train)
        print()
# convert the x_train and y_train to numpy arrays
x_train, y_train = np.array(x_train),np.array(y_train)
# reshape to (samples, timesteps, features) as expected by LSTM layers
x_train=np.reshape(x_train ,(x_train.shape[0],x_train.shape[1],1))
x_train.shape
# Build the LSTM model: two stacked LSTM layers, then two dense layers.
model = Sequential()
model.add(LSTM(50,return_sequences=True,input_shape= (x_train.shape[1],1)))
model.add(LSTM(50,return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
# compile the model
model.compile(optimizer='adam' , loss='mean_squared_error')
# Train the model
model.fit(x_train,y_train, batch_size=1 , epochs=1)
# create the testing dataset
# include the last 60 training rows so the first test sample has a full window
test_data = scaled_data[training_data_len - 60: , :]
# create the data sets x_test and y_test
x_test = []
y_test = dataset[training_data_len: , :]
for i in range(60, len(test_data)):
    x_test.append(test_data[i-60:i, 0])
# convert the data to a numpy array
x_test= np.array(x_test)
# reshape to (samples, timesteps, features)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# Get the model's predicted price values (back in dollar units)
predictions = model.predict(x_test)
predictions = scaler.inverse_transform(predictions)
# Root mean squared error.  Bug fix: square each residual BEFORE averaging;
# the original np.sqrt(np.mean(predictions - y_test)**2) reduced to
# abs(mean error), letting positive and negative errors cancel out.
rmse = np.sqrt(np.mean((predictions - y_test) ** 2))
rmse
# plot the data
train = data[: training_data_len]
# .copy() so the Prediction column is added to an independent frame rather
# than a view of `data` (avoids pandas SettingWithCopyWarning / silent no-op).
valid = data[training_data_len :].copy()
valid['Prediction'] = predictions
# visualize the model
plt.figure(figsize=(20,10))
plt.title('Model')
plt.xlabel('Date',fontsize=18)
plt.ylabel('Close Price USD($)', fontsize=18)
# NOTE(review): only the training series is plotted; you probably also want
# plt.plot(valid[['Close', 'Prediction']]) here -- confirm intent.
plt.plot(train['Close'])
plt.show()
# show the valid and actual prices (notebook cell output)
valid
# get the quote.  Bug fix: `end` was the literal string 'today'; use the
# `today` date object computed at the top of the script, matching the other
# DataReader calls.
microsoft_quote = web.DataReader('msft',data_source='yahoo',start='2012-01-01',end=today)
print(microsoft_quote)
# create new dataframe with only the closing prices
new_df = microsoft_quote.filter(['Close'])
# Get the last 60 days of closing prices and convert the data frame to an array
last_60_days = new_df[-60: ].values
#print(last_60_days)
# scale with the SAME scaler fitted on the training data
last_60_days_scaled = scaler.transform(last_60_days)
# build a single-sample batch from the last 60 days
X_test = []
X_test.append(last_60_days_scaled)
# convert the X_test data into a numpy array
X_test=np.array(X_test)
#print(X_test)
# reshape to (samples, timesteps, features)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
# Get the predicted scaled price
pred_price = model.predict(X_test)
# undo the scaling back to dollars
pred_price = scaler.inverse_transform(pred_price)
print (pred_price)
# get today's actual quote for comparison
microsoft_quote2 = web.DataReader('msft',data_source='yahoo',start= today ,end= today )
print(microsoft_quote2['Close'])
|
[
"theroyalrajesh7@gmail.com"
] |
theroyalrajesh7@gmail.com
|
51a393a0f1518e3fa81412d8829e9b6192195b84
|
e11037e84bfdbcf307772f74e3b18dfc820eca77
|
/ros/src/tl_detector/tl_detector.py
|
8950b0e7c929d22387c214c33748c48839c85eb8
|
[
"MIT"
] |
permissive
|
MadridTeam/ROSCentral
|
ab1f24247ded92caa0bb112cb3edce2606ad8089
|
9bcedf04c64f3b3d992077ae02ac501b92063336
|
refs/heads/master
| 2020-03-30T19:07:56.944107
| 2018-10-19T22:17:58
| 2018-10-24T03:30:50
| 151,529,638
| 0
| 1
|
MIT
| 2018-10-24T03:30:52
| 2018-10-04T06:44:19
|
CMake
|
UTF-8
|
Python
| false
| false
| 6,422
|
py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
    """Traffic-light detection node.

    Subscribes to the vehicle pose, the base waypoints, the (simulator-only)
    ground-truth light states, and the camera image.  Classifies the nearest
    upcoming light and publishes the waypoint index of its stop line on
    /traffic_waypoint when it is red (-1 otherwise).
    """

    def __init__(self):
        rospy.init_node('tl_detector')
        self.pose = None
        self.waypoints = None
        self.camera_image = None
        self.lights = []
        self.waypoints_2d = None
        self.waypoint_tree = None
        # Robustness: get_light_state() reads this flag, so make sure it
        # exists even before the first camera frame arrives.
        self.has_image = False
        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input -- consider yaml.safe_load(config_string).
        self.config = yaml.load(config_string)
        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
        self.bridge = CvBridge()
        self.light_classifier = TLClassifier(False)
        self.listener = tf.TransformListener()
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0
        rospy.spin()

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Cache the base waypoints and build a KD-tree (once) for
        nearest-waypoint lookups."""
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
                                 for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Cache the ground-truth traffic-light states (simulator only)."""
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint

        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest base waypoint to the given (x, y) position.

        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        closest_wp_idx = self.waypoint_tree.query([x, y], 1)[1]
        return closest_wp_idx

    def get_light_state(self, light):
        """Determines the current color of the traffic light from the latest
        camera image.

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight),
            or False if no camera image has been received yet.
        """
        if not self.has_image:
            self.prev_light_loc = None
            return False
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
        # Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds the closest visible traffic light ahead of the car, if one
        exists, and determines its stop-line waypoint and color.

        Returns:
            int: index of the waypoint closest to the upcoming stop line
                (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        closest_light = None
        line_wp_idx = None
        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if self.pose is not None and self.waypoint_tree is not None:
            # Bug fix: the y argument previously passed position.x twice, so
            # the "closest" waypoint ignored the car's y coordinate entirely.
            car_position = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
            # Find the nearest stop line at or ahead of the car (by index).
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i]
                temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                d = temp_wp_idx - car_position
                if d >= 0 and d < diff:
                    diff = d
                    closest_light = light
                    line_wp_idx = temp_wp_idx
        if closest_light:
            state = self.get_light_state(closest_light)
            # TODO(review): leftover personal debug output at ERROR level --
            # consider rospy.logdebug with a neutral message.
            rospy.logerr("Manish: {}, {}".format(line_wp_idx, state))
            return line_wp_idx, state
        return -1, TrafficLight.UNKNOWN
# Standalone entry point: construct the node (which spins until shutdown).
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start traffic node.')
|
[
"chen_fengyu@hotmail.com"
] |
chen_fengyu@hotmail.com
|
dbc38eac44893b12655a94fd8a459a1811e090ec
|
42b59166c88dcdce910a2ac4dab45c88e3942eff
|
/test/test_cube_hareware.py
|
c56f88de50bcf3bb0777a07d5dbe1303c03ef75b
|
[
"MIT"
] |
permissive
|
vamoosebbf/MaixUI
|
5e74fd38d1c11ad274e4ce15f84376d5a79d0595
|
b8069509419e37fe767169a4996372c32a841ef9
|
refs/heads/master
| 2023-06-11T08:54:27.073072
| 2021-03-30T09:57:41
| 2021-03-30T09:57:41
| 314,427,404
| 1
| 0
|
MIT
| 2021-06-30T09:02:47
| 2020-11-20T02:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 39,064
|
py
|
# This file is part of MaixUI
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
#
import time, gc, math, random, sensor, audio
from fpioa_manager import fm
from machine import I2C, SPI
from Maix import I2S, GPIO, FFT
from led import sipeed_led
from button import sipeed_button, button_io
from pmu_axp173 import AXP173, AXP173_ADDR
from sound import CubeAudio
from msa301 import MSA301, _MSA301_I2CADDR_DEFAULT
from shtxx import SHT3x, SHT3x_ADDR, SHT31_ADDR
from bme280 import BME280, BME280_I2CADDR
from qmcx983 import QMCX983, QMCX983_I2CADDR
from ui_catch import catch
from ui_canvas import ui
from ui_sample import sample_page
from core import agent
from wdt import protect
protect.keep()
class Report():
    """Summary screen listing the pass/fail verdict of every factory test.

    The flags are class attributes so each *Test screen can record its
    verdict without holding a reference to this instance.
    """
    Key_Test = False
    Led_Test = False
    Touch_Test = False
    Power_Test = False
    Audio_Test = False
    FrontSensor_Test = False
    RearSensor_Test = False
    Msa301_Test = False
    Grove_Test = False
    Spmod_Test = False

    def __init__(self):
        self.is_load = False

    def load(self):
        """Grab the buttons and start polling for key presses."""
        if self.is_load == False:
            #print(case.load)
            self.is_load = True
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            self.agent = agent()
            self.agent.event(150, self.key_event)

    def key_event(self):
        """back/next navigate between screens; home powers the board off."""
        self.btn.expand_event()
        if self.btn.back() == 2:
            sample_page.back()
        elif self.btn.next() == 2:
            sample_page.next()
        elif self.btn.home() == 2:
            axp173 = AXP173(I2C(I2C.I2C1, freq=100*1000, scl=30, sda=31))
            # BUGFIX: `axp173.__write_reg(...)` is name-mangled *here* to
            # `axp173._Report__write_reg` (we are inside class Report), which
            # raises AttributeError. Use the explicitly mangled name of
            # AXP173's private register-write method instead.
            axp173._AXP173__write_reg(0x32, 0x80)  # turn off

    def work(self):
        """Draw one line per test with a green/red pass/fail marker."""
        self.agent.parallel_cycle()
        y = 0
        ui.canvas.draw_string(100, y, "Power", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "1 " + str(Report.Power_Test), (0, 255, 0) if (Report.Power_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "Msa301", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "2 " + str(Report.Msa301_Test), (0, 255, 0) if (Report.Msa301_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "Grove", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "3 " + str(Report.Grove_Test), (0, 255, 0) if (Report.Grove_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "Spmod", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "4 " + str(Report.Spmod_Test), (0, 255, 0) if (Report.Spmod_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "Key + RGB", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "5 " + str(Report.Key_Test), (0, 255, 0) if (Report.Key_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "RearSensor", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "8 " + str(Report.RearSensor_Test), (0, 255, 0) if (Report.RearSensor_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "Audio", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "9 " + str(Report.Audio_Test), (0, 255, 0) if (Report.Audio_Test) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30
        ui.canvas.draw_string(100, y, "SdCard & Lcd", (127, 255, 255), scale=2)
        ui.canvas.draw_string(10, y, "* " + str(True), (0, 255, 0) if (True) else (255, 0, 0), scale=2)
        ui.canvas.draw_line(10, y + 25, 240, y + 25, color=(255, 255, 255))
        y += 30

    def free(self):
        """Give the buttons back to the page framework."""
        if self.is_load:
            #print(sample.free)
            self.is_load = False
            sample_page.btn.enable = True
class PowerTest():
    """Screen 1: exercises the AXP173 PMU over I2C and samples its ADCs.

    Passes once the PMU answers on the bus and both the battery and USB
    voltage readings are non-zero.
    """

    def __init__(self):
        self.is_load = False
        self.i2c = I2C(I2C.I2C1, freq=100*1000, scl=30, sda=31)
        #self.load()

    def test_event(self):
        # Verdict check, armed 1.5 s after load(): advance when passed.
        if self.isconnected and self.vbat_voltage > 0 and self.usb_voltage:
            Report.Power_Test = True
        if Report.Power_Test:
            sample_page.next()

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Power_Test:
            sample_page.next()
        if self.is_load == False:
            # i2c init()
            sample_page.btn.enable = False
            self.isconnected = False
            self.isError = None
            self.vbat_voltage = 0
            self.work_info = []
            self.agent = agent()
            self.agent.event(500, self.check)
            self.agent.event(1500, self.test_event)
            self.is_load = True

    def free(self):
        if self.is_load:
            # i2c deinit()
            sample_page.btn.enable = True
            self.is_load = False

    def check(self):
        # Periodic poll: first connect to the PMU, then keep reading rails.
        try:
            if self.isconnected == False:
                if AXP173_ADDR in self.i2c.scan():
                    self.axp173 = AXP173(self.i2c)
                    self.isconnected = True
                    self.axp173.enable_adc(True)
                    # Default charge limit: 4.2 V, 190 mA step
                    self.axp173.setEnterChargingControl(True)
                    self.axp173.exten_output_enable()
                    ### amigo sensor config.
                    #self.axp173.writeREG(0x27, 0x20)
                    #self.axp173.writeREG(0x28, 0x0C)
            else:
                tmp = []
                self.work_mode = self.axp173.getPowerWorkMode()
                tmp.append("WorkMode:" + hex(self.work_mode))
                # Read battery voltage
                self.vbat_voltage = self.axp173.getVbatVoltage()
                tmp.append("vbat_voltage: {0} V".format(self.vbat_voltage))
                # Read battery charge current
                self.BatteryChargeCurrent = self.axp173.getBatteryChargeCurrent()
                tmp.append("BatChargeCurrent: {0:>4.1f}mA".format(
                    self.BatteryChargeCurrent))
                # Read USB-ACIN voltage
                self.usb_voltage = self.axp173.getUSBVoltage()
                tmp.append("usb_voltage: {0:>4}mV".format(self.usb_voltage))
                # Read USB-ACIN current
                self.USBInputCurrent = self.axp173.getUSBInputCurrent()
                tmp.append("USBInputCurrent: {0:>4.1f}mA".format(self.USBInputCurrent))
                ### Read VBUS voltage
                #usb_voltage = self.axp173.getConnextVoltage()
                #print("6 VUBS_voltage: " + str(usb_voltage))
                ### Read VBUS current
                #USBInputCurrent = self.axp173.getConnextInputCurrent()
                #print("7 VUBSInputCurrent: " + str(USBInputCurrent) + "mA")
                self.getChargingControl = self.axp173.getChargingControl()
                tmp.append("ChargingControl: {}".format(hex(self.getChargingControl)))
                # Charging state
                if self.axp173.is_charging() == True:
                    tmp.append("Charging....")
                else:
                    tmp.append("Not Charging")
                    tmp.append(self.axp173.is_charging())
                # USB plugged state
                if self.axp173.is_usb_plugged_in() == 1:
                    tmp.append("USB plugged ....")
                else:
                    tmp.append("USB is not plugged in")
                self.work_info = tmp
        except Exception as e:
            Report.Power_Test = False
            Report.isError = str(e)
            print(e)

    def work(self):
        # Per-frame draw: title, connection state, then one line per reading.
        self.agent.parallel_cycle()
        ui.canvas.draw_string(10, 10, "1 Power Test", (127, 127, 255), scale=3)
        ui.canvas.draw_string(10, 50, "isconnected: %s" % (
            str)(self.isconnected), (255, 127, 0), scale=2)
        if self.isconnected:
            for i in range(len(self.work_info)):
                ui.canvas.draw_string(
                    20, 20*i + 80, "{0}".format(str(self.work_info[i])), mono_space=2)
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class PowerReport():
    """Result screen: shows Pass/Fail for the power test, then auto-advances."""

    def __init__(self):
        self.is_load = False

    def load(self):
        if self.is_load:
            sample_page.next()
            return
        self.is_load = True
        self.agent = agent()
        # Auto-advance to the next screen half a second after first load.
        self.agent.event(500, sample_page.next)

    def work(self):
        self.agent.cycle()
        passed = Report.Power_Test
        verdict = "Pass" if passed else "Fail"
        color = (0, 255, 0) if passed else (255, 0, 0)
        ui.canvas.draw_string(10, 20, "1 PowerReport", (127, 255, 255), scale=3)
        ui.canvas.draw_string(30, 120, verdict, color, scale=8)

    def free(self):
        if self.is_load:
            pass
class Msa301Test():
    """Screen 2: MSA301 accelerometer.

    Passes once the chip is found on I2C and all three acceleration axes
    read non-zero.
    """

    def __init__(self):
        self.is_load = False
        self.i2c = I2C(I2C.I2C1, freq=100*1000, scl=30, sda=31)
        #fm.register(30, fm.fpioa.I2C1_SCLK, force=True)
        #fm.register(31, fm.fpioa.I2C1_SDA, force=True)

    def test_event(self):
        # Verdict check, armed 1.5 s after load().
        if self.isconnected and self.acceleration[0] != 0 and self.acceleration[1] != 0 and self.acceleration[2] != 0:
            Report.Msa301_Test = True
            sample_page.next()

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Msa301_Test:
            sample_page.next()
        if self.is_load == False:
            # i2c init()
            sample_page.btn.enable = False
            self.isconnected = False
            self.isError = None
            self.tapped = False
            self.acceleration = (0, 0, 0)
            self.agent = agent()
            self.agent.event(500, self.check)
            self.agent.event(1500, self.test_event)
            self.is_load = True

    def free(self):
        if self.is_load:
            # i2c deinit()
            sample_page.btn.enable = True
            self.is_load = False

    def check(self):
        # Connect once the chip answers on the bus, then keep sampling it.
        try:
            if self.isconnected == False:
                if _MSA301_I2CADDR_DEFAULT in self.i2c.scan():
                    self.msa301 = MSA301(self.i2c)
                    self.isconnected = True
            else:
                self.tapped = self.msa301.tapped
                self.acceleration = self.msa301.acceleration
        except Exception as e:
            Report.Msa301_Test = False
            Report.isError = str(e)
            print(e)

    def work(self):
        self.agent.parallel_cycle()
        ui.canvas.draw_string(10, 30, "2 Msa301Test", (127, 127, 255), scale=3)
        ui.canvas.draw_string(10, 80, "isconnected: %s" % (
            str)(self.isconnected), (255, 127, 0), scale=2)
        if self.isconnected:
            ui.canvas.draw_string(10, 120, "tapped: %s" % (
                str)(self.tapped), (0, 214, 126), scale=2)
            # Each axis is drawn as a horizontal bar around x=120, 8 px per unit.
            ui.canvas.draw_string(10, 140, "x", (255, 0, 0), scale=2)
            ui.canvas.draw_line(120, 150, 120 + int(self.acceleration[0] * 8), 150, color=(41, 131, 255))
            ui.canvas.draw_string(10, 160, "y", (0, 255, 0), scale=2)
            ui.canvas.draw_line(120, 170, 120 + int(self.acceleration[1] * 8), 170, color=(141, 31, 255))
            ui.canvas.draw_string(10, 180, "z", (0, 0, 255), scale=2)
            ui.canvas.draw_line(120, 190, 120 + int(self.acceleration[2] * 8), 190, color=(241, 131, 55))
            ui.canvas.draw_string(40, 210,
                str(("%-02.2f %-02.2f %-02.2f" % self.acceleration)), (127, 255, 255), scale=2)
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class Msa301Report():
    """Result screen: shows Pass/Fail for the MSA301 test, then auto-advances."""

    def __init__(self):
        self.is_load = False

    def load(self):
        if self.is_load:
            sample_page.next()
            return
        self.is_load = True
        self.agent = agent()
        # Auto-advance to the next screen half a second after first load.
        self.agent.event(500, sample_page.next)

    def work(self):
        self.agent.cycle()
        passed = Report.Msa301_Test
        verdict = "Pass" if passed else "Fail"
        color = (0, 255, 0) if passed else (255, 0, 0)
        ui.canvas.draw_string(10, 20, "2 Msa301Report", (127, 255, 255), scale=3)
        ui.canvas.draw_string(30, 120, verdict, color, scale=8)

    def free(self):
        if self.is_load:
            pass
class GroveTest():
    """Screen 3: SHT3x temperature/humidity sensor on the Grove I2C port.

    Passes once a plausible (temperature, humidity) pair has been read.
    """

    def __init__(self):
        self.is_load = False
        self.i2c = I2C(I2C.I2C4, freq=100*1000, scl=24, sda=25)

    def test_event(self):
        # Verdict check, armed 3 s after load().
        if self.isconnected and self.work_data != None and self.work_data[0] > 0 and self.work_data[1] > 1:
            Report.Grove_Test = True
            sample_page.next()

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Grove_Test:
            sample_page.next()
        if self.is_load == False:
            # i2c init()
            sample_page.btn.enable = False
            self.isconnected = False
            self.isError = None
            self.work_info = []
            self.work_data = None
            self.agent = agent()
            self.agent.event(250, self.check)
            self.agent.event(3000, self.test_event)
            self.is_load = True

    def free(self):
        if self.is_load:
            # i2c deinit()
            sample_page.btn.enable = True
            self.is_load = False

    def check(self):
        # Probe both possible SHT3x bus addresses, then keep sampling.
        try:
            if self.isconnected == False:
                # print(self.i2c.scan())
                if SHT3x_ADDR in self.i2c.scan():
                    self.sht3x = SHT3x(self.i2c, SHT3x_ADDR)
                    self.isconnected = True
                if SHT31_ADDR in self.i2c.scan():
                    self.sht3x = SHT3x(self.i2c, SHT31_ADDR)
                    self.isconnected = True
            else:
                tmp = []
                self.work_data = self.sht3x.read_temp_humd()
                tmp.append("data:" + str(self.work_data))
                self.work_info = tmp
        except Exception as e:
            Report.Grove_Test = False
            Report.isError = str(e)
            print(e)

    def work(self):
        self.agent.parallel_cycle()
        ui.canvas.draw_string(10, 10, "3 Grove Test SHT3X", (0, 255, 127), scale=2)
        ui.canvas.draw_string(10, 50, "isconnected: %s" % (
            str)(self.isconnected), (255, 127, 0), scale=2)
        if self.isconnected:
            for i in range(len(self.work_info)):
                ui.canvas.draw_string(
                    20, 20*i + 90, "{0}".format(str(self.work_info[i])), scale=2)
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class GroveReport():
    """Result screen: shows Pass/Fail for the Grove test, then auto-advances."""

    def __init__(self):
        self.is_load = False

    def load(self):
        if self.is_load:
            sample_page.next()
            return
        self.is_load = True
        self.agent = agent()
        # Auto-advance to the next screen half a second after first load.
        self.agent.event(500, sample_page.next)

    def work(self):
        self.agent.cycle()
        passed = Report.Grove_Test
        verdict = "Pass" if passed else "Fail"
        color = (0, 255, 0) if passed else (255, 0, 0)
        ui.canvas.draw_string(10, 20, "3 GroveReport", (127, 255, 255), scale=3)
        ui.canvas.draw_string(30, 120, verdict, color, scale=8)

    def free(self):
        if self.is_load:
            pass
class SpmodTest():
    """Screen 4: SPI flash on the SPMOD port.

    Issues the Read Manufacturer/Device ID command (0x90) and passes when
    the two ID bytes are 0x0B 0x17.
    """
    test_conut = 0

    def __init__(self, mosi=8, miso=15, cs=20, clk=21):
        self.is_load = False
        # Software SPI; chip select is driven manually via GPIO6.
        self.spi = SPI(SPI.SPI_SOFT, mode=SPI.MODE_MASTER, baudrate=400*1000,
                       polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=clk, mosi=mosi, miso=miso)
        fm.register(cs, fm.fpioa.GPIO6, force=True)
        self.cs = GPIO(GPIO.GPIO6, GPIO.OUT)

    def test_event(self):
        # Verdict check, armed 1.5 s after load().
        if self.work_data != None and self.work_data == b'\x0b\x17':
            Report.Spmod_Test = True
            sample_page.next()

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Spmod_Test:
            sample_page.next()
        if self.is_load == False:
            # i2c init()
            sample_page.btn.enable = False
            self.isError = None
            self.work_info = []
            self.work_data = None
            self.agent = agent()
            self.agent.event(250, self.check)
            self.agent.event(1500, self.test_event)
            self.is_load = True

    def free(self):
        if self.is_load:
            # i2c deinit()
            sample_page.btn.enable = True
            self.is_load = False

    def check(self):
        try:
            tmp = []
            self.cs.value(0)  # assert chip select
            # 0x90 = Read Manufacturer/Device ID, followed by three dummy bytes.
            write_data = bytearray([0x90, 0x00, 0x00, 0x00])
            self.spi.write(write_data)
            id_buf = bytearray(2)
            self.spi.readinto(id_buf, write=0xff)
            self.work_data = id_buf
            self.cs.value(1)  # release chip select
            tmp.append("Flash ReadID\n\n" + str(self.work_data))
            self.work_info = tmp
        except Exception as e:
            Report.Spmod_Test = False
            Report.isError = str(e)
            print(e)

    def work(self):
        self.agent.parallel_cycle()
        ui.canvas.draw_string(10, 10, "4 Spmod Test", (0, 255, 127), scale=2)
        if self.work_data:
            for i in range(len(self.work_info)):
                ui.canvas.draw_string(
                    20, 20*i + 90, "{0}".format(str(self.work_info[i])), scale=2)
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class SpmodReport():
    """Result screen: shows Pass/Fail for the SPMOD test, then auto-advances."""

    def __init__(self):
        self.is_load = False

    def load(self):
        if self.is_load:
            sample_page.next()
            return
        self.is_load = True
        self.agent = agent()
        # Auto-advance to the next screen half a second after first load.
        self.agent.event(500, sample_page.next)

    def work(self):
        self.agent.cycle()
        passed = Report.Spmod_Test
        verdict = "Pass" if passed else "Fail"
        color = (0, 255, 0) if passed else (255, 0, 0)
        ui.canvas.draw_string(10, 20, "4 SpmodReport", (127, 255, 255), scale=3)
        ui.canvas.draw_string(30, 120, verdict, color, scale=8)

    def free(self):
        if self.is_load:
            pass
class WaitTestStart():
    """Gate screen: waits for any key press before the operator-driven tests."""

    def __init__(self):
        self.is_load = False

    def key_event(self):
        # Any of the three keys advances to the next screen.
        self.btn.expand_event()
        if self.btn.back() == 2:
            sample_page.next()
        elif self.btn.next() == 2:
            sample_page.next()
        elif self.btn.home() == 2:
            sample_page.next()

    def load(self):
        if self.is_load == False:
            self.is_load = True
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.agent = agent()
            self.agent.event(150, self.key_event)
            #self.agent.event(500, sample_page.next)
        else:
            # Second visit: skip straight through once keys already passed.
            if Report.Key_Test:
                sample_page.next()

    def work(self):
        self.agent.cycle()
        ui.canvas.draw_string(10, 20, "Press \n\n Any-key \n\n Start Test", (127, 255, 255), scale=3)

    def free(self):
        if self.is_load:
            pass
            #self.is_load = False
            sample_page.btn.enable = True
class KeyTest():
    """Screen 5: each of the three keys must be clicked; LEDs give feedback.

    Click counters are class attributes so KeyReport can read them later.
    """
    home_click = 0
    back_click = 0
    next_click = 0

    def __init__(self):
        self.is_load = False
        #self.load()

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Key_Test:
            sample_page.next()
        if self.is_load == False:
            #print(case.load)
            self.is_load = True
            sample_page.btn.enable = False
            sipeed_led.init(13, 12, 14, 32)
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.agent = agent()
            self.agent.event(150, self.key_event)
            # Auto-advance after 16 s even if keys were never pressed.
            self.agent.event(16000, lambda :sample_page.next())
            KeyTest.home_click = 0
            KeyTest.back_click = 0
            KeyTest.next_click = 0

    def key_event(self):
        # NOTE(review): LED channels look active-low here (value(0) lights
        # the colour) — confirm against the led driver.
        self.btn.expand_event()
        if self.btn.back() == 2:
            KeyTest.back_click += 1
            sipeed_led.r.value(0)
            sipeed_led.g.value(1)
            sipeed_led.b.value(1)
        elif self.btn.next() == 2:
            KeyTest.next_click += 1
            sipeed_led.r.value(1)
            sipeed_led.g.value(1)
            sipeed_led.b.value(0)
        elif self.btn.home() == 2:
            KeyTest.home_click += 1
            sipeed_led.r.value(1)
            sipeed_led.g.value(0)
            sipeed_led.b.value(1)
            if self.btn.interval() > 1500: # long press
                sample_page.next()
        # Two clicks of every key passes the test immediately.
        if KeyTest.home_click > 1 and KeyTest.back_click > 1 and KeyTest.next_click > 1:
            Report.Key_Test = True
            sample_page.next()

    def work(self):
        self.agent.parallel_cycle()
        y = 20
        ui.canvas.draw_string(10, y,
            '5 KeyTest', (255, 255, 255), scale=3)
        y += 60
        ui.canvas.draw_string(20, y,
            'home click %d' % self.home_click, (255, 0, 0), scale=3)
        y += 60
        ui.canvas.draw_string(20, y,
            'back click %d' % self.back_click, (0, 255, 0), scale=3)
        y += 60
        ui.canvas.draw_string(20, y,
            'next click %d' % self.next_click, (0, 0, 255), scale=3)

    def free(self):
        if self.is_load:
            #print(sample.free)
            self.is_load = False
            sample_page.btn.enable = True
            # On leaving the screen, a single click of each key is enough.
            if self.home_click > 0 and self.back_click > 0 and self.next_click > 0:
                Report.Key_Test = True
            sipeed_led.r.value(1)
            sipeed_led.g.value(1)
            sipeed_led.b.value(1)
class KeyReport():
    """Result screen for the key test; any key press advances."""

    def __init__(self):
        self.is_load = False

    def key_event(self):
        self.btn.expand_event()
        if self.btn.back() == 2:
            sample_page.next()
        elif self.btn.next() == 2:
            sample_page.next()
        elif self.btn.home() == 2:
            sample_page.next()

    def load(self):
        if self.is_load == False:
            self.is_load = True
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.agent = agent()
            self.agent.event(150, self.key_event)
            #self.agent.event(500, sample_page.next)
        else:
            # Second visit: skip once the key test has passed.
            if Report.Key_Test:
                sample_page.next()

    def work(self):
        self.agent.cycle()
        y = 20
        ui.canvas.draw_string(10, y, "5 KeyReport", (127, 255, 255), scale=3)
        y += 40
        ui.canvas.draw_string(10, y, "Home " + ("Pass" if (KeyTest.home_click) else "Fail"), (0, 255, 0) if (KeyTest.home_click) else (255, 0, 0), scale=3)
        y += 40
        ui.canvas.draw_string(10, y, "Back " + ("Pass" if (KeyTest.back_click) else "Fail"), (0, 255, 0) if (KeyTest.back_click) else (255, 0, 0), scale=3)
        y += 40
        ui.canvas.draw_string(10, y, "Next " + ("Pass" if (KeyTest.next_click) else "Fail"), (0, 255, 0) if (KeyTest.next_click) else (255, 0, 0), scale=3)
        y += 40
        ui.canvas.draw_string(10, y, "KeyTest " + ("Pass" if (Report.Key_Test) else "Fail"), (0, 255, 0) if (Report.Key_Test) else (255, 0, 0), scale=3)
        y += 40
        ui.canvas.draw_string(10, y, "Press Any-Key Continue", (255, 255, 255), scale=2)

    def free(self):
        if self.is_load:
            pass
            sample_page.btn.enable = True
class RearSensorTest():
    """Screen 8: rear camera — shows a live preview; home key (or a captured
    frame after the 8 s timeout) marks the test passed."""

    def __init__(self):
        self.is_load = False
        self.isconnected = False

    def test_event(self):
        # Timeout verdict: pass if at least one frame was captured.
        if self.get_image != None:
            Report.RearSensor_Test = True
            sample_page.next()

    def check(self):
        try:
            self.btn.expand_event()
            # Operator confirms the preview with the home key.
            if self.btn.home() == 2:
                sipeed_led.w.value(0)
                Report.RearSensor_Test = True
                sample_page.next()
            if self.isconnected == False:
                try:
                    sensor.reset()
                    sensor.set_pixformat(sensor.YUV422)
                    sensor.set_framesize(sensor.QVGA)
                    sensor.set_hmirror(1)
                    sensor.set_vflip(1)
                    sensor.run(1)
                    sensor.skip_frames()
                    self.isconnected = True
                    sipeed_led.w.value(0)
                except Exception as e:
                    Report.RearSensor_Test = False
                    Report.isError = str(e)
                    print(e)
        except Exception as e:
            Report.RearSensor_Test = False
            Report.isError = str(e)
            print(e)

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.RearSensor_Test:
            sample_page.next()
        if self.is_load == False:
            sipeed_led.init(13, 12, 14, 32)
            sipeed_led.w.value(1)
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.get_image = None
            self.isError = None
            self.agent = agent()
            self.agent.event(150, self.check)
            self.agent.event(8000, self.test_event)
            self.is_load = True

    def free(self):
        if self.is_load:
            sample_page.btn.enable = True
            self.is_load = False
            sipeed_led.w.value(1)

    def work(self):
        self.agent.parallel_cycle()
        if self.isconnected:
            try:
                self.get_image = sensor.snapshot()
                #ui.canvas.draw_image(self.get_image, 0, 0)
                # NOTE(review): this REBINDS ui.canvas to the camera frame
                # rather than drawing onto the canvas; the draw_string calls
                # below then write onto the snapshot. Confirm intended.
                ui.canvas = (self.get_image)
            except Exception as e:
                print(e)
        ui.canvas.draw_string(10, 30, "8 RearSensor Test", (127, 127, 255), scale=3)
        ui.canvas.draw_string(10, 70, "isconnected: %s" % (
            str)(self.isconnected), (255, 127, 0), scale=2)
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class RearSensorReport():
    """Result screen for the rear camera test; any key press advances."""

    def __init__(self):
        self.is_load = False

    def key_event(self):
        self.btn.expand_event()
        if self.btn.back() == 2:
            sample_page.next()
        elif self.btn.next() == 2:
            sample_page.next()
        elif self.btn.home() == 2:
            sample_page.next()

    def load(self):
        if self.is_load == False:
            self.is_load = True
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.agent = agent()
            self.agent.event(150, self.key_event)
            #self.agent.event(500, sample_page.next)
        elif Report.RearSensor_Test:
            # Second visit: skip once the camera test has passed.
            sample_page.next()

    def work(self):
        self.agent.cycle()
        y = 20
        ui.canvas.draw_string(10, y, "8 RearSensorReport", (127, 255, 255), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "RearSensor " + ("Pass" if (Report.RearSensor_Test) else "Fail"), (0, 255, 0) if (Report.RearSensor_Test) else (255, 0, 0), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "Press Any-Key Continue", (255, 255, 255), scale=2)

    def free(self):
        if self.is_load:
            pass
            #self.is_load = False
            sample_page.btn.enable = True
class AudioTest():
    """Screen 9: two-stage audio test.

    state 0: play a loop through the speaker, operator confirms with home
             (back/next = fail); state 1: wait for a key; state 2: record
             from the mic, auto-passed when the FFT shows enough energy.
    """
    PlayTest = False
    RecordTest = False

    def __init__(self):
        self.is_load = False
        self.i2c = I2C(I2C.I2C1, freq=100*1000, scl=30, sda=31)
        CubeAudio.init(self.i2c)
        # Number of times this screen has been loaded (used in work()).
        self.count = 0

    def load(self):
        # Skip the screen entirely if the test already passed.
        if Report.Audio_Test:
            sample_page.next()
        if self.is_load == False:
            # i2c init()
            sample_page.btn.enable = False
            self.isconnected = False
            self.isError = None
            self.is_play = False
            self.is_record = False
            self.state = 0
            self.fft_amp = None
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.count += 1
            self.agent = agent()
            self.agent.event(150, self.key_event)
            self.agent.event(500, self.check)
            self.agent.event(16000, self.test_event)
            self.is_load = True

    def key_event(self):
        # back/next fail the current stage, home passes it; moving past
        # stage 2 leaves the screen.
        self.btn.expand_event()
        if self.btn.back() == 2 or self.btn.next() == 2:
            if self.state == 0:
                AudioTest.PlayTest = False
            if self.state == 2:
                AudioTest.RecordTest = False
            self.state += 1
        elif self.btn.home() == 2:
            if self.state == 0:
                AudioTest.PlayTest = True
            if self.state == 2:
                AudioTest.RecordTest = True
            self.state += 1
        if self.state > 2:
            sample_page.next()

    def test_event(self):
        # 16 s timeout: force progress out of the play/record stages.
        if self.state == 0 or self.state == 2:
            self.state += 1

    def free(self):
        if self.is_load:
            # i2c deinit()
            sample_page.btn.enable = True
            self.is_load = False

    def check(self):
        # Lazily set up the codec, then configure I2S pins for the stage.
        try:
            if self.isconnected == False:
                self.isconnected = CubeAudio.check()
            else:
                if self.state == 0 and self.is_play == False:
                    self.is_play = True
                    CubeAudio.ready()
                    from fpioa_manager import fm
                    fm.register(19, fm.fpioa.I2S0_MCLK, force=True)
                    fm.register(35, fm.fpioa.I2S0_SCLK, force=True)
                    fm.register(33, fm.fpioa.I2S0_WS, force=True)
                    fm.register(34, fm.fpioa.I2S0_IN_D0, force=True)
                    fm.register(18, fm.fpioa.I2S0_OUT_D2, force=True)
                    # CubeAudio.i2s.set_sample_rate(22050)
                elif self.state == 1 and self.is_record == False:
                    self.is_record = True
                    CubeAudio.ready(True)
                    CubeAudio.i2s.set_sample_rate(22050)
        except Exception as e:
            #Report.Audio_Test = False
            Report.isError = str(e)
            print(e)

    def work(self):
        self.agent.parallel_cycle()
        ui.canvas.draw_string(10, 30, "9 Audio Test", (127, 127, 255), scale=3)
        ui.canvas.draw_string(10, 70, "isconnected: %s" % (
            str)(self.isconnected), (255, 127, 0), scale=2)
        ui.canvas.draw_string(10, 100, "Test: %s" %
            ('play' if self.state == 0 else 'record'), (255, 127, 0), scale=3)
        #print(time.ticks_ms())
        if self.isconnected:
            if self.state == 0 and self.is_play:
                # Restart the loop sample whenever playback has finished.
                # NOTE(review): `os` is not imported at the top of this file —
                # relies on it being in scope on MicroPython; confirm.
                if CubeAudio.event() == False:
                    CubeAudio.load(os.getcwd() + "/res/sound/loop.wav", 100)
                    #print('self.count', self.count)
                    if self.count > 1:
                        CubeAudio.i2s.set_sample_rate(22050)
                    else:
                        # pass
                        CubeAudio.i2s.set_sample_rate(22050)
            elif self.state == 1:
                ui.canvas.draw_string(10, 200, "Press Any-Key \n Start", (255, 127, 0), scale=3)
            elif self.state == 2 and self.is_record:
                tmp = CubeAudio.i2s.record(1024)
                fft_res = FFT.run(tmp.to_bytes(), 512)
                fft_amp = FFT.amplitude(fft_res)
                # Enough energy in two mid-range bins = recording works.
                if fft_amp[50] > 100 and fft_amp[100] > 100:
                    AudioTest.RecordTest = True
                    sample_page.next()
                # Draw the spectrum as 1-px-wide vertical bars.
                for x_shift in range(240):
                    hist_height = fft_amp[x_shift]
                    ui.canvas.draw_rectangle((x_shift, 0, 1, hist_height), [255,255,255], 1, True)
                    #print((x_shift, 0, 1, hist_height))
        if self.isError != None:
            ui.canvas.draw_string(40, 80, self.isError, (255, 255, 255), scale=2)
            sample_page.next()
class AudioReport():
    """Result screen for the audio test; any key press advances.

    The overall verdict is recomputed on every load from the two stage
    flags on AudioTest.
    """

    def __init__(self):
        self.is_load = False

    def key_event(self):
        self.btn.expand_event()
        if self.btn.back() == 2:
            sample_page.next()
        elif self.btn.next() == 2:
            sample_page.next()
        elif self.btn.home() == 2:
            sample_page.next()

    def load(self):
        if self.is_load == False:
            self.is_load = True
            sample_page.btn.enable = False
            self.btn = sipeed_button()
            # self.btn.config(23, 20, 31)
            self.agent = agent()
            self.agent.event(150, self.key_event)
            #self.agent.event(500, sample_page.next)
        elif Report.Audio_Test:
            sample_page.next()
        # Recompute the overall verdict: both stages must have passed.
        Report.Audio_Test = False
        if AudioTest.PlayTest and AudioTest.RecordTest:
            Report.Audio_Test = True

    def work(self):
        self.agent.cycle()
        y = 20
        ui.canvas.draw_string(10, y, "9 AudioReport", (127, 255, 255), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "PlayTest " + ("Pass" if (AudioTest.PlayTest) else "Fail"), (0, 255, 0) if (AudioTest.PlayTest) else (255, 0, 0), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "Record " + ("Pass" if (AudioTest.RecordTest) else "Fail"), (0, 255, 0) if (AudioTest.RecordTest) else (255, 0, 0), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "Audio " + ("Pass" if (Report.Audio_Test) else "Fail"), (0, 255, 0) if (Report.Audio_Test) else (255, 0, 0), scale=3)
        y += 50
        ui.canvas.draw_string(10, y, "Press Any-Key Continue", (255, 255, 255), scale=2)

    def free(self):
        if self.is_load:
            pass
            #self.is_load = False
            sample_page.btn.enable = True
class SdcardTest():
    """First screen: passes when the board booted from the SD card
    (current working directory is /sd)."""

    def __init__(self):
        self.is_load = False
        self.load()

    def load(self):
        if self.is_load == False:
            self.is_load = True
            # NOTE(review): `os` is not imported at the top of this file —
            # relies on it being in scope on MicroPython; confirm.
            self.result = os.getcwd() == '/sd' # and len(os.listdir('/sd')) > 0
            self.agent = agent()
            # Auto-advance half a second after first load.
            self.agent.event(500, sample_page.next)
        else:
            sample_page.next()

    def work(self):
        self.agent.cycle()
        ui.canvas.draw_string(10, 20, "SdCardTest", (127, 255, 255), scale=3)
        ui.canvas.draw_string(30, 120, "Pass" if (self.result) else "Fail", (0, 255, 0) if (self.result) else (255, 0, 0), scale=8)
        ui.canvas.draw_string(30, 260, "Start Test", (0, 0, 255), scale=3)

    def free(self):
        if self.is_load:
            pass
            #self.is_load = False
if __name__ == "__main__":
    import json
    # Board configuration written to /flash/config.json on first boot.
    cube = {
        "type": "cube",
        "lcd": {
            "height": 240,
            "width": 240,
            "invert": 1,
            "dir": 96
        },
        "freq_cpu": 416000000,
        "freq_pll1": 400000000,
        "kpu_div": 1
    }
    data = cube
    cfg = json.dumps(data)
    #print(cfg)
    try:
        with open('/flash/config.json', 'rb') as f:
            tmp = json.loads(f.read())
            print(tmp)
            if tmp["type"] != data["type"]:
                raise Exception('config.json no exist')
    except Exception as e:
        # Missing/stale config: write it and reboot so it takes effect.
        with open('/flash/config.json', "w") as f:
            f.write(cfg)
        import machine
        machine.reset()
    protect.keep()
    import time, gc
    # gc.collect()
    # gc.collect()
    if len(sample_page.samples) > 0:
        sample_page.samples = []
    # gc.collect()
    button_io.config(10, 11, 16)
    sample_page.key_init()
    # Screens are registered in reverse display order (last added runs first).
    sample_page.add_sample(Report()) # keep
    sample_page.add_sample(AudioReport())
    sample_page.add_sample(AudioTest())
    sample_page.add_sample(RearSensorReport())
    sample_page.add_sample(RearSensorTest())
    sample_page.add_sample(KeyReport())
    sample_page.add_sample(KeyTest())
    sample_page.add_sample(WaitTestStart())
    #sample_page.add_sample(SpmodReport())
    sample_page.add_sample(SpmodTest())
    #sample_page.add_sample(GroveReport())
    sample_page.add_sample(GroveTest())
    #sample_page.add_sample(Msa301Report())
    sample_page.add_sample(Msa301Test())
    #sample_page.add_sample(PowerReport())
    sample_page.add_sample(PowerTest())
    sample_page.add_sample(SdcardTest()) # keep
    #ui.height, ui.weight = int(lcd.width() / 2), int(lcd.height())
    ui.height, ui.weight = 240, 240

    @ui.warp_template(ui.blank_draw)
    #@ui.warp_template(ui.grey_draw)
    @ui.warp_template(sample_page.sample_draw)
    def app_main():
        # One frame: run the active screen's work() and push to the LCD.
        ui.display()

    import time
    last = time.ticks_ms() - 1
    # Main loop; everything after the first `while True` below never exits,
    # so the second loop and the try/except variant are dead code kept for
    # reference/debugging.
    while True:
        last = time.ticks_ms()
        app_main()
        protect.keep()
        #print((int)(1000 / (time.ticks_ms() - last)), 'fps')
        continue
    while True:
        app_main()
        protect.keep()
        continue
        try:
            print((int)(1000 / (time.ticks_ms() - last)), 'fps')
            last = time.ticks_ms()
            app_main()
            protect.keep()
            #print(time.ticks_ms(), 'ram total : ' + str(gc.mem_free() / 1024) + ' kb')
            #time.sleep(0.1)
        except KeyboardInterrupt:
            protect.stop()
            raise KeyboardInterrupt()
        except MemoryError as e:
            print(time.ticks_ms(), 'ram total : ' + str(gc.mem_free() / 1024) + ' kb')
            #print(e)
        except Exception as e:
            print(e)
|
[
"junhuanchen@qq.com"
] |
junhuanchen@qq.com
|
065c76ebb916b6fbb222629b18d12f979ae38395
|
b6b30fb06124883b074144c419b43d9182efcdff
|
/DS/build_graph.py
|
82f2ad4ba68415e6727f264c0a7a1c5ad387756f
|
[] |
no_license
|
JohnnySunkel/BlueSky
|
da9f5107034289bfbdd3ba40458f9b9bd8d01a13
|
5a20eba9ef7509a5a7b7af86e7be848242e1a72f
|
refs/heads/master
| 2021-07-07T09:57:37.256950
| 2020-09-02T23:06:46
| 2020-09-02T23:06:46
| 166,883,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
from graph import Graph, Vertex


def buildGraph(wordFile):
    """Build a word-ladder graph from *wordFile* (one word per line).

    Words that differ in exactly one letter are grouped into buckets keyed
    by the word with that letter replaced by '_'; every distinct pair in a
    bucket gets an edge in both directions, so the graph behaves as
    undirected.

    Returns the populated Graph.
    """
    d = {}
    g = Graph()
    # BUGFIX: the file handle was previously never closed; `with` guarantees
    # deterministic cleanup even if parsing raises.
    with open(wordFile, 'r') as wfile:
        # create buckets of words that differ by one letter
        for line in wfile:
            # NOTE(review): assumes every line ends with '\n'; a final line
            # without one would lose its last character — consider .strip().
            word = line[:-1]
            for i in range(len(word)):
                bucket = word[:i] + '_' + word[i + 1:]
                d.setdefault(bucket, []).append(word)
    # add vertices and edges for words in the same bucket
    for bucket in d.keys():
        for word1 in d[bucket]:
            for word2 in d[bucket]:
                if word1 != word2:
                    g.addEdge(word1, word2)
    return g
|
[
"noreply@github.com"
] |
JohnnySunkel.noreply@github.com
|
45829c58f5104dd794e9512763cf734ea3933c34
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02677/s846980007.py
|
301ff67aebaef7d22f6917eeb7d949005f4daaa8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
# Distance between the tips of the hour hand (length a) and minute hand
# (length b) at h hours m minutes.
a,b,h,m=map(int,input().split())
# Hour hand: 30 deg/hour + 0.5 deg/min; minute hand: 6 deg/min.
# Their angular difference is h*30 - 5.5*m degrees.
kk=h*30-5.5*m
if kk <0:
    kk+=360
import math
c= math.cos(math.radians(kk))
# Law of cosines: d^2 = a^2 + b^2 - 2ab*cos(theta)
print((a*a+b*b-2*a*b*c)**0.5)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
247324b6253b7eedd461b3826e0dfbeeab2909a0
|
0e3697eae327b5e86e6dbc7295ceaf8fb620364c
|
/Advent of Code/2021/day_20.py
|
41df0accf39afe0f8877759dbcde16e12dddc801
|
[
"MIT"
] |
permissive
|
AbeleMM/Algorithmic-Problems
|
f89cd9a11660ad0f54819a50739df8cc4ba32c89
|
212ad9dd6a3f04f426ad7789b3de11605c426906
|
refs/heads/master
| 2023-01-11T20:45:33.392652
| 2022-12-06T09:03:17
| 2022-12-26T11:03:29
| 201,615,751
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
def read_in():
    """Parse the puzzle from stdin.

    First line: the enhancement algorithm; blank line; then the image.
    Pixels are mapped '.' -> '0', anything else -> '1' so neighbourhoods
    can be read as binary strings.  Returns (algorithm, image) where the
    image carries a 3-cell dark border on every side.
    """
    def mapping(x):
        return '0' if x == '.' else '1'

    def read_row(s):
        # Pad three dark pixels on the left; r[:3] is still those zeros,
        # so extending with it pads three on the right too.
        r = ['0', '0', '0']
        r.extend((map(mapping, s)))
        r.extend(r[:3])
        return r

    algorithm = list(map(mapping, input()))
    input()  # skip the blank separator line
    row = read_row(input())
    # Three all-dark rows above the image...
    image = [['0' for _ in range(len(row))] for _ in range(3)]
    image.append(row)
    while value := input():
        row = read_row(value)
        image.append(row)
    # ...and three all-dark rows (copies of the top padding) below it.
    image.extend(image[:3])
    return algorithm, image
def get_algo_ind(img, i, j):
    """Return the 9-bit enhancement index for pixel (i, j).

    Reads the 3x3 neighbourhood row by row (top-left to bottom-right),
    treating the '0'/'1' characters as binary digits.
    """
    bits = ''.join(
        img[r][c]
        for r in (i - 1, i, i + 1)
        for c in (j - 1, j, j + 1)
    )
    return int(bits, 2)
def get_count_enhanced(algo, init_img, count):
    """Apply the enhancement algorithm `count` times and return the number
    of lit ('1') pixels in the interior of the final image."""
    img = init_img
    for _ in range(count):
        # The infinite background is uniform, so its next value is whatever
        # the algorithm maps an all-background 3x3 neighbourhood to
        # (img[0][0] repeated nine times, read as binary -> 0 or 511).
        fill_char = algo[int(img[0][0] * 9, 2)]
        new_img = [[fill_char for _ in range(len(img[0]) + 2)] for _ in range(len(img) + 2)]
        # Only interior pixels are recomputed; the 2-cell margin keeps the
        # 3x3 window inside the padded image.
        for i in range(2, len(img) - 2):
            for j in range(2, len(img[i]) - 2):
                new_img[i + 1][j + 1] = algo[get_algo_ind(img, i, j)]
        img = new_img
    # Count lit pixels, excluding the (possibly lit) padding border.
    res = 0
    for row in img[3:-3]:
        for e in row[3:-3]:
            res += e == '1'
    return res
def part_one(data):
    """Print the lit-pixel count after two enhancement passes."""
    algo, image = data
    print(get_count_enhanced(algo, image, 2))
def part_two(data):
    """Print the lit-pixel count after fifty enhancement passes."""
    algo, image = data
    print(get_count_enhanced(algo, image, 50))
if __name__ == '__main__':
    # Parse stdin once, then solve both parts from the same data.
    d = read_in()
    part_one(d)
    print()
    part_two(d)
|
[
"AbeleMM@users.noreply.github.com"
] |
AbeleMM@users.noreply.github.com
|
e4c68679cafe1640db49b7d5c9db68c7b36ddf7b
|
76adec78cb739508171a13b8f512f163f9db0aba
|
/Matrix/squares_in_matrix.py
|
3f0eee78aa8a6ceccc92fa118185f81121f40369
|
[] |
no_license
|
heenach12/pythonpractice
|
0c95708c9bad8dace8f0346de746829eb5685c07
|
d4e3cc46ae9f5001d1ebfee6d3c3222d80962156
|
refs/heads/master
| 2023-07-14T08:46:27.337761
| 2021-08-28T07:45:25
| 2021-08-28T07:45:25
| 400,732,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
class Square_matrix():
    """Counts axis-aligned squares contained in an m x n grid of cells."""

    def squaresinmatrix(self, m, n):
        """Return the total number of squares in an m x n grid.

        A k x k square can be placed in (m - k + 1) * (n - k + 1)
        positions, so the total is the sum over k = 1 .. min(m, n).

        Bug fix: the original approximated the count with repeated integer
        division (res += m*n; prod //= 2; ...), which is wrong -- e.g. it
        returned 4 for a 2x2 grid (correct answer 5) and 18 for 4x3
        (correct answer 20).
        """
        total = 0
        for k in range(1, min(m, n) + 1):
            total += (m - k + 1) * (n - k + 1)
        return total
# Driver: first line is the number of test cases; each case is "m n".
t = int(input())
for i in range(t):
    m, n = list(map(int, input().strip().split()))
    ob = Square_matrix()
    print(ob.squaresinmatrix(m, n))
|
[
"heenachoudhary802@gmail.com"
] |
heenachoudhary802@gmail.com
|
e4075ed7d3eaf24aaba9038214a34b18d474a666
|
f098c361ee79bb8b7a8402fcf20b37f17fb36983
|
/Back-End/Python/Basics/Part -1 - Functional/02 - Numeric Types/floor.py
|
f2dcda98601c25c620125eab47f87b31f525f2c2
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
rnsdoodi/Programming-CookBook
|
4d619537a6875ffbcb42cbdaf01d80db1feba9b4
|
9bd9c105fdd823aea1c3f391f5018fd1f8f37182
|
refs/heads/master
| 2023-09-05T22:09:08.282385
| 2021-10-31T11:57:40
| 2021-10-31T11:57:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
import math

# --- math.floor basics ----------------------------------------------------
# floor() rounds toward negative infinity (unlike int(), which truncates).
print(math.floor(3.999999999))
# 3
math.floor(-3.0000001)  # evaluates to -4; result discarded, kept from the original demo
# -4

# --- true division vs. floor division, positive operands ------------------
a, b = 33, 16
print(a / b)
print(a // b)             # same as floor() for positive operands
print(math.floor(a / b))  # same
# 2.0625
# 2
# 2

# --- trunc() vs. floor(), negative operands -------------------------------
a, b = -33, 16
print('{0}/{1} = {2}'.format(a, b, a / b))
print('trunc({0}/{1}) = {2}'.format(a, b, math.trunc(a / b)))
print('{0}//{1} = {2}'.format(a, b, a // b))
print('floor({0}//{1}) = {2}'.format(a, b, math.floor(a / b)))
# -33/16 = -2.0625
# trunc(-33/16) = -2
# -33//16 = -3
# floor(-33//16) = -3


# --- The Modulo Operator --------------------------------------------------
# The modulo and floor-division operators always satisfy:
#
#   a = b * (a // b) + a % b
def _show_division_identity(a, b):
    """Print /, //, % for the pair and verify the floor-division identity."""
    print('{0}/{1} = {2}'.format(a, b, a / b))
    print('{0}//{1} = {2}'.format(a, b, a // b))
    print('{0}%{1} = {2}'.format(a, b, a % b))
    print(a == b * (a // b) + a % b)


# Same four sign combinations the original walked through one by one.
for pair in ((13, 4), (-13, 4), (13, -4), (-13, -4)):
    _show_division_identity(*pair)
# 13/4 = 3.25,  13//4 = 3,  13%4 = 1,  True
# -13/4 = -3.25, -13//4 = -4, -13%4 = 3, True
# 13/-4 = -3.25, 13//-4 = -4, 13%-4 = -3, True
# -13/-4 = 3.25, -13//-4 = 3, -13%-4 = -1, True
|
[
"58447627+Koubae@users.noreply.github.com"
] |
58447627+Koubae@users.noreply.github.com
|
cd154bf101ef47cd31ac6f80fcbc570fc3dae9be
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_snip.py
|
87f6e292da0556cd1a588869072783395c07f943
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
#calss header
class _SNIP():
def __init__(self,):
self.name = "SNIP"
self.definitions = [u'to cut something with scissors, usually with small, quick cuts: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
dbc40bd0ffa26b7b59b9253a4a6a002227cfee1b
|
984167e1f9aca3b6246e055e9fbdc3ddcb7b6550
|
/test.py
|
ae3ecd8cb3cc298cb2db7901d9bd39dc4db34334
|
[] |
no_license
|
pandehoST/Assignment1
|
1f30e8495d43811c1e7957ace7e349552747a30f
|
e08ebef183dc7cf515fe17813b1264ba757283d3
|
refs/heads/main
| 2023-06-17T04:03:53.788299
| 2021-07-08T12:58:36
| 2021-07-08T12:58:36
| 383,358,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# NOTE(review): this file does not parse -- it reads as pseudo-code or
# lecture notes rather than runnable Python.  Comments below flag the
# concrete problems; the original text is left untouched.
def add():
    id = input("Enter")  # shadows builtin id(); prompt text looks incomplete
    ...
    booking = bk.booking(id, ...)  # 'bk' and the '...' args are undefined here
    record_list[id] = booking      # treats record_list as a dict ...
    record_list.append()           # ... but also as a list, and with no argument
def update():
    id = input()
    if id in record_list:
        package_name=
        ...
        # 'packagename' vs 'package_name' -- two different spellings; 'name' undefined
        if len(packagename) > 0:
            record_list[id].set_packagename(package_name)
        if len(name) >0:
            record_list[id].set_name(name)
        print(is updated)
def delete():
    id = input()
    if id in rl:
        rl。pop(id, none)
|
[
"noreply@github.com"
] |
pandehoST.noreply@github.com
|
67d7512fadebefb70e8ee6d6d1b11b69f6dfe6a2
|
e40388ddde8a5511714d5dcdfc14f502ce07e10e
|
/python_basics/dictionary/main.py
|
0c7533b64a78cffda1e361bf3810afc6e5f373fa
|
[] |
no_license
|
bfaure/ctci
|
b5f4fa1aae79d2b006242c0cce56f232f5e96c48
|
2dec5665525547f4e3f1acb0b34bc6da80ba6938
|
refs/heads/master
| 2021-01-21T22:19:16.585866
| 2017-11-14T22:23:13
| 2017-11-14T22:23:13
| 102,149,317
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
# Names sampled uniformly when generating the synthetic dataset.
class_names = [ "jack","bob","mary","jeff","ann","pierre",
                "martha","clause","pablo","susan","gustav"]

def create_dataset(num_entries=50000000, path="data.txt"):
    """Write *num_entries* randomly chosen names, one per line, to *path*.

    The defaults reproduce the original behaviour (50 million lines into
    ``data.txt``); both values are now parameters so that small datasets
    can be generated for testing.
    """
    import random
    # 'with' guarantees the file is closed even if a write fails
    # (the original relied on an explicit close()).
    with open(path, "w") as f:
        for _ in range(num_entries):
            f.write(random.choice(class_names) + "\n")
def read_dataset_list(path="data.txt"):
    """Count occurrences of each name in *path* using a parallel list.

    Fixes over the original:
    * ``line`` kept its trailing newline, so ``class_names.index(line)``
      always raised ValueError -- the line is now stripped;
    * the Python 2 ``print`` statement (a py3 SyntaxError) is a call;
    * the counts (ordered as ``class_names``) are also returned, making
      the function usable beyond its print side effect.
    """
    class_counts = [0] * len(class_names)
    with open(path, "r") as f:
        for line in f:
            idx = class_names.index(line.strip())
            class_counts[idx] += 1
    print(class_counts)
    return class_counts
def read_dataset_dict(path="data.txt"):
    """Count occurrences of each name in *path* using a dict (O(1) lookup).

    Fixes over the original:
    * ``line`` kept its trailing newline, so ``class_counts[line]``
      always raised KeyError -- the line is now stripped;
    * the Python 2 ``print`` statement (a py3 SyntaxError) is a call;
    * the counts dict is also returned.
    """
    class_counts = {c: 0 for c in class_names}
    with open(path, "r") as f:
        for line in f:
            class_counts[line.strip()] += 1
    print(class_counts)
    return class_counts
import time

# Time the (large) dataset generation.
start = time.time()
create_dataset()
# Parentheses fix two bugs: the Python 2 print statement, and the
# precedence error '"..." % time.time() - start', which applied % first
# and then tried to subtract a float from a string (TypeError).
print("Dataset creation took %0.1f seconds" % (time.time() - start))
|
[
"bfaure23@gmail.com"
] |
bfaure23@gmail.com
|
c445768f929c7b44c66322e12d001e7995a741bb
|
0600766f7e0691bb600e22067f710442c88fcd2d
|
/libs/easyutils/__init__.py
|
4ff09bf01c5a507e18f4aa4ee9efef32101298a4
|
[] |
no_license
|
ykf666/StockHelper
|
588a0312fe583ab45e7db1c958340f259978aac7
|
b486fc35e5610af04fae9f4921a2fc71262fdeee
|
refs/heads/master
| 2022-12-13T21:33:19.408585
| 2021-02-24T07:05:17
| 2021-02-24T07:05:17
| 114,801,974
| 1
| 0
| null | 2022-11-22T03:29:24
| 2017-12-19T19:12:58
|
Python
|
UTF-8
|
Python
| false
| false
| 69
|
py
|
from .timeutils import *
from .stock import *
__version__ = '1.0.0'
|
[
"yan.kefei@opg.cn"
] |
yan.kefei@opg.cn
|
577b6a661b3d03722b9f84cb5c6a4b2f47486789
|
9c04b4480fa36f7f82003c1bc65917a4467a6de8
|
/labs/tests/test_stream.py
|
791d6433f0ddf7ae5de4a513fb2ff47d5cd19a71
|
[
"Apache-2.0"
] |
permissive
|
aojea/network-performance-automation
|
c82d9cb99bbc4641850f44f50ed5dc5303e47eff
|
21a7c8bfde9ed0d6e60c12b9578b4c0060dab31b
|
refs/heads/master
| 2020-07-16T17:38:47.308520
| 2019-09-24T06:10:47
| 2019-09-24T06:10:47
| 205,833,997
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
import pytest
import time
import stl_path
from trex_stl_lib.api import *
"""
An example on how to use TRex for functional tests
"""
def test_single_continuous_stream(trex):
    """Send one 20-packet UDP burst on one port and assert every packet
    arrives on the other port."""
    tx_port, rx_port = trex.get_all_ports()
    trex.reset(ports=[tx_port, rx_port])

    # UDP packet used as the burst payload
    pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)

    # burst of 20 packets at 5 pps
    burst = STLStream(packet=STLPktBuilder(pkt),
                      mode=STLTXSingleBurst(pps=5, total_pkts=20))
    trex.add_streams(burst, ports=tx_port)

    # cap the run at 10 seconds so the test cannot hang forever
    trex.start(ports=tx_port, duration=10)
    trex.wait_on_traffic()

    received = trex.get_stats()[rx_port]['ipackets']
    assert received == 20
def test_bidirectional_continuous_stream(trex):
    """Send mirrored TCP bursts on both ports and assert the combined
    received count equals both bursts.

    Bug fix: the original contained a Python 2 ``print stats`` statement,
    a SyntaxError under Python 3 (the rest of the file already uses
    ``print(...)`` calls).
    """
    burst_size = 10
    packets_sec = 2
    port_0, port_1 = trex.get_all_ports()

    # base packets for each direction (src/dst and TCP ports mirrored)
    base_pkt_dir_a = Ether()/IP(src="16.0.1.1",dst="48.0.1.1")/TCP(dport=8812,sport=1025)
    base_pkt_dir_b = Ether()/IP(src="48.0.1.1",dst="16.0.1.1")/TCP(dport=1025,sport=8812)

    # pad every packet to 300 bytes
    pad = (300 - len(base_pkt_dir_a)) * 'x'

    # one burst_size stream per direction
    s1 = STLStream(packet = STLPktBuilder(base_pkt_dir_a/pad),
                   mode = STLTXSingleBurst(pps = packets_sec, total_pkts = burst_size))
    s2 = STLStream(packet = STLPktBuilder(base_pkt_dir_b/pad),
                   mode = STLTXSingleBurst(pps = packets_sec, total_pkts = burst_size))

    # prepare the ports
    trex.reset(ports = [port_0, port_1])
    trex.add_streams(s1, ports = port_0)
    trex.add_streams(s2, ports = port_1)

    # start traffic with limit of 20 seconds (otherwise it will continue forever)
    trex.start(ports = [port_0, port_1], duration = 20)
    trex.wait_on_traffic()

    stats = trex.get_stats()
    print(stats)
    ipackets = stats['total']['ipackets']
    print(" Packets Received: {0} ".format(ipackets))
    # two streams X 2 ports
    assert(ipackets == (burst_size*2) )
|
[
"antonio.ojea.garcia@gmail.com"
] |
antonio.ojea.garcia@gmail.com
|
a146e5a7f1139e7326d4b3966a9602de0661738d
|
6519a3ddd2e5c9b2fceb45f18fba607bd769fa10
|
/basic_wallet_p/wallet.py
|
63bc55fc4a4cc8dd9af963cbc5707c15dede6377
|
[
"MIT"
] |
permissive
|
oraclown/Blockchain
|
b1a528d265b1c5f293cb363588fac82cf1191859
|
8bc8d9499fc9075ee6b17b8c503a21f71b7e7a01
|
refs/heads/master
| 2022-08-23T14:29:22.332054
| 2020-05-24T20:10:35
| 2020-05-24T20:10:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
import streamlit as st
import requests
import pandas as pd
def get_chain():
    """Download the full blockchain from the local node.

    Returns (chain, message); chain is None when the request or the JSON
    decoding fails.
    """
    try:
        response = requests.get("http://127.0.0.1:5000/chain")
    except:  # original deliberately catches everything (bad URL, server down)
        return None, "Invalid url"
    try:
        payload = response.json()
    except ValueError:
        return None, "Error: non json response"
    return payload["chain"], "Downloaded full chain"
def user_overview(chain, user_id):
    """Compute *user_id*'s balance and transaction history from *chain*.

    chain : list of block dicts; block 0 (genesis) is skipped.  Each
        transaction dict carries 'sender', 'recipient' and 'amount'.
    Returns (balance message, DataFrame with columns block/to-from/amount);
    outgoing payments are recorded with a negative amount.

    Fixes over the original:
    * rows are collected in a list and the DataFrame is built once --
      ``DataFrame.append`` was deprecated and removed in pandas 2.0,
      and per-row appends were quadratic anyway;
    * amounts are normalised with int() everywhere (the original used
      int() for the balance but stored the raw value -- possibly a
      string -- in the history).
    """
    balance = 0
    rows = []
    for block in chain[1:]:
        for payment in block["transactions"]:
            amount = int(payment["amount"])
            if payment["sender"] == user_id:
                balance -= amount
                rows.append({"block": block["index"],
                             "to/from": payment["recipient"],
                             "amount": -amount})
            if payment["recipient"] == user_id:
                balance += amount
                rows.append({"block": block["index"],
                             "to/from": payment["sender"],
                             "amount": amount})
    history = pd.DataFrame(rows, columns=['block', 'to/from', 'amount'])
    return f"current balance of {user_id}: {balance}", history
def highlight(df):
    """Return a 3-cell row style: green for a positive amount, pink otherwise."""
    if df.amount > 0:
        style = 'background-color: lightgreen'
    else:
        style = 'background-color: pink'
    return [style, style, style]
# Streamlit UI: look up a user's balance and transaction history.
st.title("Wallet 1.0")
user_id = st.text_input("Enter a user_id")
chain = None
if st.button("Get user balance and transaction history"):
    if user_id:
        chain, response = get_chain()
        st.write(response)
        st.json(chain)
        # NOTE(review): if get_chain() failed, chain is None here and
        # user_overview(None, ...) will raise -- consider guarding.
        user_balance, user_hist = user_overview(chain, user_id)
        st.write(user_balance)
        # colour-code incoming (green) vs outgoing (pink) rows
        st.table(user_hist.style.apply(highlight, axis=1))
    else:
        st.write("Missing user_id")
|
[
"owenburton@users.noreply.github.com"
] |
owenburton@users.noreply.github.com
|
9f8a1f8c093e488c63df23323d44aad32bbf462c
|
9e0f08b05a4334eb6550c26de18dcf51b99b384e
|
/ncdeltaprocess/delta_process.py
|
e85338513959606d569dd9a9d8f5f7546f6e6f5c
|
[] |
no_license
|
isabella232/ncdeltaprocess
|
93e2fcfb74a4caa21872c4ab8e5081980d9b318b
|
d5a52dc39d2911b55daa88ceb4601089bd55d295
|
refs/heads/master
| 2022-09-29T17:49:14.575029
| 2020-06-07T19:37:15
| 2020-06-07T19:37:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,488
|
py
|
from .document import *
from . import block
from . import block as bks
from . import node
__all__ = ['TranslatorBase', 'TranslatorQuillJS']
class TranslatorBase(object):
    """This is a base class for Delta formats.

    Subclasses register (test, creator) pairs in ``block_registry`` /
    ``node_registry``; plain text blocks and string nodes are handled by
    default.
    """
    def __init__(self):
        self.block_registry = {
            # text blocks will be handled by default
        }
        self.node_registry = {
            #self.string_node_test: self.make_string_node, # we will make the string the default
        }
        self.settings = {
            'list_text_blocks_are_p': True
        }
    def translate_to_html(self, delta_ops):
        # Convenience wrapper: build the internal tree, then render it.
        return self.ops_to_internal_representation(delta_ops).render_html()
    def ops_to_internal_representation(self, delta_ops):
        """Build a QDocument tree from a list of delta operations.

        For each block yielded by yield_blocks, exactly one registered
        block test may match (more than one is an error, none falls back
        to a standard text block); the same dispatch is then applied per
        node within the block.
        """
        this_document = QDocument()
        previous_block = None
        for qblock in self.yield_blocks(delta_ops):
            # first do the block
            arguments = (qblock, this_document, previous_block)
            matched_tests = tuple(test for test in self.block_registry.keys() if test(*arguments))
            if len(matched_tests) == 1:
                this_block = self.block_registry[matched_tests[0]](*arguments)
            elif len(matched_tests) > 1:
                raise ValueError("More than one test matched")
            else:
                # assume it is a standard text block
                this_block = self.make_standard_text_block(*arguments)
            previous_block = this_block
            # now do the nodes
            for this_content in qblock['contents']:
                node_arguments = {'block': this_block, 'contents': this_content['insert'], 'attributes': this_content.get('attributes', {}).copy()}
                node_matched_tests = tuple(test for test in self.node_registry.keys() if test(**node_arguments))
                if len(node_matched_tests) == 1:
                    previous_node = self.node_registry[node_matched_tests[0]](**node_arguments)
                elif len(node_matched_tests) > 1:
                    raise ValueError("More than one test matched.")
                else:
                    if isinstance(this_content['insert'], str):
                        previous_node = self.make_string_node(**node_arguments)
                    else:
                        raise ValueError("I don't know how to add this node. Default string handler failed. Node contents is %s" % node_arguments['contents'])
                ## The following line allows custom node creators to split a block in two if necessary -- for example
                ## a custom node adding a horizonal rule might do so.
                this_block = previous_node.parent
        return this_document
    def is_block(self, insert_instruction):
        return False # For extension later. Currently assumed that blocks are only marked by \n
    def yield_blocks(self, delta_ops):
        """Yields each block-level chunk, though without nesting blocks, which will be the responsibility of another function.
        Has the effect of de-normalizing Quilljs's compact representation.
        Blocks are yielded as a dictionary, consisting of
        {'contents': [...] # a list of dictionaries containing the nodes for the block.
         'attributes': {} # a dictionary containing the attributes for the block
        }
        """
        block_marker = '\n' # currently assumed that there is one, and one only type of block marker.
        raw_blocks = []  # NOTE(review): never used below
        temporary_nodes = [] # the block marker comes at the end of the block, so we may not have one yet.
        block_keys = ['attributes']
        for counter, instruction in enumerate(delta_ops):
            if 'insert' not in instruction:
                raise ValueError("This parser can only deal with documents.")
            insert_instruction = instruction['insert']
            if isinstance(insert_instruction, str):
                if not 'attributes' in instruction:
                    instruction['attributes'] = {}
                block_attributes = instruction['attributes']
                #if insert_instruction.endswith(block_marker):
                #    # then we have complete blocks.
                #    last_node_completes_block = True
                #else:
                #    last_node_completes_block = False
                if block_marker not in insert_instruction:
                    # plain text fragment: accumulate until a marker arrives
                    temporary_nodes.append(instruction)
                elif insert_instruction == block_marker:
                    # put the newline on the end of the last instruction, just in case we need it
                    if not 'attributes' in instruction:
                        instruction['attributes'] = {}
                    block_attributes = instruction['attributes']
                    temporary_nodes.append({"insert": "\n", "attributes": block_attributes.copy()})
                    yield_this = {'contents': temporary_nodes[:],}
                    for k in instruction.keys():
                        if k in block_keys:
                            yield_this[k] = instruction[k]
                    temporary_nodes = []
                    yield yield_this
                else:
                    # the insert mixes text and block markers: split and emit
                    # one block per marker, keeping any trailing remainder
                    sub_blocks = insert_instruction.split(block_marker)
                    sub_blocks_len = len(sub_blocks)
                    if sub_blocks[-1] == '':
                        sub_blocks.pop()
                        last_node_completes_block = True
                    else:
                        last_node_completes_block = False
                    for this_c, contents in enumerate(sub_blocks):
                        if last_node_completes_block or this_c < sub_blocks_len-1:
                            temporary_nodes.append({'insert': contents})
                            for k in instruction.keys():
                                if k in block_keys:
                                    temporary_nodes[-1][k] = instruction[k]
                            yield_this = {'contents': temporary_nodes[:]}
                            temporary_nodes = []
                            for k in instruction.keys():
                                if k in block_keys:
                                    yield_this[k] = instruction[k]
                            yield yield_this
                        else:
                            # on the last part of an insert statement but not a complete block
                            temporary_nodes.append({'insert': contents})
                            for k in instruction.keys():
                                if k in block_keys:
                                    temporary_nodes[-1][k] = instruction[k]
            else:
                # non-string insert (embed): accumulate unless a subclass
                # declares it a block in its own right via is_block()
                if not self.is_block(insert_instruction):
                    temporary_nodes.append(instruction)
                else:
                    yield(instruction)
    def make_standard_text_block(self, qblock, this_document, previous_block):
        # Default block creator: a plain paragraph appended to the document.
        this_block = this_document.add_block(
            block.TextBlockParagraph(parent=this_document,
                                     last_block=previous_block,
                                     attributes=qblock['attributes'].copy())
        )
        return this_block
    def make_string_node(self, block, contents, attributes):
        # Default node creator: a text line; code blocks keep their newlines.
        if isinstance(block, bks.TextBlockCode):
            return block.add_node(node.TextLine(contents=contents, attributes=attributes, strip_newline=False))
        else:
            return block.add_node(node.TextLine(contents=contents, attributes=attributes, strip_newline=True))
class TranslatorQuillJS(TranslatorBase):
    """This class converts structures found in the QuillJS flavour of Delta formats."""
    def __init__(self):
        super(TranslatorQuillJS, self).__init__()
        # Register QuillJS-specific block tests and their creators.
        self.block_registry.update({
            self.header_test: self.make_header_block,
            self.list_test: self.make_list_block,
            self.better_table_test: self.make_better_table_blocks,
            self.table_cell_test: self.make_table_cell_block,
            self.code_block_test: self.make_code_block,
            # text blocks will be handled by default
        })
        self.node_registry.update({
            #self.string_node_test: self.make_string_node, # we will make the string the default
            self.image_node_test: self.make_image_node,
        })
    ##### Test functions and node/block creators follow #####
    def header_test(self, qblock, this_document, previous_block):
        # True when the block carries a 'header' attribute.
        if 'header' in qblock['attributes']:
            return True
        else:
            return False
    def list_test(self, qblock, this_document, previous_block):
        # True when the block carries a 'list' attribute.
        if 'list' in qblock['attributes']:
            return True
        else:
            return False
    def better_table_test(self, qblock, this_document, previous_block):
        # True for quill-better-table blocks (column defs or cell lines).
        if qblock['attributes']:
            if qblock['attributes'].get('table-col', False) or qblock['attributes'].get('table-cell-line', False):
                return True
            else:
                return False
        return False
    def table_cell_test(self, qblock, this_document, previous_block):
        # Truthy when the block belongs to a plain Quill table cell.
        if qblock['attributes']:
            return qblock['attributes'].get('table', False)
    def code_block_test(self, qblock, this_document, previous_block):
        # True when the block carries a 'code-block' attribute.
        if 'code-block' in qblock['attributes']:
            return True
        else:
            return False
    def make_header_block(self, qblock, this_document, previous_block):
        # Heading block appended directly to the document.
        this_block = this_document.add_block(
            block.TextBlockHeading(
                parent=this_document, last_block=previous_block, attributes=qblock['attributes'].copy()
            )
        )
        return this_block
    def make_code_block(self, qblock, this_document, previous_block):
        # should we be adding the contents of this block to a previous
        # textblock if there is one?
        # if so, we probably just want to return the previous block, so that the text can be added into it.
        if isinstance(previous_block, block.TextBlockCode) and \
           previous_block.attributes == qblock['attributes']: # relying on standard python mapping comparison.
            return previous_block
        this_block = this_document.add_block(
            block.TextBlockCode(
                parent=this_document, last_block=previous_block, attributes=qblock['attributes'].copy()
            )
        )
        return this_block
    def make_standard_text_block(self, qblock, this_document, previous_block):
        # NOTE(review): identical to the base-class implementation.
        this_block = this_document.add_block(
            block.TextBlockParagraph(parent=this_document,
                                     last_block=previous_block,
                                     attributes=qblock['attributes'].copy()
                                     )
        )
        return this_block
    def make_list_block(self, qblock, this_document, previous_block):
        """Place a list-item paragraph at the depth given by the 'indent'
        attribute, reusing or building the ListBlock/ListItemBlock chain."""
        container_block = None
        required_depth = qblock['attributes'].get('indent', 0)
        # see if the previous block was part of a list
        lb_parents = [p for p in list(previous_block.get_parents()) if isinstance(p, block.ListItemBlock)]
        if lb_parents and lb_parents[0].attributes.get('indent', 0) == required_depth:
            # perfect, we can use this
            container_block = previous_block.parent
        elif lb_parents and lb_parents[0].attributes.get('indent', 0) < required_depth:
            # we are part of a list, but it isn't deep enough
            container_block = lb_parents[0]
            while required_depth > container_block.attributes.get('indent', 0):
                current_depth = container_block.attributes.get('indent', 0)
                if isinstance(container_block, block.ListBlock):
                    container_block = container_block.add_block(
                        block.ListItemBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                    )
                    container_block.attributes['indent'] = current_depth + 1
                container_block = container_block.add_block(
                    block.ListBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                )
                container_block.attributes['indent'] = current_depth + 1
        else:
            # see if there is a parent list item that we can latch on to.
            container_block = None
            for candidate_block in lb_parents:
                if candidate_block.attributes.get('indent', 0) == required_depth:
                    container_block = candidate_block
                    break
            else:
                # perhaps the previous paragraph has the depth we need - but don't use it for a depth of 0 -- use the root document instead.
                if required_depth > 0 and previous_block.attributes.get('indent', 0) == required_depth:
                    container_block = previous_block.add_block(
                        block.ListBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                    )
                else:
                    # Bail out and put it on the root document, building up to the depth needed.
                    container_block = this_document.add_block(
                        block.ListBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                    )
                    while required_depth > container_block.attributes.get('indent', 0):
                        current_depth = container_block.attributes.get('indent', 0)
                        if isinstance(container_block, block.ListBlock):
                            container_block = container_block.add_block(
                                block.ListItemBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                            )
                            container_block.attributes['indent'] = current_depth + 1
                        container_block = container_block.add_block(
                            block.ListBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
                        )
                        container_block.attributes['indent'] = current_depth + 1
        # finally, we should have a list block to add our current block to:
        # It should be wrapped in a list item block:
        container_block = container_block.add_block(
            block.ListItemBlock(parent=container_block, last_block=container_block, attributes=qblock['attributes'].copy())
        )
        if self.settings['list_text_blocks_are_p']:
            this_block = container_block.add_block(
                block.TextBlockParagraph(
                    parent=container_block,
                    last_block=previous_block,
                    attributes=qblock['attributes'].copy()
                )
            )
        else:
            this_block = container_block.add_block(
                block.TextBlockPlain(
                    parent=container_block,
                    last_block=previous_block,
                    attributes=qblock['attributes'].copy()
                )
            )
        return this_block
    def make_table_cell_block(self, qblock, this_document, previous_block):
        # This can be exended easily to cover the
        # https://codepen.io/soccerloway/pen/WWJowj
        # Better table plugin, that allows multi-line paragraphs in cells
        # and multi-span cells. The appraoch is the same -- except that the
        # cells and rows both have an id, and the first check should be whether a block
        # is part of the previous cell.
        # https://github.com/soccerloway/quill-better-table
        container_row = None
        container_table = None
        # best case scenario - we are in the same table row are the previous block
        if previous_block and previous_block.parent and previous_block.parent.parent and isinstance(previous_block.parent.parent, block.TableRowBlock) and \
           previous_block.parent.parent.row_id == qblock['attributes']['table']: # this would also be in attributes of previous block
            container_row = previous_block.parent.parent
            container_table = previous_block.parent.parent.parent
        # next best case scenario - we are still in a table, but we need a new row
        elif previous_block and previous_block.parent and previous_block.parent.parent and isinstance(previous_block.parent.parent, block.TableRowBlock):
            container_table = previous_block.parent.parent
            container_row = container_table.add_block(
                block.TableRowBlock(qblock['attributes']['table'],
                                    attributes=qblock['attributes'].copy()
                                    )
            )
        else:
            # worst case scenario, we need a table too.
            # remove the id from the attributes
            table_attributes = qblock['attributes'].copy()
            del table_attributes['table']
            container_table = this_document.add_block(
                block.TableBlock(
                    attributes=table_attributes,
                )
            )
            container_row = container_table.add_block(
                block.TableRowBlock(qblock['attributes']['table'],
                                    attributes=qblock['attributes'].copy()
                                    )
            )
        # now at last we can make the table Cell!
        this_cell = container_row.add_block(
            block.TableCellBlock(
                attributes=qblock['attributes'].copy()
            )
        )
        # now we can add the contents of the cell
        this_block = this_cell.add_block(
            block.TextBlockPlain(
                parent=container_row,
                last_block=previous_block,
                attributes=qblock['attributes'].copy()
            )
        )
        return this_block
    def make_better_table_blocks(self, block, contents, attributes):
        # NOTE(review): this stub is registered in block_registry and is
        # therefore invoked as (qblock, this_document, previous_block), but
        # its parameters are named like a node creator; the body also
        # references 'qblock', which is undefined here, so triggering this
        # path would raise NameError.  Needs fixing before the better-table
        # support can work.
        container_table = None
        container_row = None
        if 'table-col' in qblock['attributes']:
            # we need to make a table column.
            # this should be the first thing in the table, so we can check to see if one exists, and if not, we can make the table.
            pass
        elif 'table-cell-line' in qblock['attributes']:
            # we have a cell of the table.
            pass
    def image_node_test(self, block, contents, attributes):
        # True for embedded image inserts ({'image': ...}).
        if isinstance(contents, dict) and 'image' in contents:
            return True
        else:
            return False
    def make_image_node(self, block, contents, attributes):
        return block.add_node(node.Image(contents=contents, attributes=attributes))
    def make_string_node(self, block, contents, attributes):
        # NOTE(review): identical to the base-class implementation.
        if isinstance(block, bks.TextBlockCode):
            return block.add_node(node.TextLine(contents=contents, attributes=attributes, strip_newline=False))
        else:
            return block.add_node(node.TextLine(contents=contents, attributes=attributes, strip_newline=True))
|
[
"nicholas.cole@history.ox.ac.uk"
] |
nicholas.cole@history.ox.ac.uk
|
2e9d84f60e833682c49410d28244d43ebdd072d8
|
d165a02d11d91272031b0806ede495981626daf7
|
/Tabla de multiplicacion.py
|
3e91aafac04445b9ac3e313d24a47b94f7d37c2f
|
[] |
no_license
|
edumarg/python_ejercises
|
e8eefc20cfae49923674841a8e972eb5d4c7227e
|
216e5b53ba0ba48ec7174576c09645967167ec35
|
refs/heads/master
| 2023-01-21T00:28:17.845448
| 2020-11-29T15:15:27
| 2020-11-29T15:15:27
| 316,978,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
# Show the multiplication table for a user-supplied integer.
try:
    numero = int(input('Introdusca un numero entero y para mostrar su tabla de multiplicacion -Del 1 al 10-: '))
except ValueError:
    # NOTE(review): the message string ends with a stray ')' -- runtime text,
    # left untouched here.
    print('El valor introducido no es valido, por favor ingrese un valor valido)')
else:
    # NOTE(review): range(11) yields 0..10, so the table starts at 0 even
    # though the prompt says "Del 1 al 10" -- confirm the intended range.
    rango = range(11)
    for r in rango:
        print(f'{r} x {numero} = {r * numero}' )
|
[
"emargulis@hotmail.com"
] |
emargulis@hotmail.com
|
a4a08f0d93185c5de9080c25c7875fc18341cb21
|
cc1597f8e6163e9d0b2dcb459c6d489eb63a9d44
|
/tempCode251117.py
|
2df436f5fa46b58f9b1345e118e34a81432ba65b
|
[] |
no_license
|
dcaseGH/organicCrystals
|
b8634cafaff6962a6f4a5d8ebf3c1f06087959d5
|
edb3f8260884615f5b17f6bc6add30899cf37dbc
|
refs/heads/master
| 2020-04-07T16:35:24.801852
| 2018-03-07T10:59:33
| 2018-03-07T10:59:33
| 124,222,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
from ase.spacegroup import crystal
from ase.spacegroup import Spacegroup as ASESpacegroup
from ase.atoms import Atoms
import numpy as np
def getSpeciesListCIF(inputFile):
    ''' inputFile is the name of an input file
        use ASE at this point to quickly parse file
        DOES NOTHING ABOUT FRACTIONAL OCCUPANCY

        Yields an ase.Atoms object (with spacegroup info attached) for each
        structure found in the CIF file.

        Bug fix: the original used a Python 2 print statement
        ("print 'pbc=False', ..."), which is a SyntaxError under Python 3;
        it is now a print() call with identical output. '''
    from ase.io.cif import parse_cif
    aseParser = parse_cif(inputFile)
    for name, c in aseParser:
        # fractional coordinates as an (N, 3) array
        scaled_positions = np.array([c['_atom_site_fract_x'],
                                     c['_atom_site_fract_y'],
                                     c['_atom_site_fract_z']]).T
        # cell lengths and angles, shared by both constructions below
        cellpar = [c['_cell_length_a'], c['_cell_length_b'], c['_cell_length_c'],
                   c['_cell_angle_alpha'], c['_cell_angle_beta'], c['_cell_angle_gamma']]
        # symmetry-expanded crystal (used only for the size comparison print)
        crys = crystal(c['_atom_site_type_symbol'],
                       basis=scaled_positions,
                       cellpar=cellpar,
                       spacegroup=c['_symmetry_int_tables_number'])
        atoms = Atoms(symbols=c['_atom_site_type_symbol'],
                      scaled_positions=scaled_positions,
                      cell=cellpar,
                      info={'spacegroup': ASESpacegroup(c['_symmetry_int_tables_number'])})
        print('pbc=False', len(crys), len(atoms))
        yield atoms
#getSpeciesListCIF('testingScripts/CSPPCM_example.cif')
|
[
"e@mail.com"
] |
e@mail.com
|
695d647e4bc201cdcb229e2e5609d6d84c60f39d
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/quantopian_zipline/zipline-master/zipline/assets/assets.py
|
263430fd72952258cc7559e4a602e600bca7e555
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097
| 2019-12-12T08:49:07
| 2019-12-12T08:49:07
| 227,546,525
| 10
| 7
| null | 2022-12-19T02:53:01
| 2019-12-12T07:29:39
|
Python
|
UTF-8
|
Python
| false
| false
| 46,563
|
py
|
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
from logbook import Logger
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
merge,
partition_all,
sliding_window,
valmap,
)
from toolz.curried import operator as op
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MapAssetIdentifierIndexError,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
ValueNotFoundForField,
SidsNotFound,
SymbolNotFound,
)
from . import (
Asset, Equity, Future,
)
from . continuous_futures import (
OrderedContracts,
ContinuousFuture,
CHAIN_PREDICATES
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from zipline.utils.control_flow import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
    """
    Given a dict of mappings where the values are lists of
    OwnershipPeriod objects, returns a dict with the same structure with
    new OwnershipPeriod objects adjusted so that the periods have no
    gaps.

    Orders the periods chronologically, and pushes forward the end date
    of each period to match the start date of the following period. The
    end date of the last period pushed forward to the max Timestamp.
    """
    merged = {}
    for key, periods in mappings.items():
        # Append a sentinel whose start is Timestamp.max; pairing each
        # period with its successor then makes every end equal the next
        # start, and the final real period end at the max Timestamp.
        ordered = sorted(periods)
        ordered.append(
            OwnershipPeriod(
                pd.Timestamp.max.tz_localize('utc'),
                None,
                None,
                None,
            )
        )
        merged[key] = tuple(
            OwnershipPeriod(current.start, following.start,
                            current.sid, current.value)
            for current, following in zip(ordered, ordered[1:])
        )
    return merged
def build_ownership_map(table, key_from_row, value_from_row):
    """
    Build a dict mapping keys to gap-free tuples of OwnershipPeriods from a
    db table of (sid, start_date, end_date, value) rows.

    Parameters
    ----------
    table : sqlalchemy.Table
        Table to read ownership rows from.
    key_from_row : callable
        Extracts the grouping key from a row.
    value_from_row : callable
        Extracts the owned value from a row.
    """
    grouped = {}
    for row in sa.select(table.c).execute().fetchall():
        period = OwnershipPeriod(
            pd.Timestamp(row.start_date, unit='ns', tz='utc'),
            pd.Timestamp(row.end_date, unit='ns', tz='utc'),
            row.sid,
            value_from_row(row),
        )
        grouped.setdefault(key_from_row(row), []).append(period)
    return merge_ownership_periods(grouped)
@curry
def _filter_kwargs(names, dict_):
    """Filter out kwargs from a dictionary.

    Parameters
    ----------
    names : set[str]
        The names to select from ``dict_``.
    dict_ : dict[str, any]
        The dictionary to select from.

    Returns
    -------
    kwargs : dict[str, any]
        The subset of ``dict_`` whose keys appear in ``names`` and whose
        values are not None.
    """
    selected = {}
    for key, value in dict_.items():
        if key in names and value is not None:
            selected[key] = value
    return selected
# Curried filters that keep only the constructor kwargs accepted by each
# asset type (dropping None values) when building assets from db rows.
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
    """
    Convert the date-valued entries of an Asset init dict to UTC
    pd.Timestamps, in place.

    Missing/NaT dates become None.  Returns the mutated ``dict_``.
    """
    for key in _asset_timestamp_fields.intersection(dict_):
        converted = pd.Timestamp(dict_[key], tz='UTC')
        dict_[key] = None if isnull(converted) else converted
    return dict_
# Sid namespace tag packed into the first byte of synthetic sids
# (see _encode_continuous_future_sid below).
SID_TYPE_IDS = {
    # Asset would be 0,
    ContinuousFuture: 1,
}

# Roll-style byte values packed into continuous future sids.
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
    'calendar': 0,
    'volume': 1,
}

# Adjustment-style byte values; None is the unadjusted parent contract.
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
    None: 0,
    'div': 1,
    'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
                                  offset,
                                  roll_style,
                                  adjustment_style):
    """
    Pack the identifying parameters of a continuous future into a single
    integer sid, so equal parameter combinations always map to the same sid.

    The fields are laid out in an 8-byte struct (format documented below)
    and the packed bytes are then read back as one hexadecimal integer.
    """
    s = struct.Struct("B 2B B B B 2B")
    # B - sid type
    # 2B - root symbol
    # B - offset (could be packed smaller since offsets of greater than 12 are
    # probably unneeded.)
    # B - roll type
    # B - adjustment
    # 2B - empty space left for parameterized roll types
    # The root symbol currently supports 2 characters.  If 3 char root symbols
    # are needed, the size of the root symbol does not need to change, however
    # writing the string directly will need to change to a scheme of writing
    # the A-Z values in 5-bit chunks.
    a = array.array('B', [0] * s.size)
    rs = bytearray(root_symbol, 'ascii')
    values = (SID_TYPE_IDS[ContinuousFuture],
              rs[0],
              rs[1],
              offset,
              CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
              CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
              0, 0)
    s.pack_into(a, 0, *values)
    # Interpret the packed byte buffer as one big hex integer.
    return int(binascii.hexlify(a), 16)
class AssetFinder(object):
    """
    An AssetFinder is an interface to a database of Asset metadata written by
    an ``AssetDBWriter``.

    This class provides methods for looking up assets by unique integer id
    or by symbol.  For historical reasons, we refer to these unique ids as
    'sids'.

    Parameters
    ----------
    engine : str or SQLAlchemy.engine
        An engine with a connection to the asset database to use, or a string
        that can be parsed by SQLAlchemy as a URI.
    future_chain_predicates : dict
        A dict mapping future root symbol to a predicate function which
        accepts a contract as a parameter and returns whether or not the
        contract should be included in the chain.

    See Also
    --------
    :class:`zipline.assets.AssetDBWriter`
    """
    # Token used as a substitute for pickling objects that contain a
    # reference to an AssetFinder.
    PERSISTENT_TOKEN = "<AssetFinder>"
    @preprocess(engine=coerce_string_to_eng)
    def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES):
        self.engine = engine
        metadata = sa.MetaData(bind=engine)
        metadata.reflect(only=asset_db_table_names)
        # Bind each known asset-db table onto the instance by name
        # (self.equities, self.futures_contracts, self.version_info, ...).
        for table_name in asset_db_table_names:
            setattr(self, table_name, metadata.tables[table_name])

        # Check the version info of the db for compatibility
        check_version_info(engine, self.version_info, ASSET_DB_VERSION)

        # Cache for lookup of assets by sid, the objects in the asset lookup
        # may be shared with the results from equity and future lookup caches.
        #
        # The top level cache exists to minimize lookups on the asset type
        # routing.
        #
        # The caches are read through, i.e. accessing an asset through
        # retrieve_asset will populate the cache on first retrieval.
        self._caches = (self._asset_cache, self._asset_type_cache) = {}, {}

        self._future_chain_predicates = future_chain_predicates \
            if future_chain_predicates is not None else {}

        self._ordered_contracts = {}

        # Populated on first call to `lifetimes`.
        self._asset_lifetimes = None
def _reset_caches(self):
"""
Reset our asset caches.
You probably shouldn't call this method.
"""
# This method exists as a workaround for the in-place mutating behavior
# of `TradingAlgorithm._write_and_map_id_index_to_sids`. No one else
# should be calling this.
for cache in self._caches:
cache.clear()
self.reload_symbol_maps()
def reload_symbol_maps(self):
"""Clear the in memory symbol lookup maps.
This will make any changes to the underlying db available to the
symbol maps.
"""
# clear the lazyval caches, the next access will requery
try:
del type(self).symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).fuzzy_symbol_ownership_map[self]
except KeyError:
pass
    @lazyval
    def symbol_ownership_map(self):
        # (company_symbol, share_class_symbol) -> gap-free tuple of
        # OwnershipPeriods over that symbol; computed lazily on first access.
        return build_ownership_map(
            table=self.equity_symbol_mappings,
            key_from_row=(
                lambda row: (row.company_symbol, row.share_class_symbol)
            ),
            value_from_row=lambda row: row.symbol,
        )
@lazyval
def fuzzy_symbol_ownership_map(self):
fuzzy_mappings = {}
for (cs, scs), owners in iteritems(self.symbol_ownership_map):
fuzzy_owners = fuzzy_mappings.setdefault(
cs + scs,
[],
)
fuzzy_owners.extend(owners)
fuzzy_owners.sort()
return fuzzy_mappings
    @lazyval
    def equity_supplementary_map(self):
        # (field, value) -> OwnershipPeriods of the sids that held `value`.
        return build_ownership_map(
            table=self.equity_supplementary_mappings,
            key_from_row=lambda row: (row.field, row.value),
            value_from_row=lambda row: row.value,
        )
    @lazyval
    def equity_supplementary_map_by_sid(self):
        # (field, sid) -> OwnershipPeriods of the values held by `sid`.
        return build_ownership_map(
            table=self.equity_supplementary_mappings,
            key_from_row=lambda row: (row.field, row.sid),
            value_from_row=lambda row: row.value,
        )
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
    def retrieve_all(self, sids, default_none=False):
        """
        Retrieve all assets in `sids`.

        Parameters
        ----------
        sids : iterable of int
            Assets to retrieve.
        default_none : bool
            If True, return None for failed lookups.
            If False, raise `SidsNotFound`.

        Returns
        -------
        assets : list[Asset or None]
            A list of the same length as `sids` containing Assets (or Nones)
            corresponding to the requested sids.

        Raises
        ------
        SidsNotFound
            When a requested sid is not found and default_none=False.
        """
        hits, missing, failures = {}, set(), []
        for sid in sids:
            try:
                asset = self._asset_cache[sid]
                if not default_none and asset is None:
                    # Bail early if we've already cached that we don't know
                    # about an asset.
                    raise SidsNotFound(sids=[sid])
                hits[sid] = asset
            except KeyError:
                missing.add(sid)

        # All requests were cache hits. Return requested sids in order.
        if not missing:
            return [hits[sid] for sid in sids]

        update_hits = hits.update

        # Look up cache misses by type.
        type_to_assets = self.group_by_type(missing)

        # Handle failures: sids the asset router could not type are cached
        # as None so repeated lookups don't re-query the db.
        failures = {failure: None for failure in type_to_assets.pop(None, ())}
        update_hits(failures)
        self._asset_cache.update(failures)

        if failures and not default_none:
            raise SidsNotFound(sids=list(failures))

        # We don't update the asset cache here because it should already be
        # updated by `self.retrieve_equities`.
        update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
        update_hits(
            self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
        )

        # We shouldn't know about any other asset types.
        if type_to_assets:
            raise AssertionError(
                "Found asset types: %s" % list(type_to_assets.keys())
            )

        return [hits[sid] for sid in sids]
    def retrieve_equities(self, sids):
        """
        Retrieve Equity objects for a list of sids.

        Users generally shouldn't need to call this method (instead, they
        should prefer the more general/friendly `retrieve_assets`), but it
        has a documented interface and tests because it's used upstream.

        Parameters
        ----------
        sids : iterable[int]

        Returns
        -------
        equities : dict[int -> Equity]

        Raises
        ------
        EquitiesNotFound
            When any requested asset isn't found.
        """
        return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
    def retrieve_futures_contracts(self, sids):
        """
        Retrieve Future objects for an iterable of sids.

        Users generally shouldn't need to call this method (instead, they
        should prefer the more general/friendly `retrieve_assets`), but it
        has a documented interface and tests because it's used upstream.

        Parameters
        ----------
        sids : iterable[int]

        Returns
        -------
        futures : dict[int -> Future]

        Raises
        ------
        FutureContractsNotFound
            When any requested asset isn't found.
        """
        return self._retrieve_assets(sids, self.futures_contracts, Future)
    @staticmethod
    def _select_assets_by_sid(asset_tbl, sids):
        # Build a select of full `asset_tbl` rows for the given sids.
        return sa.select([asset_tbl]).where(
            asset_tbl.c.sid.in_(map(int, sids))
        )
    @staticmethod
    def _select_asset_by_symbol(asset_tbl, symbol):
        # Build a select of full `asset_tbl` rows matching `symbol` exactly.
        return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
    def _select_most_recent_symbols_chunk(self, sid_group):
        """Retrieve the most recent symbol for a set of sids.

        Parameters
        ----------
        sid_group : iterable[int]
            The sids to lookup. The length of this sequence must be less than
            or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
            passed in as sql bind params.

        Returns
        -------
        sel : Selectable
            The sqlalchemy selectable that will query for the most recent
            symbol for each sid.

        Notes
        -----
        This is implemented as an inner select of the columns of interest
        ordered by the end date of the (sid, symbol) mapping. We then group
        that inner select on the sid with no aggregations to select the last
        row per group which gives us the most recently active symbol for all
        of the sids.
        """
        symbol_cols = self.equity_symbol_mappings.c
        inner = sa.select(
            (symbol_cols.sid,) +
            tuple(map(
                op.getitem(symbol_cols),
                symbol_columns,
            )),
        ).where(
            symbol_cols.sid.in_(map(int, sid_group)),
        ).order_by(
            symbol_cols.end_date.asc(),
        )
        # Grouping with no aggregation keeps the last-ordered row per sid.
        return sa.select(inner.c).group_by(inner.c.sid)
def _lookup_most_recent_symbols(self, sids):
symbols = {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
),
)
}
if len(symbols) != len(sids):
raise EquitiesNotFound(
sids=set(sids) - set(symbols),
plural=True,
)
return symbols
    def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
        # Yield constructor-kwargs dicts (dates converted to Timestamps) for
        # each requested sid, merging in the most recent symbol mapping when
        # querying equities.
        if not sids:
            return

        if querying_equities:
            def mkdict(row,
                       # Bound once as a default so the symbol lookup query
                       # runs a single time, not once per row.
                       symbols=self._lookup_most_recent_symbols(sids)):
                return merge(row, symbols[row['sid']])
        else:
            mkdict = dict

        for assets in group_into_chunks(sids):
            # Load misses from the db.
            query = self._select_assets_by_sid(asset_tbl, assets)
            for row in query.execute().fetchall():
                yield _convert_asset_timestamp_fields(mkdict(row))
    def _retrieve_assets(self, sids, asset_tbl, asset_type):
        """
        Internal function for loading assets from a table.

        This should be the only method of `AssetFinder` that writes Assets
        into self._asset_cache.

        Parameters
        ----------
        sids : iterable of int
            Asset ids to look up.
        asset_tbl : sqlalchemy.Table
            Table from which to query assets.
        asset_type : type
            Type of asset to be constructed.

        Returns
        -------
        assets : dict[int -> Asset]
            Dict mapping requested sids to the retrieved assets.
        """
        # Fastpath for empty request.
        if not sids:
            return {}

        cache = self._asset_cache
        hits = {}

        querying_equities = issubclass(asset_type, Equity)
        # Keep only the kwargs accepted by the asset type's constructor.
        filter_kwargs = (
            _filter_equity_kwargs
            if querying_equities else
            _filter_future_kwargs
        )

        rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
        for row in rows:
            sid = row['sid']
            asset = asset_type(**filter_kwargs(row))
            hits[sid] = cache[sid] = asset

        # If we get here, it means something in our code thought that a
        # particular sid was an equity/future and called this function with a
        # concrete type, but we couldn't actually resolve the asset.  This is
        # an error in our code, not a user-input error.
        misses = tuple(set(sids) - viewkeys(hits))
        if misses:
            if querying_equities:
                raise EquitiesNotFound(sids=misses)
            else:
                raise FutureContractsNotFound(sids=misses)
        return hits
    def _lookup_symbol_strict(self, symbol, as_of_date):
        """Resolve `symbol` to the Equity that owned it on `as_of_date`,
        or to its unique all-time owner when `as_of_date` is None.

        Raises SymbolNotFound / MultipleSymbolsFound otherwise.
        """
        # split the symbol into the components, if there are no
        # company/share class parts then share_class_symbol will be empty
        company_symbol, share_class_symbol = split_delimited_symbol(symbol)
        try:
            owners = self.symbol_ownership_map[
                company_symbol,
                share_class_symbol,
            ]
            assert owners, 'empty owners list for %r' % symbol
        except KeyError:
            # no equity has ever held this symbol
            raise SymbolNotFound(symbol=symbol)

        if not as_of_date:
            if len(owners) > 1:
                # more than one equity has held this ticker, this is
                # ambiguous without the date
                raise MultipleSymbolsFound(
                    symbol=symbol,
                    options=set(map(
                        compose(self.retrieve_asset, attrgetter('sid')),
                        owners,
                    )),
                )

            # exactly one equity has ever held this symbol, we may resolve
            # without the date
            return self.retrieve_asset(owners[0].sid)

        for start, end, sid, _ in owners:
            if start <= as_of_date < end:
                # find the equity that owned it on the given asof date
                return self.retrieve_asset(sid)

        # no equity held the ticker on the given asof date
        raise SymbolNotFound(symbol=symbol)
    def _lookup_symbol_fuzzy(self, symbol, as_of_date):
        """Resolve `symbol` ignoring the company/share-class delimiter,
        preferring exact matches when multiple equities collide on the
        fuzzy (undelimited) symbol.
        """
        symbol = symbol.upper()
        company_symbol, share_class_symbol = split_delimited_symbol(symbol)
        try:
            owners = self.fuzzy_symbol_ownership_map[
                company_symbol + share_class_symbol
            ]
            assert owners, 'empty owners list for %r' % symbol
        except KeyError:
            # no equity has ever held a symbol matching the fuzzy symbol
            raise SymbolNotFound(symbol=symbol)

        if not as_of_date:
            if len(owners) == 1:
                # only one valid match
                return self.retrieve_asset(owners[0].sid)

            options = []
            for _, _, sid, sym in owners:
                if sym == symbol:
                    # there are multiple options, look for exact matches
                    options.append(self.retrieve_asset(sid))

            if len(options) == 1:
                # there was only one exact match
                return options[0]

            # there is more than one exact match for this fuzzy symbol
            raise MultipleSymbolsFound(
                symbol=symbol,
                options=set(options),
            )

        options = {}
        for start, end, sid, sym in owners:
            if start <= as_of_date < end:
                # see which fuzzy symbols were owned on the asof date.
                options[sid] = sym

        if not options:
            # no equity owned the fuzzy symbol on the date requested
            raise SymbolNotFound(symbol=symbol)

        sid_keys = list(options.keys())
        # If there was only one owner, or there is a fuzzy and non-fuzzy which
        # map to the same sid, return it.
        if len(options) == 1:
            return self.retrieve_asset(sid_keys[0])

        for sid, sym in options.items():
            # Possible to have a scenario where multiple fuzzy matches have
            # the same date. Want to find the one where symbol and share class
            # match.
            if (company_symbol, share_class_symbol) == \
                    split_delimited_symbol(sym):
                return self.retrieve_asset(sid)

        # multiple equities held tickers matching the fuzzy ticker but
        # there are no exact matches
        raise MultipleSymbolsFound(
            symbol=symbol,
            options=[self.retrieve_asset(s) for s in sid_keys],
        )
    def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
        """Lookup an equity by symbol.

        Parameters
        ----------
        symbol : str
            The ticker symbol to resolve.
        as_of_date : datetime or None
            Look up the last owner of this symbol as of this datetime.
            If ``as_of_date`` is None, then this can only resolve the equity
            if exactly one equity has ever owned the ticker.
        fuzzy : bool, optional
            Should fuzzy symbol matching be used? Fuzzy symbol matching
            attempts to resolve differences in representations for
            shareclasses. For example, some people may represent the ``A``
            shareclass of ``BRK`` as ``BRK.A``, where others could write
            ``BRK_A``.

        Returns
        -------
        equity : Equity
            The equity that held ``symbol`` on the given ``as_of_date``, or
            the only equity to hold ``symbol`` if ``as_of_date`` is None.

        Raises
        ------
        SymbolNotFound
            Raised when no equity has ever held the given symbol.
        MultipleSymbolsFound
            Raised when no ``as_of_date`` is given and more than one equity
            has held ``symbol``. This is also raised when ``fuzzy=True`` and
            there are multiple candidates for the given ``symbol`` on the
            ``as_of_date``.
        """
        if symbol is None:
            raise TypeError("Cannot lookup asset for symbol of None for "
                            "as of date %s." % as_of_date)

        if fuzzy:
            return self._lookup_symbol_fuzzy(symbol, as_of_date)
        return self._lookup_symbol_strict(symbol, as_of_date)
def lookup_symbols(self, symbols, as_of_date, fuzzy=False):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
Returns
-------
equities : list[Equity]
"""
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = self.lookup_symbol(sym, as_of_date, fuzzy)
append_output(equity)
return out
    def lookup_future_symbol(self, symbol):
        """Lookup a future contract by symbol.

        Parameters
        ----------
        symbol : str
            The symbol of the desired contract.

        Returns
        -------
        future : Future
            The future contract referenced by ``symbol``.

        Raises
        ------
        SymbolNotFound
            Raised when no contract named 'symbol' is found.
        """
        data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
            .execute().fetchone()

        # If no data found, raise an exception
        if not data:
            raise SymbolNotFound(symbol=symbol)
        return self.retrieve_asset(data['sid'])
def lookup_by_supplementary_field(self, field_name, value, as_of_date):
try:
owners = self.equity_supplementary_map[
field_name,
value,
]
assert owners, 'empty owners list for %r' % (field_name, value)
except KeyError:
# no equity has ever held this value
raise ValueNotFoundForField(field=field_name, value=value)
if not as_of_date:
if len(owners) > 1:
# more than one equity has held this value, this is ambigious
# without the date
raise MultipleValuesFoundForField(
field=field_name,
value=value,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this value, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the value on the given asof date
raise ValueNotFoundForField(field=field_name, value=value)
def get_supplementary_field(
self,
sid,
field_name,
as_of_date,
):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
If we have no values for this asset, or no values was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
assert periods, 'empty periods list for %r' % (field_name, sid)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
# This equity has held more than one value, this is ambigious
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid)
    def _get_contract_sids(self, root_symbol):
        # Sids of all contracts for `root_symbol` with a real start date
        # (rows whose start_date is NaT are excluded), in ascending-sid
        # order.
        fc_cols = self.futures_contracts.c
        return [r.sid for r in
                list(sa.select((fc_cols.sid,)).where(
                    (fc_cols.root_symbol == root_symbol) &
                    (fc_cols.start_date != pd.NaT.value)).order_by(
                        fc_cols.sid).execute().fetchall())]
    def _get_root_symbol_exchange(self, root_symbol):
        # Exchange on which contracts of `root_symbol` trade.
        # NOTE(review): fetchone() returns None for an unknown root symbol,
        # so the trailing [0] would raise TypeError — confirm callers only
        # pass known roots.
        fc_cols = self.futures_root_symbols.c
        fields = (fc_cols.exchange,)
        return sa.select(fields).where(
            fc_cols.root_symbol == root_symbol).execute().fetchone()[0]
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
    def create_continuous_future(self, root_symbol, offset, roll_style):
        """Build and cache the ContinuousFuture for ``root_symbol`` with the
        given offset and roll style, together with its multiplicatively
        ('mul') and additively ('add') adjusted children.
        """
        oc = self.get_ordered_contracts(root_symbol)
        exchange = self._get_root_symbol_exchange(root_symbol)

        sid = _encode_continuous_future_sid(root_symbol, offset,
                                            roll_style,
                                            None)
        # NOTE(review): the 'mul' child's sid is encoded with the 'div'
        # adjustment id while the object itself is constructed with 'mul' —
        # presumably the multiplicative adjustment is dividend-based;
        # confirm against the sid-encoding scheme before changing.
        mul_sid = _encode_continuous_future_sid(root_symbol, offset,
                                                roll_style,
                                                'div')
        add_sid = _encode_continuous_future_sid(root_symbol, offset,
                                                roll_style,
                                                'add')

        mul_cf = ContinuousFuture(mul_sid,
                                  root_symbol,
                                  offset,
                                  roll_style,
                                  oc.start_date,
                                  oc.end_date,
                                  exchange,
                                  'mul')
        add_cf = ContinuousFuture(add_sid,
                                  root_symbol,
                                  offset,
                                  roll_style,
                                  oc.start_date,
                                  oc.end_date,
                                  exchange,
                                  'add')
        cf = ContinuousFuture(sid,
                              root_symbol,
                              offset,
                              roll_style,
                              oc.start_date,
                              oc.end_date,
                              exchange,
                              adjustment_children={
                                  'mul': mul_cf,
                                  'add': add_cf
                              })

        # Cache the parent and both children by sid.
        self._asset_cache[cf.sid] = cf
        self._asset_cache[add_cf.sid] = add_cf
        self._asset_cache[mul_cf.sid] = mul_cf

        return cf
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
doc='All of the sids for futures consracts in the asset finder.',
)
del _make_sids
    @lazyval
    def _symbol_lookups(self):
        """
        An iterable of symbol lookup functions to use with ``lookup_generic``

        Attempts equities lookup, then futures.
        """
        return (
            self.lookup_symbol,
            # lookup_future_symbol method does not use as_of date, since
            # symbols are unique.
            #
            # Wrap the function in a lambda so that both methods share a
            # signature, so that when the functions are iterated over
            # the consumer can use the same arguments with both methods.
            lambda symbol, _: self.lookup_future_symbol(symbol)
        )
    def _lookup_generic_scalar(self,
                               asset_convertible,
                               as_of_date,
                               matches,
                               missing):
        """
        Convert asset_convertible to an asset.

        On success, append to matches.
        On failure, append to missing.
        """
        if isinstance(asset_convertible, Asset):
            matches.append(asset_convertible)

        elif isinstance(asset_convertible, Integral):
            try:
                result = self.retrieve_asset(int(asset_convertible))
            except SidsNotFound:
                missing.append(asset_convertible)
                return None
            matches.append(result)

        elif isinstance(asset_convertible, string_types):
            # Try each lookup (equities, then futures); the first success
            # returns immediately.
            for lookup in self._symbol_lookups:
                try:
                    matches.append(lookup(asset_convertible, as_of_date))
                    return
                except SymbolNotFound:
                    continue
            else:
                # for/else: the loop never breaks, so this runs exactly when
                # every lookup raised SymbolNotFound.
                missing.append(asset_convertible)
                return None
        else:
            raise NotAssetConvertible(
                "Input was %s, not AssetConvertible."
                % asset_convertible
            )
    def lookup_generic(self,
                       asset_convertible_or_iterable,
                       as_of_date):
        """
        Convert a AssetConvertible or iterable of AssetConvertibles into
        a list of Asset objects.

        This method exists primarily as a convenience for implementing
        user-facing APIs that can handle multiple kinds of input.  It should
        not be used for internal code where we already know the expected
        types of our inputs.

        Returns a pair of objects, the first of which is the result of the
        conversion, and the second of which is a list containing any values
        that couldn't be resolved.
        """
        matches = []
        missing = []

        # Interpret input as scalar.
        if isinstance(asset_convertible_or_iterable, AssetConvertible):
            self._lookup_generic_scalar(
                asset_convertible=asset_convertible_or_iterable,
                as_of_date=as_of_date,
                matches=matches,
                missing=missing,
            )
            try:
                return matches[0], missing
            except IndexError:
                # Scalar lookup failed; raise the error matching the input
                # kind (sid-like vs symbol-like).
                if hasattr(asset_convertible_or_iterable, '__int__'):
                    raise SidsNotFound(sids=[asset_convertible_or_iterable])
                else:
                    raise SymbolNotFound(symbol=asset_convertible_or_iterable)

        # Interpret input as iterable.
        try:
            iterator = iter(asset_convertible_or_iterable)
        except TypeError:
            raise NotAssetConvertible(
                "Input was not a AssetConvertible "
                "or iterable of AssetConvertible."
            )

        for obj in iterator:
            self._lookup_generic_scalar(obj, as_of_date, matches, missing)
        return matches, missing
    def map_identifier_index_to_sids(self, index, as_of_date):
        """
        This method is for use in sanitizing a user's DataFrame or Panel
        inputs.

        Takes the given index of identifiers, checks their types, builds
        assets if necessary, and returns a list of the sids that correspond
        to the input index.

        Parameters
        ----------
        index : Iterable
            An iterable containing ints, strings, or Assets
        as_of_date : pandas.Timestamp
            A date to be used to resolve any dual-mapped symbols

        Returns
        -------
        List
            A list of integer sids corresponding to the input index
        """
        # This method assumes that the type of the objects in the index is
        # consistent and can, therefore, be taken from the first identifier
        first_identifier = index[0]

        # Ensure that input is AssetConvertible (integer, string, or Asset)
        if not isinstance(first_identifier, AssetConvertible):
            raise MapAssetIdentifierIndexError(obj=first_identifier)

        # If sids are provided, no mapping is necessary
        if isinstance(first_identifier, Integral):
            return index

        # Look up all Assets for mapping
        matches = []
        missing = []
        for identifier in index:
            self._lookup_generic_scalar(identifier, as_of_date,
                                        matches, missing)

        if missing:
            raise ValueError("Missing assets for identifiers: %s" % missing)

        # Return a list of the sids of the found assets
        return [asset.sid for asset in matches]
    def _compute_asset_lifetimes(self):
        """
        Compute and cache a recarray of asset lifetimes.
        """
        equities_cols = self.equities.c
        buf = np.array(
            tuple(
                sa.select((
                    equities_cols.sid,
                    equities_cols.start_date,
                    equities_cols.end_date,
                )).execute(),
            ), dtype='<f8',  # use doubles so we get NaNs
        )
        lifetimes = np.recarray(
            buf=buf,
            shape=(len(buf),),
            dtype=[
                ('sid', '<f8'),
                ('start', '<f8'),
                ('end', '<f8')
            ],
        )
        start = lifetimes.start
        end = lifetimes.end
        start[np.isnan(start)] = 0  # convert missing starts to 0
        end[np.isnan(end)] = np.iinfo(int).max  # convert missing end to INTMAX
        # Cast the results back down to int.
        return lifetimes.astype([
            ('sid', '<i8'),
            ('start', '<i8'),
            ('end', '<i8'),
        ])
    def lifetimes(self, dates, include_start_date):
        """
        Compute a DataFrame representing asset lifetimes for the specified
        date range.

        Parameters
        ----------
        dates : pd.DatetimeIndex
            The dates for which to compute lifetimes.
        include_start_date : bool
            Whether or not to count the asset as alive on its start_date.

            This is useful in a backtesting context where `lifetimes` is
            being used to signify "do I have data for this asset as of the
            morning of this date?" For many financial metrics, (e.g. daily
            close), data isn't available for an asset until the end of the
            asset's first day.

        Returns
        -------
        lifetimes : pd.DataFrame
            A frame of dtype bool with `dates` as index and an Int64Index of
            assets as columns. The value at `lifetimes.loc[date, asset]` will
            be True iff `asset` existed on `date`. If `include_start_date` is
            False, then lifetimes.loc[date, asset] will be false when date ==
            asset.start_date.

        See Also
        --------
        numpy.putmask
        zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
        """
        # This is a less than ideal place to do this, because if someone
        # adds assets to the finder after we've touched lifetimes we won't
        # have those new assets available.  Mutability is not my favorite
        # programming feature.
        if self._asset_lifetimes is None:
            self._asset_lifetimes = self._compute_asset_lifetimes()
        lifetimes = self._asset_lifetimes

        # Broadcast the dates (as int64 nanos) against every asset's
        # [start, end] interval.
        raw_dates = as_column(dates.asi8)
        if include_start_date:
            mask = lifetimes.start <= raw_dates
        else:
            mask = lifetimes.start < raw_dates
        mask &= (raw_dates <= lifetimes.end)

        return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
    """
    ABC for types that are convertible to integer-representations of
    Assets.

    Includes Asset, six.string_types, and Integral.
    """
    pass
# Register the concrete convertible types: ints (sids), Assets, and
# symbol strings.
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
    AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
    """Raised when an input cannot be interpreted as an Asset, an integer
    sid, or a symbol string (see AssetConvertible).
    """
    pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
    """
    ABC for types that can be associated with pricing data.

    Includes Asset, Future, ContinuousFuture.
    """
    pass
# Concrete asset types that pricing data can be attached to.
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
def was_active(reference_date_value, asset):
    """
    Whether or not `asset` was active at the time corresponding to
    `reference_date_value`.

    Parameters
    ----------
    reference_date_value : int
        Date, represented as nanoseconds since EPOCH, for which we want to
        know if `asset` was alive.  This is generally the result of
        accessing the `value` attribute of a pandas Timestamp.
    asset : Asset
        The asset object to check.

    Returns
    -------
    was_active : bool
        Whether or not the `asset` existed at the specified time.
    """
    started = asset.start_date.value <= reference_date_value
    return started and reference_date_value <= asset.end_date.value
def only_active_assets(reference_date_value, assets):
    """
    Filter an iterable of Asset objects down to just assets that were alive
    at the time corresponding to `reference_date_value`.

    Parameters
    ----------
    reference_date_value : int
        Date, represented as nanoseconds since EPOCH, for which we want to
        know if `asset` was alive.  This is generally the result of
        accessing the `value` attribute of a pandas Timestamp.
    assets : iterable[Asset]
        The assets to filter.

    Returns
    -------
    active_assets : list
        List of the active assets from `assets` on the requested date.
    """
    return list(filter(
        lambda candidate: was_active(reference_date_value, candidate),
        assets,
    ))
|
[
"659338505@qq.com"
] |
659338505@qq.com
|
819c1c3c25375fac54459fa8e1ceaf4143d591ff
|
20654aa141a6c70d05447badc33bbfed5b428f62
|
/Dinosaur/Dinosaur-final/get_train_data.py
|
42c7b84c6987442c0c34903692ba9fb29cd02176
|
[] |
no_license
|
my0sotis/GameBots
|
6dc3464606ba5a2dc8fe1c637fd705f850c0cf4e
|
ff090c8af1916a693381766203997af63bdc13ee
|
refs/heads/main
| 2023-03-18T23:12:30.397164
| 2021-03-15T12:43:42
| 2021-03-15T12:43:42
| 347,962,791
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,275
|
py
|
# Developed by 刘晓林
# update by 高战立
import numpy as np
from old_grab import grab_screen
import cv2
import time
from get_key import key_check
import os
starting_value = 1
# Encode the pressed keys as a one-hot action vector: [jump, no-op].
def keys_to_output(keys):
    """Return [1, 0] when the space bar is in `keys`, else [0, 1]."""
    action = [0, 0]
    action[0 if 'space' in keys else 1] = 1
    return action
def main(file_name, starting_value):
    """Capture gameplay frames + key presses into numbered .npy files.

    Grabs a fixed screen region in a loop, converts it to a small grayscale
    frame, pairs it with the one-hot key output, and saves every 500 samples
    to `training_data-N.npy`, incrementing N each time. Press 'T' to toggle
    pause. Runs forever (no exit condition).
    """
    file_name = file_name
    starting_value = starting_value
    training_data = []
    # Countdown before recording starts (gives time to focus the game window).
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)
    last_time = time.time()
    paused = False
    print('STARTING!!!')
    while True:
        if not paused:
            now = time.time()
            # Grab a screenshot of the fixed game region.
            # NOTE(review): coordinates assume a specific window placement
            # and resolution — confirm against the capture setup.
            screen = grab_screen(region=(560, 147, 1360, 347))
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            # Downscale to reduce sample size.
            screen = cv2.resize(screen, (160, 40))
            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen, output])
            # Per-frame capture latency, in seconds.
            print(time.time() - now)
            if len(training_data) % 100 == 0:
                print(len(training_data))
                if len(training_data) == 500:
                    # Flush the current batch and roll over to the next file.
                    np.save(file_name, training_data)
                    print('SAVED')
                    training_data = []
                    starting_value += 1
                    file_name = 'training_data-{}.npy'.format(starting_value)
        keys = key_check()
        # Press 'T' to toggle pause/resume.
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(4)
def record():
    """Start one capture session with the module-level file settings.

    NOTE(review): reads the globals `file_name` and `starting_value`; the
    former is only assigned in the __main__ block, so calling record()
    before that block runs would raise NameError.
    """
    main(file_name, starting_value)
if __name__ == '__main__':
    # Find the first unused training_data-N.npy filename so a new recording
    # session never clobbers files captured earlier.
    while True:
        file_name = 'training_data-{}.npy'.format(starting_value)
        if os.path.isfile(file_name):
            print('File exists, moving along', starting_value)
            starting_value += 1
        else:
            print('File does not exist, starting fresh!', starting_value)
            break
    record()
|
[
"35601060+my0sotis@users.noreply.github.com"
] |
35601060+my0sotis@users.noreply.github.com
|
42699e3531f4f25037f2eb9c5851677670f8743b
|
5f6ae52636b33f55c46f70aa1baf2a670e98bd87
|
/setup.py
|
9675b617e9378e0ab7de4fa3d20c5c31907fc572
|
[] |
no_license
|
AstroKolb/stellar-environments
|
5c74011c0e213220bdbf6e40d1682815dae05042
|
d5158b864c731ced65ad78565c9ce73888dc0154
|
refs/heads/master
| 2021-05-06T04:04:07.865419
| 2018-02-19T23:34:51
| 2018-02-19T23:34:51
| 114,911,801
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
import os
import shutil

# Scan the icons file for the quoted prefix declaration, e.g. prefix='xyz'.
# Using `with` closes the file deterministically (the original leaked the
# file handle by never calling close()).
prefix = ''
with open('icons') as icons:
    for line in icons:
        if 'prefix' in line:
            prefix = line.split("'")[1]

# Copy the icons file preserving metadata (equivalent of `cp -p`), without
# spawning a shell — avoids quoting/injection issues if `prefix` ever
# contains shell metacharacters.
shutil.copy2('icons', os.path.join('output', prefix + '.icons'))
|
[
"astro.cekolb@gmail.com"
] |
astro.cekolb@gmail.com
|
99e84b7ea2bd7236f43997768a66d9d61bebd36f
|
d48bd4f305b834a94435e79ad93471ae419b0782
|
/tests/factories/promo.py
|
31ba9379d2650484da7d5377af34eb27e0f3c668
|
[
"MIT"
] |
permissive
|
stefanitsky/yandex_market_language
|
aeec6d0e7828b33d32587d2a3f5511646cca680f
|
e17595b556fc55e183cf366227b2739c5c6178dc
|
refs/heads/master
| 2021-11-04T18:17:06.973033
| 2021-09-10T11:40:55
| 2021-09-10T11:40:55
| 250,661,059
| 9
| 7
|
MIT
| 2021-09-01T14:12:08
| 2020-03-27T22:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
from yandex_market_language import models
from faker import Faker
fake = Faker()
def create_random_product(
    offer_id=None,
    category_id=None,
) -> "models.Product":
    """Build a Product with random ids.

    BUG FIX: defaults were previously written as ``offer_id=fake.pystr()``,
    which is evaluated once at import time — every "random" Product shared
    the same ids. Generate fresh values per call instead.
    """
    if offer_id is None:
        offer_id = fake.pystr()
    if category_id is None:
        category_id = fake.pystr()
    return models.Product(offer_id, category_id)


Product = create_random_product
def create_random_purchase(
    required_quantity=None,
    products=None,
) -> "models.Purchase":
    """Build a Purchase of three random Products by default.

    BUG FIX: ``required_quantity=str(fake.pyint())`` was evaluated once at
    import time, freezing the "random" quantity across all calls; it is now
    generated per call.
    """
    if required_quantity is None:
        required_quantity = str(fake.pyint())
    if products is None:
        products = [Product() for _ in range(3)]
    return models.Purchase(products, required_quantity)


Purchase = create_random_purchase
def create_random_promo_gift(
    gift_id=None,
    offer_id=None,
) -> "models.PromoGift":
    """Build a PromoGift keyed by either gift_id or offer_id.

    With no arguments, one of the two keys is picked at random and filled
    with a random string. If both ids are passed (or an id is falsy),
    AttributeError is raised.
    """
    if gift_id is None and offer_id is None:
        chosen_key = fake.random_element(["gift_id", "offer_id"])
        kwargs = {chosen_key: fake.pystr()}
    elif gift_id:
        kwargs = {"gift_id": gift_id}
    elif offer_id:
        kwargs = {"offer_id": offer_id}
    else:
        raise AttributeError("only one attr must be specified!")
    return models.PromoGift(**kwargs)


PromoGift = create_random_promo_gift
def create_random_promo(
    promo_id=None,
    promo_type="gift with purchase",
    start_date=None,
    end_date=None,
    description=None,
    url=None,
    purchase=None,
    promo_gifts=None,
) -> "models.Promo":
    """Build a Promo with random fields and three random PromoGifts.

    BUG FIX: random defaults (and the default ``Purchase()``) were
    previously evaluated once at import time, so every Promo shared the
    same id, dates, description, url and purchase. All defaults are now
    produced per call.
    """
    if promo_id is None:
        promo_id = fake.pystr()
    if start_date is None:
        start_date = str(fake.date())
    if end_date is None:
        end_date = str(fake.date())
    if description is None:
        description = fake.text()
    if url is None:
        url = fake.url()
    if purchase is None:
        purchase = Purchase()
    if promo_gifts is None:
        promo_gifts = [PromoGift() for _ in range(3)]
    return models.Promo(
        promo_id=promo_id,
        promo_type=promo_type,
        start_date=start_date,
        end_date=end_date,
        description=description,
        url=url,
        purchase=purchase,
        promo_gifts=promo_gifts,
    )


Promo = create_random_promo
|
[
"stefanitsky.mozdor@gmail.com"
] |
stefanitsky.mozdor@gmail.com
|
fe860a0b07590640e45f33cac562b0db9e321232
|
6b43767550330715fbfbf3bace4be5ab5d551066
|
/projects/scapy/pcap_fuzzer.py
|
aaf1f5ffb439072c3f613eed5b38a948b2b6ee9f
|
[
"Apache-2.0"
] |
permissive
|
jfoote/oss-fuzz
|
7007cece101b8452acb2970b179410cab7a9d7ad
|
3510e3e3b3a4aee9d8d517abeb9a12f284480727
|
refs/heads/master
| 2021-07-02T10:58:00.913127
| 2021-05-17T17:17:08
| 2021-05-17T17:17:08
| 368,248,397
| 1
| 0
|
Apache-2.0
| 2021-05-17T16:12:45
| 2021-05-17T16:12:45
| null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
#!/usr/bin/python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
import atheris
import scapy
import scapy.error
import scapy.utils
def TestOneInput(input_bytes):
    """Fuzz entry point: feed arbitrary bytes to scapy's pcap reader.

    Scapy_Exception is the expected failure for malformed input and is
    swallowed; any other exception propagates and is reported as a finding
    by the fuzzer.
    """
    try:
        scapy.utils.rdpcap(io.BytesIO(input_bytes))
    except scapy.error.Scapy_Exception:
        pass
def main():
    """Register the harness with atheris and start the fuzzing loop."""
    atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
    atheris.Fuzz()
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
jfoote.noreply@github.com
|
417c2c2bac412b2144f231650fc8727ff90c72db
|
343bd6d6ef8d40e4bddbfef70feb78351faac1ca
|
/Scrap/old/subir_tipo_cambio.py
|
1329f3b1b4ec67179ecc6d05119f0f665b967afb
|
[] |
no_license
|
diego-san/conmanzanas
|
1462a214a27eaaba07adb4b15e3140a96d94206e
|
5b9f8c17a7f9da33bd71df8365e5b8e0c6263b41
|
refs/heads/master
| 2020-08-13T01:14:17.470222
| 2019-12-13T12:54:26
| 2019-12-13T12:54:26
| 189,672,325
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 706
|
py
|
import json
import sqlite3
def ac(name):
    """Strip Spanish acute accents from the lowercase vowels of *name*.

    Uses a single str.translate pass instead of five chained .replace()
    calls. Only lowercase accented vowels are mapped, matching the original
    behavior (uppercase accents pass through unchanged).
    """
    return name.translate(str.maketrans('áéíóú', 'aeiou'))
# Load the scraped exchange-rate names and insert any new ones into the
# basedatos_tipocambio table, skipping names already present.
with open('Json/tipo_cambio.json') as file:
    moneda = json.load(file)
    for m in moneda['paises']:
        conn = sqlite3.connect('../db.sqlite3')
        c = conn.cursor()
        nombre = c.execute("SELECT * FROM basedatos_tipocambio WHERE nombrecamp=?", (ac(str(m['tipo cambio'])),)).fetchall()
        if len(nombre) == 0:
            c.execute('''INSERT INTO basedatos_tipocambio VALUES(?,?)''', (None, ac(str(m['tipo cambio']))))
        # BUG FIX: commit and close each per-row connection inside the loop.
        # The original opened a fresh connection every iteration but
        # committed/closed only after the loop, so every INSERT except the
        # last was rolled back when its connection was garbage-collected.
        conn.commit()
        conn.close()
|
[
"diego.sanchez@gropoz.com"
] |
diego.sanchez@gropoz.com
|
1173831ef3cd2368f9d6b8d67bdd2394bd694bec
|
5b91f6e11509e40401dd404cf8eb4097eeada5a7
|
/fb.py
|
ce3203e819be3663042a2c8ac251361214f052c9
|
[] |
no_license
|
corootxnova/facebook-toolkit
|
83a97ae1a09daa080de8375bbb1f29e148fe4f94
|
55ba00879ed93d4969a2c12303406c71eecf7651
|
refs/heads/master
| 2022-12-08T18:15:34.365733
| 2020-09-09T09:44:35
| 2020-09-09T09:44:35
| 289,542,489
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,632
|
py
|
# Facebook "toolkit": drives third-party web forms and Facebook's mobile
# site through Selenium to extract access tokens and send followers, likes
# and comments.
# NOTE(review): this automates a credential-submitting third-party site —
# security-sensitive; handle account lists with extreme care.
try:
    from selenium import webdriver
    from time import sleep
    from colored import fg, attr
except:
    # NOTE(review): if `from time import sleep` itself failed, the sleep()
    # call below raises NameError; the bare except also hides other errors.
    print('[-] Please Install Selenium And Colored')
    sleep(5)
# Banner and service menu (yellow box-drawing art).
print(fg('#f1c40f')+'''
  ███████╗ █████╗  ██████╗███████╗██████╗  ██████╗  ██████╗ ██╗  ██╗
  ██╔════╝██╔══██╗██╔════╝██╔════╝██╔══██╗██╔═══██╗██╔═══██╗██║ ██╔╝
  █████╗  ███████║██║     █████╗  ██████╔╝██║   ██║██║   ██║█████╔╝
  ██╔══╝  ██╔══██║██║     ██╔══╝  ██╔══██╗██║   ██║██║   ██║██╔═██╗
  ██║     ██║  ██║╚██████╗███████╗██████╔╝╚██████╔╝╚██████╔╝██║  ██╗
  ╚═╝     ╚═╝  ╚═╝ ╚═════╝╚══════╝╚═════╝  ╚═════╝  ╚═════╝ ╚═╝  ╚═╝
  [1] Extract Access Token
  [2] Auto Followers
  [3] Auto Likes
  [4] Auto Comments (25=>1ACC)
  [5] Convert Link To ID
''')
# Select Services
select_service = input('[+] Enter Service ID : ')
# Run Service
if select_service == "1":
    # Service 1: log each email:password pair into a token-extractor form
    # and sort the result into live_access.txt / die_access.txt.
    exreact_upload = input('[+] Enter Email List Path : ')
    extract_emails = open(exreact_upload, 'r')
    # Read Accounts
    for email in extract_emails:
        sp = email.split(":")
        username = sp[0]
        password = sp[1]
        driver = webdriver.Chrome()
        # Move the browser window off-screen instead of running headless.
        driver.set_window_position(-10000,0)
        ok = '\n'
        driver.get('https://web.ftcontrolsv3.com/free_form_fb_get_access_token.php')
        driver.find_element_by_id('email').send_keys(username)
        driver.find_element_by_id('pass').send_keys(password + ok)
        data = driver.find_element_by_id('data')
        access = data.text
        # Error strings returned by the form decide live vs dead accounts.
        if 'Invalid username or password (401)' in access:
            with open('die_access.txt', 'a+') as die:
                die.write(f'[DIE] => {username} : {password} \n')
            print(fg('red')+'[>>>] INVALID EMAIL OR PASSWORD [EXTRACTED > FALSE]'+attr('reset'))
            driver.quit()
        elif 'Invalid username or email address (400)' in access:
            with open('die_access.txt', 'a+') as die:
                die.write(f'[DIE] => {username} : {password} \n')
            print(fg('red')+'[>>>] INVALID EMAIL OR PASSWORD [EXTRACTED > FALSE]'+attr('reset'))
            driver.quit()
        elif 'User must verify their account on www.facebook.com (405)' in access:
            with open('die_access.txt', 'a+') as die:
                die.write(f'[DIE] => {username} : {password} \n')
            print(fg('red')+'[>>>] INVALID EMAIL OR PASSWORD [EXTRACTED > FALSE]'+attr('reset'))
            driver.quit()
        else:
            with open('live_access.txt', 'a+') as live:
                live.write(access + '\n')
            print(fg('green')+'[>>>] VALID EMAIL AND PASSWORD [EXTRACTED > TRUE]'+attr('reset'))
            driver.quit()
elif select_service == "2":
    # Service 2: submit each access token to an auto-follow form.
    follow_upload = input('[+] Enter Access Tokens List Path : ')
    follow_profil = input('[+] Enter Profile ID : ')
    follow_access = open(follow_upload, 'r')
    for access in follow_access:
        driver = webdriver.Chrome()
        driver.set_window_position(-10000,0)
        driver.get('https://web.ftcontrolsv3.com/free_form_fb_send_auto_follow_access_token.php')
        driver.find_element_by_id('data').send_keys(access)
        driver.find_element_by_id('urlpost').send_keys(follow_profil)
        driver.find_element_by_id('checktoken').click()
        sleep(15)
        print(fg('green')+'[+] Followers Sent Successfully Now Check Your Profile .'+attr('reset'))
        driver.quit()
elif select_service == "3":
    # Service 3: submit each access token to an auto-like form.
    likes_upload = input('[+] Enter Access Tokens List Path : ')
    likes_postid = input('[+] Enter Post URL : ')
    likes_access = open(likes_upload, 'r')
    for access in likes_access:
        driver = webdriver.Chrome()
        driver.set_window_position(-10000,0)
        driver.get('https://web.ftcontrolsv3.com/free_form_fb_send_like_access_token.php')
        driver.find_element_by_id('data').send_keys(access)
        driver.find_element_by_id('urlpost').send_keys(likes_postid)
        driver.find_element_by_id('checktoken').click()
        sleep(10)
        print(fg('green')+'[+] Likes Sent Successfully Now Check Your Post .'+attr('reset'))
        driver.quit()
elif select_service == "4":
    # Service 4: log in to the mobile site with each account and post 50
    # comments on the given post.
    comments_upload = input("[+] Enter Email List (username:password) Path : ")
    comments_postid = input("[+] Enter Post URL (mobile version) : ")
    comments_emails = open(comments_upload, 'r')
    for email in comments_emails:
        sp = email.split(":")
        username = sp[0]
        password = sp[1]
        driver = webdriver.Chrome()
        #driver.set_window_position(-10000,0)
        driver.get('https://m.facebook.com/login')
        ok = '\n'
        driver.find_element_by_name('email').send_keys(username)
        sleep(1)
        driver.find_element_by_name('pass').send_keys(password + ok)
        sleep(3)
        driver.find_element_by_xpath('//*[@id="root"]/div[1]/div/div/div[3]/div[2]/form/div/button').click()
        sleep(1)
        a = 0
        while a < 50:
            a += 1
            driver.get(comments_postid)
            sleep(2)
            driver.find_element_by_xpath('//*[@id="composerInput"]').send_keys('[ACTIVE] => TRUE <3 :)')
            sleep(1)
            driver.find_element_by_name('submit').click()
            sleep(3)
            print(fg('green')+'[COMMENTED] => TRUE'+attr('reset'))
        driver.quit()
elif select_service == "5":
    # Service 5: resolve each profile/page link to its numeric id via
    # lookup-id.com and append the result to IDS.txt.
    id_upload = input('[+] Enter Links File Path : ')
    id_emails = open(id_upload, 'r')
    for link in id_emails:
        driver = webdriver.Chrome()
        driver.set_window_position(-10000,0)
        driver.get('https://lookup-id.com/')
        ok = '\n'
        driver.find_element_by_name('fburl').send_keys(link + ok)
        data = driver.find_element_by_id('code').text
        with open('IDS.txt', 'a+') as ids:
            ids.write(data + '\n')
        print(fg('green')+f'[ID]=> {data} [LINK]=> {link}'+attr('reset'))
|
[
"noreply@github.com"
] |
corootxnova.noreply@github.com
|
5d67b71c35158445080ba259360d58682d8c9eb1
|
86f967332ca6b8b7ed291b8fd0e398c451f51e50
|
/小猪佩奇/venv/Scripts/pip-script.py
|
46f55b5ccfe25512986e353a6b91f893ef48aa03
|
[] |
no_license
|
futurefuturehx/HX
|
df71e290db4f81f0f385de8a1202746614696500
|
a202fc0e8bea64f640abc6ca2e9374343ad53808
|
refs/heads/master
| 2020-03-16T13:28:51.802692
| 2018-06-06T01:42:24
| 2018-06-06T01:42:24
| 132,691,584
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
Python
| false
| false
| 397
|
py
|
#!E:\HX\÷«ń‹python\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# NOTE(review): auto-generated setuptools entry-point wrapper; the shebang
# path is machine-specific (and mojibake-mangled here) — do not hand-edit.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so pip sees a clean argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
|
[
"360574435@qq.com"
] |
360574435@qq.com
|
6507f5d6e70cf961a015eeebf81dddaca72965ce
|
b1585dd0bf02b08e3938bd84352bfe062eb9aaf2
|
/docs/components_page/components/__tests__/wrappers.py
|
16119b8bbc803a7b8359827fd20d947818c5fa1c
|
[
"Apache-2.0"
] |
permissive
|
astewart19/dash-bootstrap-components
|
1c326ebf9e0a037118ffa09aab7b021ee0884d74
|
79c0f19e139f48862fcc6e84ec9c6c14c343fbe4
|
refs/heads/main
| 2023-06-09T23:29:30.361196
| 2021-06-19T21:58:28
| 2021-06-19T21:58:28
| 379,700,567
| 0
| 0
|
Apache-2.0
| 2021-06-23T18:48:59
| 2021-06-23T18:48:58
| null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
# Templates used by the docs tests to wrap a component snippet in a minimal
# runnable app, one template per supported language. `{snippet}` receives
# the example code, `{components}` the layout children, `{port}` a
# test-assigned port (R and Julia only).
PY_WRAPPER = """
import dash
import dash_bootstrap_components as dbc
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
{snippet}
app.layout = html.Div([{components}])
"""
# R wrapper: same shape, but also binds the app to the given port.
R_WRAPPER = """
library(dash)
library(dashBootstrapComponents)
library(dashHtmlComponents)
app <- Dash$new(external_stylesheets = dbcThemes$BOOTSTRAP)
{snippet}
app$layout(htmlDiv(list({components})))
app$run_server(port = {port})
"""
# Julia wrapper: mirrors the R template.
JL_WRAPPER = """
using Dash, DashBootstrapComponents
app = dash(external_stylesheets=[dbc_themes.BOOTSTRAP]);
{snippet}
app.layout = html_div([{components}]);
run_server(app, "127.0.0.1", {port});
"""
|
[
"tomcbegley@gmail.com"
] |
tomcbegley@gmail.com
|
b99d9feb1836a954223ce00dbe77a3ffac8d7247
|
6b811ff3b20181bea493bbf94932e2c2a4245c5a
|
/tutorial/spiders/test_spider.py
|
e61d3ac1ffedf51792528e0441119ff1440a9293
|
[] |
no_license
|
strongball/ecommerce-spider-project
|
0a475457cb23c1c199db2baaed01d3464b751ffe
|
f3251a90d058fda02cd767400090b185084fcaf8
|
refs/heads/master
| 2022-03-01T13:02:09.419765
| 2018-12-21T15:33:17
| 2018-12-21T15:33:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from tutorial.items import TorrentItem
class MininovaSpider(CrawlSpider):
    """Crawl mininova.org torrent detail pages and extract their metadata."""

    name = 'mininova'
    allowed_domains = ['mininova.org']
    start_urls = ['http://www.mininova.org/']
    rules = [Rule(LinkExtractor(allow=['/tor/\d+']), 'parse_torrent')]

    def parse_torrent(self, response):
        """Build a TorrentItem from one torrent detail page response."""
        item = TorrentItem()
        item['url'] = response.url
        # Field name -> XPath of the node(s) holding its value.
        for field, xpath in (
            ('name', "//h1/text()"),
            ('description', "//div[@id='description']"),
            ('size', "//div[@id='specifications']/p[2]/text()[2]"),
        ):
            item[field] = response.xpath(xpath).extract()
        return item
|
[
"ms0407954@gmail.com"
] |
ms0407954@gmail.com
|
1d1bc387939be6a2d1eb78a7319a31b50b33c9b9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02664/s534445203.py
|
7254791b5bbd97247e746caa1a1da0a5e629cad5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 36
|
py
|
# Read one line and echo it with every '?' replaced by 'D'.
print(input().replace("?", "D"))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
955ccffd058a8abc739459315c6ca73e98a72e3d
|
fcf704aaa4ec7827aa826c341c89f7d5fcb9477e
|
/lang/programming/python/opencv/模糊检测/detect_blur.py
|
f0556030835bdebbebad95f7305dc08cc0db1c3d
|
[] |
no_license
|
dlxj/doc
|
afe470f465617bd239b5b4dc8b546eb82cf115c6
|
b4a9ddcc2820fd0e3c9bbd81c26a8fa35f348c23
|
refs/heads/master
| 2023-08-05T06:20:05.573100
| 2023-08-05T03:48:29
| 2023-08-05T03:48:29
| 203,584,726
| 10
| 0
| null | 2022-12-15T08:14:58
| 2019-08-21T12:58:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,003
|
py
|
# detect_blur.py
# https://yanshilin.xyz/%e4%bd%bf%e7%94%a8opencv%e5%af%b9%e6%89%8b%e6%9c%ba%e4%b8%ad%e7%85%a7%e7%89%87%e8%bf%9b%e8%a1%8c%e6%a8%a1%e7%b3%8a%e6%a3%80%e6%b5%8b/
# Blur detection: score each image by the variance of its Laplacian and
# label it "Blurry" when the score falls below a threshold.
# import the necessary packages
from imutils import paths
import argparse
import cv2
import numpy as np
def variance_of_laplacian(image):
    # compute the Laplacian of the image and then return the focus
    # measure, which is simply the variance of the Laplacian
    return cv2.Laplacian(image, cv2.CV_64F).var()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=False, default=r"D:\GitHub\doc\lang\programming\python\opencv\模糊检测",
    help="path to input directory of images")
ap.add_argument("-t", "--threshold", type=float, default=100.0,
    help="focus measures that fall below this value will be considered 'blurry'")
args = vars(ap.parse_args())
# loop over the input images
for imagePath in paths.list_images(args["images"]):
    # load the image, convert it to grayscale, and compute the
    # focus measure of the image using the Variance of Laplacian
    # method
    """
    虽然python 3 使用统一编码解决了中文字符串的问题, 但在使用opencv中imread函数读取中文路径图像文件时仍会报错
    此时可借助于numpy 先将文件数据读取出来, 然后使用opencv中imdecode函数将其解码成图像数据。此方法对python 2 和3均使用。
    """
    # (The note above explains: cv2.imread fails on non-ASCII paths, so the
    # file is read with numpy first and decoded with cv2.imdecode instead.)
    image = cv2.imdecode(np.fromfile(imagePath, dtype=np.uint8), -1)
    # image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    fm = variance_of_laplacian(gray)
    text = "Not Blurry"
    # if the focus measure is less than the supplied threshold,
    # then the image should be considered "blurry"
    if fm < args["threshold"]:
        text = "Blurry"
    # show the image; waits for a key press before moving to the next one
    cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
    cv2.imshow("Image", image)
    key = cv2.waitKey(0)
|
[
"guandong@qq.com"
] |
guandong@qq.com
|
1e234e2f94b8e313965fe7fb2d4cef35a9ae56fb
|
da5bc6efaebc9ff015938d207b25c7804bc03b33
|
/15_loop_quiz/quiz01/quiz01_5.py
|
fa22245ca580bcbc03c3005fd4fa5565cc48c0cb
|
[] |
no_license
|
codud0954/megait_python_20201116
|
b0f68f50a1e0d41c3c35535e718d5a236a7b1a98
|
a71f57d4332027406953599612cd014de2d26713
|
refs/heads/master
| 2023-01-31T11:14:27.611468
| 2020-12-18T09:03:11
| 2020-12-18T09:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
# Read a number and report whether it is prime.
num = int(input("수를 입력하세요: "))
# BUG FIX: numbers below 2 (0, 1, negatives) are not prime by definition;
# the original initialized is_prime = True and never ruled them out.
# For num == 2, range(2, 2) is empty, so it is correctly reported prime
# without the original's special case.
is_prime = num >= 2
for i in range(2, num):
    if num % i == 0:
        is_prime = False
        break
if is_prime:
    print("소수입니다.")
else:
    print("소수가 아닙니다.")
|
[
"noreply@github.com"
] |
codud0954.noreply@github.com
|
ef9df75ae18551d900c9219eef16919d8b4567f1
|
7f09805ebff6d8604210b7a1118c4103268561ea
|
/bakery/admin.py
|
71c1f2084d4ab2d124001249f42f5d7eb6e4b0f9
|
[] |
no_license
|
iam-hitesh/bakery-management
|
02061bbb1d05a661c3138a9c4127e48d30e553da
|
28ff62bf21cd7a5ac48c6e396f43c7ccedfc6032
|
refs/heads/main
| 2023-02-24T00:34:59.253570
| 2021-01-26T18:06:25
| 2021-01-26T18:06:25
| 333,172,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from django.contrib import admin
import bakery.models as models

# Register every bakery model with the Django admin site.
for _model in (
    models.User,
    models.BakeryIngredient,
    models.BakeryItemIngredient,
    models.Order,
):
    admin.site.register(_model)
|
[
"hitesh@shortcastle.com"
] |
hitesh@shortcastle.com
|
e1a694282c9b54bd1ac651bfa251ebc7cbb84d76
|
53e65ac900eeae3dcf2e7203e3b302c4bd7b33e5
|
/main.py
|
91188246ef419500d321929a196f1f5a671a3b5d
|
[] |
no_license
|
bells64/count_liters
|
81c58a477e9c79ced381cbe06e0d979e001ee1b9
|
1bf942a4f5e17cac1e2b6a64f5ce36e05c829288
|
refs/heads/master
| 2020-09-22T17:41:08.300154
| 2019-12-02T05:03:07
| 2019-12-02T05:03:07
| 225,289,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
txt = 'helllllllo'
def count(a, char):
    """Return how many elements of iterable *a* equal *char*.

    Uses a generator expression instead of the original manual counter
    loop; behavior is unchanged (for strings it matches str.count for
    single-character needles).
    """
    return sum(1 for item in a if item == char)
print(count(txt, 'l'))
|
[
"bells64@github.com"
] |
bells64@github.com
|
98b50646239bb17d4ade8aa489894d42c6bb1794
|
d602a31dcd5e1cb2fff4a4d7eb5413f27c420f2f
|
/random_forest_error.py
|
2894a83a8e48ac41addf3259cc3747f7301ee4b9
|
[] |
no_license
|
TheOpti/classification-experiments
|
f40fe44c844497e702e4d65c10421914d9b5b013
|
d8e6d57d2df3f4fbbbb80e6ed2e1ae25be9eec98
|
refs/heads/master
| 2020-03-06T19:00:41.958843
| 2018-06-11T21:56:01
| 2018-06-11T21:56:01
| 127,018,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
# Plot 10-fold cross-validated accuracy of a random forest as a function of
# the number of trees, for the sklearn wine and digits datasets.
# NOTE(review): the bare `print ...` statements below are Python 2 syntax;
# this script does not run under Python 3 without converting them to
# print() calls.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine, load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import os
if not os.path.exists('visualizations'):
    os.makedirs('visualizations')
# Fixed seed so the forests' subsampling is reproducible.
np.random.seed(1000)
nb_classifications = 100
datasets = {
    'wine': load_wine(),
    'digits': load_digits()
}
for key in datasets:
    print '\nData: ', key
    accuracy = []
    # Try forest sizes 1..99 and record mean CV accuracy for each.
    for i in range(1, nb_classifications):
        print '\rIteration number: ', i + 1,
        rf = RandomForestClassifier(n_estimators = i)
        acc = cross_val_score(rf, datasets[key].data, datasets[key].target, scoring='accuracy', cv=10).mean()
        accuracy.append(acc)
    # Show results
    plt.figure()
    plt.xlabel('Number of trees')
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(accuracy)
    plt.savefig('./visualizations/random_forest_error_' + key + '.png')
    plt.clf()
|
[
"wieckowski.piotr.93@gmail.com"
] |
wieckowski.piotr.93@gmail.com
|
7c6373d2a700c8aad6d3d29622b179179f39ac31
|
cbfa01422f122b75cec001bfc7649af1ca2d56dc
|
/piscine_python_django/d01/ex02/var_to_dict.py
|
f0e9659e0cd1db125c0ae0cf4fc22b728a4b3aba
|
[] |
no_license
|
gvannest/42-projects
|
bb16e0f4625563c3198e620ffa0787cb06ba4041
|
11cca79e5c662986cbdf1622a02c77da59c58987
|
refs/heads/master
| 2022-12-10T01:48:59.253163
| 2019-08-07T13:43:19
| 2019-08-07T13:43:19
| 112,237,754
| 1
| 0
| null | 2022-12-08T04:57:19
| 2017-11-27T19:16:42
|
Python
|
UTF-8
|
Python
| false
| false
| 921
|
py
|
def create_dict():
    """Print a birth-year -> guitarist mapping, one "year : name" per line.

    A later guitarist born in the same year replaces the earlier one, but
    keeps the earlier entry's position (dicts preserve first-insertion
    order). Returns None.
    """
    guitarists = [
        ('Hendrix', '1942'),
        ('Allman', '1946'),
        ('King', '1925'),
        ('Clapton', '1945'),
        ('Johnson', '1911'),
        ('Berry', '1926'),
        ('Vaughan', '1954'),
        ('Cooder', '1947'),
        ('Page', '1944'),
        ('Richards', '1943'),
        ('Hammett', '1962'),
        ('Cobain', '1967'),
        ('Garcia', '1942'),
        ('Beck', '1944'),
        ('Santana', '1947'),
        ('Ramone', '1948'),
        ('White', '1975'),
        ('Frusciante', '1970'),
        ('Thompson', '1949'),
        ('Burton', '1939')
    ]
    # Invert name -> year into year -> name.
    by_year = {}
    for name, year in guitarists:
        by_year[year] = name
    for year, name in by_year.items():
        print("{} : {}".format(year, name))
    return None


if __name__ == "__main__":
    create_dict()
|
[
"gautier.vanneste42@gmail.com"
] |
gautier.vanneste42@gmail.com
|
47b80596a317932a80e48faa73c99d137bc82e2c
|
80b6685155881209bc18bb71ff1a9c82ca0508d2
|
/helper_classes/test_data/filenames.py
|
55fe80acf366bd23355817832a6801a64c9a0db8
|
[] |
no_license
|
DarksightKellar/HMS
|
7cd96b36a0b51cab350dfe75a2d29b9e4f184ba2
|
e7357310f514e34e0afcd32d8bfbcc52d2c2c395
|
refs/heads/master
| 2022-02-16T17:34:37.242089
| 2019-09-19T13:46:16
| 2019-09-19T13:46:16
| 193,266,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 722
|
py
|
# Paths to the early-sprint XML problem-instance files used by the tests.
sprint01 = 'helper_classes/test_data/sprint_early/sprint01.xml'
sprint02 = 'helper_classes/test_data/sprint_early/sprint02.xml'
sprint03 = 'helper_classes/test_data/sprint_early/sprint03.xml'
sprint04 = 'helper_classes/test_data/sprint_early/sprint04.xml'
sprint05 = 'helper_classes/test_data/sprint_early/sprint05.xml'
sprint06 = 'helper_classes/test_data/sprint_early/sprint06.xml'
sprint07 = 'helper_classes/test_data/sprint_early/sprint07.xml'
sprint08 = 'helper_classes/test_data/sprint_early/sprint08.xml'
sprint09 = 'helper_classes/test_data/sprint_early/sprint09.xml'
sprint10 = 'helper_classes/test_data/sprint_early/sprint10.xml'
test = 'data/test.xml'
# Instance currently selected as input, and where the solution is written.
INSTANCE_XML = sprint02
SOLUTION_XML = 'data/solution.xml'
|
[
"kellar.base09@gmail.com"
] |
kellar.base09@gmail.com
|
6a74075439eb10e19dd38ffdfffc1858330ec6ae
|
a60c33fc740bf4941a9b5445fd6e5ad19d61e571
|
/基础/案例/(10,20)按合数质数分类.py
|
3dc6a32d154ee918af76e5674875e4044bd0671e
|
[] |
no_license
|
unixcs/python_study
|
6e3bc976b6a4e8d2c4666410a3741f00394fdeac
|
8a4e4b943f9a8136eddb1f65478592fd9c4efd8a
|
refs/heads/master
| 2023-07-12T18:41:22.376261
| 2021-08-15T14:59:16
| 2021-08-15T14:59:16
| 395,249,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# Classify each number in [10, 20) as composite (合数) or prime (质数).
# BUG FIX: the original attached `else: ... break` to the inner `if`, so the
# loop stopped after testing only i == 2 and reported every odd number as
# prime (e.g. 15). Using the for/else clause runs the prime branch only
# when no divisor was found.
for num in range(10, 20):
    for i in range(2, num):
        if num % i == 0:
            print(num , '是合数')
            break
    else:
        # for/else: executed only when the loop finished without `break`.
        print(num , '是质数')
|
[
"unixcs@qq.com"
] |
unixcs@qq.com
|
fb118d00ae7da40b529f47a972bbc6ab70c24353
|
e2de3bcf4466a8542c94a973228026ca4d7ba46a
|
/test.py
|
c20c6c76ba914627e9cc9e5819963c38cd1d5f54
|
[] |
no_license
|
Grayosn/PilotTV
|
ce6147a5e3f0b08f19cf1a8c0f2d43761b97bd9a
|
748b4e0f7ea9b8f09358cd9bff3029498144445a
|
refs/heads/master
| 2016-09-13T22:08:56.338445
| 2016-04-21T09:10:43
| 2016-04-21T09:10:43
| 56,573,511
| 0
| 0
| null | 2016-04-20T02:18:41
| 2016-04-19T07:12:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,046
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class Test(unittest.TestCase):
    """Selenium smoke test for the PilotTV admin UI: logs in and walks the
    customer/store/player/sales menu links, checking the alerts shown."""
    def setUp(self):
        # Fresh Chrome session per test; implicit wait covers slow loads.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(30)
        self.base_url = "http://172.16.2.9:8081/"
        self.verificationErrors = []
        self.accept_next_alert = True
    def test_(self):
        driver = self.driver
        driver.get(self.base_url + "/")
        # Log in with the fixed test account and confirm the success alert.
        driver.find_element_by_id("Account").clear()
        driver.find_element_by_id("Account").send_keys("grayson")
        driver.find_element_by_id("Password").clear()
        driver.find_element_by_id("Password").send_keys("123456")
        driver.find_element_by_id("btnSubmit").click()
        self.assertEqual(u"登入成功", self.close_alert_and_get_its_text())
        # Walk through the menu links, pausing between clicks for the UI.
        driver.find_element_by_css_selector("b").click()
        driver.find_element_by_id("customer_Link_M").click()
        time.sleep(1)
        driver.find_element_by_id("customer_Link").click()
        time.sleep(1)
        driver.find_element_by_id("store_Link_M").click()
        time.sleep(2)
        driver.find_element_by_id("store_Link").click()
        time.sleep(1)
        # The store page is expected to fail loading in this environment.
        self.assertEqual(u"店家資料載入失敗", self.close_alert_and_get_its_text())
        time.sleep(1)
        driver.find_element_by_id("store_Link_M").click()
        time.sleep(1)
        driver.find_element_by_id("player_Link").click()
        time.sleep(1)
        driver.find_element_by_id("sales_Link_M").click()
        time.sleep(1)
        driver.find_element_by_id("sales_Link").click()
        time.sleep(1)
        # Exercise the search filter dropdowns on the sales page.
        Select(driver.find_element_by_id("searchWay")).select_by_visible_text(u"英語名稱")
        time.sleep(1)
        Select(driver.find_element_by_id("searchStatus")).select_by_visible_text(u"離職")
        time.sleep(1)
        Select(driver.find_element_by_id("searchStatus_All")).select_by_visible_text(u"離職")
    def is_element_present(self, how, what):
        # True when the locator matches at least one element.
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
        return True
    def is_alert_present(self):
        # True when a JavaScript alert is currently displayed.
        try: self.driver.switch_to_alert()
        except NoAlertPresentException: return False
        return True
    def close_alert_and_get_its_text(self):
        # Accept or dismiss the current alert (per accept_next_alert) and
        # return its text; the flag resets to True afterwards.
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    def tearDown(self):
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
|
[
"Grayson@pilottv.com.tw"
] |
Grayson@pilottv.com.tw
|
c5d8fa1842e47575ae26b3fc8e1dd5f7fdb9342a
|
5bae8f94dd4ab5779d2f735c3aa6b9bc1b2dc6ae
|
/vcf_tools/vcf2xpclr.py
|
857caaf612f99a4329a502991dd06cb0791d8364
|
[] |
no_license
|
stsmall/Wb_sWGA
|
f9dfc03e5137ad392be13a95a808fe95b26df6eb
|
6d0b25a7999459c6468fbaa5a1ba10d290ed449e
|
refs/heads/master
| 2020-06-30T12:09:21.054461
| 2020-05-06T17:14:41
| 2020-05-06T17:14:41
| 56,272,654
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,706
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 16:09:29 2017
@author: stsmall
creates input file for XPCLR, the centimorgan stuff is still a bit wooly. In
Wb the recombination rate is 4x greater than the mutation rate. Assuming
uniform recombination the distance between 2 snps to have a prob of crossing-
over of 0.01 is 850,000 bases. 0.01/(4 * 2.9E-9) = 850000
"""
from __future__ import print_function
from __future__ import division
import argparse
import numpy as np
import re
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument('-i', "--vcf", type=str,
help='path to vcf file')
parser.add_argument('-ped', "--pedfile", type=str, required=True,
help="path to pedfile with pop and individuals")
parser.add_argument('-pops', "--poplist", type=str, nargs='+', required=True,
help="list of pops")
parser.add_argument('-s', "--size", type=str, nargs='+', required=False,
default='', help="how many from each pop, blank uses all")
parser.add_argument('-c', "--cmorgan", type=int, required=True,
default=850000, help="physical length where prob recomb is"
"0.01")
args = parser.parse_args()
def GetPopInfo(popinfo, sizes, pops, rand=True):
    """Read a ped file and return a mapping of population -> individuals.

    Parameters
    ----------
    popinfo : str
        Path to a ped file of whitespace-separated "pop individual" lines.
    sizes : list of str
        Optional per-population sample counts, parallel to `pops`; an
        empty value keeps every individual.
    pops : list of str
        Population labels to keep; other lines are ignored.
    rand : bool
        If True, subsample individuals uniformly without replacement;
        otherwise keep the first `sizes[i]` in file order.

    Returns
    -------
    defaultdict(list)
        pop -> list (or ndarray, when rand=True) of individual ids.
    """
    peddict = defaultdict(list)
    with open(popinfo, 'r') as ped:
        for line in ped:
            if line.strip():
                x = line.strip().split()
                if x[0] in pops:
                    peddict[x[0]].append(x[1])
    poplist = pops
    if sizes:
        # BUG FIX: the original used `sizes = map(int, sizes)`; under
        # Python 3 `map` returns an iterator, so the `sizes[i]` indexing
        # below raised TypeError. Materialize a list instead.
        sizes = [int(s) for s in sizes]
        if rand:
            for pop in poplist:
                i = pops.index(pop)
                peddict[pop] = np.random.choice(peddict[pop], sizes[i],
                                                replace=False)
        else:
            for pop in poplist:
                i = pops.index(pop)
                peddict[pop] = peddict[pop][:sizes[i]]
    return(peddict)
def Vcf2Dict(vcfin):
    """Parse a VCF file into per-chromosome records plus header tokens.

    Parameters
    ----------
    vcfin : str
        Path to the VCF file.

    Returns
    -------
    (defaultdict(list), list of str)
        chrom -> list of split data lines, and the tokens of the #CHROM
        header line. BUG FIX: `samples` is now initialized to [] so a file
        without a #CHROM line no longer raises UnboundLocalError here.
    """
    xpclrdict = defaultdict(list)
    samples = []
    with open(vcfin, 'r') as vcf:
        for line in vcf:
            if line.startswith("#CHROM"):
                samples = line.split()
            elif not line.startswith("#"):
                x = line.split()
                xpclrdict[x[0]].append(x)
    return(xpclrdict, samples)
def WriteXpclr(xpclrdict, peddict, samples):
    """Write one XPCLR .geno file per (population, chromosome) pair.

    Each output line holds the space-joined diploid allele calls (two per
    sampled individual) for one site; missing calls ('.') are recoded as 9,
    XPCLR's missing-data code. Output files are named "<pop>.<chrom>.geno".
    """
    for chrom in xpclrdict.keys():
        for pop in peddict.keys():
            # Column indices of this population's samples in the VCF header.
            p_ix = [samples.index(s) for s in peddict[pop]]
            f = open("{}.{}.geno".format(pop, chrom), 'w')
            for pos in xpclrdict[chrom]:
                countgt = []
                for s in p_ix:
                    # The genotype is the first colon-separated field;
                    # split the call on either "/" or "|" phasing.
                    countgt.extend(re.split("/|\|", pos[s].split(":")[0]))
                gt = " ".join(countgt)
                gt = gt.replace(".", "9")
                f.write("{} \n".format(gt))
            f.close()
    return(None)
def WriteMap(xpclrdict, cmorgan):
    """Write a "<chrom>.map" file for each chromosome.

    Tab-separated columns per site: SNP name (chromosome name with the
    position appended), chromosome, genetic position (physical position
    divided by `cmorgan`), physical position, ref allele, alt allele.
    """
    for chrom, sites in xpclrdict.items():
        with open("{}.map".format(chrom), 'w') as out:
            for site in sites:
                pos = site[1]
                genetic = int(pos) / cmorgan
                out.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(
                    chrom + pos, chrom, genetic, pos, site[3], site[4]))
    return(None)
if __name__ == "__main__":
    # Unpack the command-line options parsed at module import time.
    vcf = args.vcf
    popinfo = args.pedfile
    pops = args.poplist
    sizes = args.size
    # Load the VCF once, select the requested individuals, then emit
    # one .geno file per (pop, chrom) and one .map file per chrom in
    # the XP-CLR input format.
    xpclrdict, samples = Vcf2Dict(vcf)
    peddict = GetPopInfo(popinfo, sizes, pops)
    WriteXpclr(xpclrdict, peddict, samples)
    WriteMap(xpclrdict, args.cmorgan)
|
[
"stsmall@gmail.com"
] |
stsmall@gmail.com
|
2ceb6ea206bc87d1740db3a9bab33f2d5924002c
|
d5d3851edf71f6e52521838b121854a1aafc743c
|
/mqtt/publisher.py
|
e3e88cf186735a36c0decad49be03feb7fb3b470
|
[] |
no_license
|
Indra-Sunariyana/netprog_course
|
fb7a4c9d17a86486939849f537d18b113a5e3417
|
c39f0db8790398403fcf664e2c12c842df5aff50
|
refs/heads/master
| 2022-07-12T06:21:45.765125
| 2020-05-18T03:09:34
| 2020-05-18T03:09:34
| 264,425,200
| 1
| 0
| null | 2020-05-16T11:48:01
| 2020-05-16T11:48:00
| null |
UTF-8
|
Python
| false
| false
| 1,465
|
py
|
# Thanks to: https://techtutorialsx.com/2017/04/14/python-publishing-messages-to-mqtt-topic/
"""Publish a Topic to MQTT Broker"""
import paho.mqtt.client as mqttClient
import time
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback; rc == 0 means the broker accepted us."""
    global Connected
    if rc != 0:
        print('Connection failed')
        return
    print('Connected to broker')
    Connected = True  # signal the waiting main loop that the link is up
Connected = False  # global connection-state flag, set by on_connect

# After you successfully run the publisher and subscriber code,
# try to create your own MQTT broker by creating an account at cloudmqtt.com
broker_address = 'your cloud broker url'
port = 'your cloud broker port' # do not type string, but change to integer
username = 'your username'
password = 'your password'

client = mqttClient.Client('Python')    # create new instance
# BUG FIX: the credentials variable above is named `username`; the
# original passed the undefined name `user`, raising NameError at runtime.
client.username_pw_set(username, password=password)
client.on_connect = on_connect          # attach function to callback
client.connect(broker_address, port=port)  # connect to broker
client.loop_start()                     # start the network loop

while Connected != True:                # wait for connection
    time.sleep(0.1)

try:
    # Forward each line typed by the user to the 'python/' topic.
    while True:
        value = input('Enter the message:')
        client.publish('python/', value)
except KeyboardInterrupt:
    # Ctrl-C: disconnect cleanly and stop the background loop.
    client.disconnect()
    client.loop_stop()
[
"rudyhend@gmail.com"
] |
rudyhend@gmail.com
|
0c70d50de891b12b2414c3315866ce401076383b
|
70be7c9588cef203c813a1dc6abaf490ffce75ab
|
/alexa-program/com/vmware/cis/tagging_client.py
|
44daf861ae34246996014ea45c8a33eb6ad36784
|
[] |
no_license
|
taromurata/TDP2018_VMCAPI
|
3268fa7b62ffd820ac580beab50278ddcfa108cc
|
5d395700ab3d0d1d45b497e48beab8c366fca9f5
|
refs/heads/master
| 2020-03-21T17:24:29.583217
| 2018-09-12T02:21:01
| 2018-09-12T02:21:01
| 138,830,114
| 1
| 1
| null | 2018-06-27T10:12:31
| 2018-06-27T04:37:12
| null |
UTF-8
|
Python
| false
| false
| 100,505
|
py
|
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2018 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.cis.tagging.
#---------------------------------------------------------------------------
"""
The ``com.vmware.cis.tagging_client`` component provides methods and classes to
attach metadata, by means of tags, to vSphere objects to make these objects
more sortable and searchable. You can use it to create, manage, and enumerate
tags and their categories (the group a tag belongs to). You can also query the
attached tags and attached objects.
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class CategoryModel(VapiStruct):
    """
    The ``CategoryModel`` class defines a category that is used to group one or
    more tags.
    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    def __init__(self,
                 id=None,
                 name=None,
                 description=None,
                 cardinality=None,
                 associable_types=None,
                 used_by=None,
                ):
        """
        :type id: :class:`str`
        :param id: The unique identifier of the category.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``. When methods return a value of
            this class as a return value, the attribute will be an identifier
            for the resource type: ``com.vmware.cis.tagging.Category``.
        :type name: :class:`str`
        :param name: The display name of the category.
        :type description: :class:`str`
        :param description: The description of the category.
        :type cardinality: :class:`CategoryModel.Cardinality`
        :param cardinality: The associated cardinality (SINGLE, MULTIPLE) of the category.
        :type associable_types: :class:`set` of :class:`str`
        :param associable_types: The types of objects that the tags in this category can be attached
            to. If the :class:`set` is empty, then tags can be attached to all
            types of objects. This field works only for objects that reside in
            Inventory Service (IS). For non IS objects, this check is not
            performed today and hence a tag can be attached to any non IS
            object.
        :type used_by: :class:`set` of :class:`str`
        :param used_by: The :class:`set` of users that can use this category. To add users
            to this, you need to have the edit privilege on the category.
            Similarly, to unsubscribe from this category, you need the edit
            privilege on the category. You should not modify other users
            subscription from this :class:`set`.
        """
        self.id = id
        self.name = name
        self.description = description
        self.cardinality = cardinality
        self.associable_types = associable_types
        self.used_by = used_by
        VapiStruct.__init__(self)
    class Cardinality(Enum):
        """
        The ``CategoryModel.Cardinality`` class defines the number of tags in a
        category that can be assigned to an object.
        .. note::
            This class represents an enumerated type in the interface language
            definition. The class contains class attributes which represent the
            values in the current version of the enumerated type. Newer versions of
            the enumerated type may contain new values. To use new values of the
            enumerated type in communication with a server that supports the newer
            version of the API, you instantiate this class. See :ref:`enumerated
            type description page <enumeration_description>`.
        """
        # Class attributes are placeholders; _set_values below replaces
        # them with the actual Cardinality enum instances.
        SINGLE = None
        """
        An object can only be assigned one of the tags in this category. For
        example, if a category is "Operating System", then different tags of this
        category would be "Windows", "Linux", and so on. In this case a VM object
        can be assigned only one of these tags and hence the cardinality of the
        associated category here is single.
        """
        MULTIPLE = None
        """
        An object can be assigned several of the tags in this category. For
        example, if a category is "Server", then different tags of this category
        would be "AppServer", "DatabaseServer" and so on. In this case a VM object
        can be assigned more than one of the above tags and hence the cardinality
        of the associated category here is multiple.
        """
        def __init__(self, string):
            """
            :type string: :class:`str`
            :param string: String value for the :class:`Cardinality` instance.
            """
            Enum.__init__(string)
    # Auto-generated binding registration: install the concrete enum
    # values and the VMODL type descriptor for the nested enum.
    Cardinality._set_values([
        Cardinality('SINGLE'),
        Cardinality('MULTIPLE'),
    ])
    Cardinality._set_binding_type(type.EnumType(
        'com.vmware.cis.tagging.category_model.cardinality',
        Cardinality))
# Auto-generated binding registration: describes CategoryModel's wire
# representation (field names and vAPI types) to the serializer.
# 'id' is the struct's canonical identifier field.
CategoryModel._set_binding_type(type.StructType(
    'com.vmware.cis.tagging.category_model', {
        'id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
        'name': type.StringType(),
        'description': type.StringType(),
        'cardinality': type.ReferenceType(__name__, 'CategoryModel.Cardinality'),
        'associable_types': type.SetType(type.StringType()),
        'used_by': type.SetType(type.StringType()),
    },
    CategoryModel,
    True,
    ["id"]))
class TagModel(VapiStruct):
    """
    The ``TagModel`` class defines a tag that can be attached to vSphere
    objects.
    .. tip::
        The arguments are used to initialize data attributes with the same
        names.
    """
    def __init__(self,
                 id=None,
                 category_id=None,
                 name=None,
                 description=None,
                 used_by=None,
                ):
        """
        :type id: :class:`str`
        :param id: The unique identifier of the tag.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``. When methods return a value of this
            class as a return value, the attribute will be an identifier for
            the resource type: ``com.vmware.cis.tagging.Tag``.
        :type category_id: :class:`str`
        :param category_id: The identifier of the parent category in which this tag will be
            created.
            When clients pass a value of this class as a parameter, the
            attribute must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``. When methods return a value of
            this class as a return value, the attribute will be an identifier
            for the resource type: ``com.vmware.cis.tagging.Category``.
        :type name: :class:`str`
        :param name: The display name of the tag.
        :type description: :class:`str`
        :param description: The description of the tag.
        :type used_by: :class:`set` of :class:`str`
        :param used_by: The :class:`set` of users that can use this tag. To add users to
            this, you need to have the edit privilege on the tag. Similarly, to
            unsubscribe from this tag, you need the edit privilege on the tag.
            You should not modify other users subscription from this
            :class:`set`.
        """
        # Plain attribute assignment; serialization is driven by the
        # _set_binding_type registration that follows this class.
        self.id = id
        self.category_id = category_id
        self.name = name
        self.description = description
        self.used_by = used_by
        VapiStruct.__init__(self)
# Auto-generated binding registration: describes TagModel's wire
# representation (field names and vAPI types) to the serializer.
TagModel._set_binding_type(type.StructType(
    'com.vmware.cis.tagging.tag_model', {
        'id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
        'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
        'name': type.StringType(),
        'description': type.StringType(),
        'used_by': type.SetType(type.StringType()),
    },
    TagModel,
    True,
    ["id"]))
class Category(VapiInterface):
    """
    The ``Category`` class provides methods to create, read, update, delete,
    and enumerate categories.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        # _CategoryStub (defined elsewhere in this module) carries the
        # operation metadata backing the self._invoke calls below.
        VapiInterface.__init__(self, config, _CategoryStub)
    class CreateSpec(VapiStruct):
        """
        The ``Category.CreateSpec`` class is used to create a category.
        Use the :func:`Category.create` method to create a category defined by the
        create specification.
        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        def __init__(self,
                     name=None,
                     description=None,
                     cardinality=None,
                     associable_types=None,
                     category_id=None,
                    ):
            """
            :type name: :class:`str`
            :param name: The display name of the category.
            :type description: :class:`str`
            :param description: The description of the category.
            :type cardinality: :class:`CategoryModel.Cardinality`
            :param cardinality: The associated cardinality (SINGLE, MULTIPLE) of the category.
            :type associable_types: :class:`set` of :class:`str`
            :param associable_types: Object types to which this category's tags can be attached.
            :type category_id: :class:`str` or ``None``
            :param category_id: This attribute was added in vSphere API 6.7
                When clients pass a value of this class as a parameter, the
                attribute must be an identifier for the resource type:
                ``com.vmware.cis.tagging.Category``. When methods return a value of
                this class as a return value, the attribute will be an identifier
                for the resource type: ``com.vmware.cis.tagging.Category``.
                If None an identifier will be generated by the server
            """
            self.name = name
            self.description = description
            self.cardinality = cardinality
            self.associable_types = associable_types
            self.category_id = category_id
            VapiStruct.__init__(self)
    # Auto-generated binding registration for the nested CreateSpec.
    CreateSpec._set_binding_type(type.StructType(
        'com.vmware.cis.tagging.category.create_spec', {
            'name': type.StringType(),
            'description': type.StringType(),
            'cardinality': type.ReferenceType(__name__, 'CategoryModel.Cardinality'),
            'associable_types': type.SetType(type.StringType()),
            'category_id': type.OptionalType(type.IdType()),
        },
        CreateSpec,
        False,
        None))
    class UpdateSpec(VapiStruct):
        """
        The ``Category.UpdateSpec`` class describes the updates to be made to an
        existing category.
        Use the :func:`Category.update` method to modify a category. When you call
        the method, specify the category identifier. You obtain the category
        identifier when you call the :func:`Category.create` method. You can also
        retrieve an identifier by using the :func:`Category.list` method.
        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """
        def __init__(self,
                     name=None,
                     description=None,
                     cardinality=None,
                     associable_types=None,
                    ):
            """
            :type name: :class:`str` or ``None``
            :param name: The display name of the category.
                If None the name will not be modified.
            :type description: :class:`str` or ``None``
            :param description: The description of the category.
                If None the description will not be modified.
            :type cardinality: :class:`CategoryModel.Cardinality` or ``None``
            :param cardinality: The associated cardinality (SINGLE, MULTIPLE) of the category.
                If None the cardinality will not be modified.
            :type associable_types: :class:`set` of :class:`str` or ``None``
            :param associable_types: Object types to which this category's tags can be attached.
                The :class:`set` of associable types cannot be updated
                incrementally. For example, if
                :attr:`Category.UpdateSpec.associable_types` originally contains
                {A,B,C} and you want to add D, then you need to pass {A,B,C,D} in
                your update specification. You also cannot remove any item from
                this :class:`set`. For example, if you have {A,B,C}, then you
                cannot remove say {A} from it. Similarly, if you start with an
                empty :class:`set`, then that implies that you can tag any object
                and hence you cannot later pass say {A}, because that would be
                restricting the type of objects you want to tag. Thus, associable
                types can only grow and not shrink.
                If None the associable types will not be modified.
            """
            self.name = name
            self.description = description
            self.cardinality = cardinality
            self.associable_types = associable_types
            VapiStruct.__init__(self)
    # Auto-generated binding registration for the nested UpdateSpec;
    # every field is optional so unset attributes are left unmodified.
    UpdateSpec._set_binding_type(type.StructType(
        'com.vmware.cis.tagging.category.update_spec', {
            'name': type.OptionalType(type.StringType()),
            'description': type.OptionalType(type.StringType()),
            'cardinality': type.OptionalType(type.ReferenceType(__name__, 'CategoryModel.Cardinality')),
            'associable_types': type.OptionalType(type.SetType(type.StringType())),
        },
        UpdateSpec,
        False,
        None))
    # The methods below are thin stubs: each forwards its arguments to
    # self._invoke, which performs the remote call described by
    # _CategoryStub and raises the documented vAPI error classes.
    def create(self,
               create_spec,
               ):
        """
        Creates a category. To invoke this method, you need the create category
        privilege.
        :type create_spec: :class:`Category.CreateSpec`
        :param create_spec: Specification for the new category to be created.
        :rtype: :class:`str`
        :return: The identifier of the created category.
            The return value will be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
            if the :attr:`Category.CreateSpec.name` provided in the
            ``create_spec`` is the name of an already existing category.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
            if any of the information in the ``create_spec`` is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to create a category.
        """
        return self._invoke('create',
                            {
                            'create_spec': create_spec,
                            })
    def get(self,
            category_id,
            ):
        """
        Fetches the category information for the given category identifier. In
        order to view the category information, you need the read privilege on
        the category.
        :type category_id: :class:`str`
        :param category_id: The identifier of the input category.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :rtype: :class:`CategoryModel`
        :return: The :class:`CategoryModel` that corresponds to ``category_id``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the category.
        """
        return self._invoke('get',
                            {
                            'category_id': category_id,
                            })
    def update(self,
               category_id,
               update_spec,
               ):
        """
        Updates an existing category. To invoke this method, you need the edit
        privilege on the category.
        :type category_id: :class:`str`
        :param category_id: The identifier of the category to be updated.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :type update_spec: :class:`Category.UpdateSpec`
        :param update_spec: Specification to update the category.
        :raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
            if the :attr:`Category.UpdateSpec.name` provided in the
            ``update_spec`` is the name of an already existing category.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
            if any of the information in the ``update_spec`` is invalid.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to update the category.
        """
        return self._invoke('update',
                            {
                            'category_id': category_id,
                            'update_spec': update_spec,
                            })
    def delete(self,
               category_id,
               ):
        """
        Deletes an existing category. To invoke this method, you need the
        delete privilege on the category.
        :type category_id: :class:`str`
        :param category_id: The identifier of category to be deleted.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to delete the category.
        """
        return self._invoke('delete',
                            {
                            'category_id': category_id,
                            })
    def list(self):
        """
        Enumerates the categories in the system. To invoke this method, you
        need the read privilege on the individual categories. The :class:`list`
        will only contain those categories for which you have read privileges.
        :rtype: :class:`list` of :class:`str`
        :return: The :class:`list` of resource identifiers for the categories in the
            system.
            The return value will contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Category``.
        """
        return self._invoke('list', None)
    def list_used_categories(self,
                             used_by_entity,
                             ):
        """
        Enumerates all categories for which the ``used_by_entity`` is part of
        the :attr:`CategoryModel.used_by` subscribers :class:`set`. To invoke
        this method, you need the read privilege on the individual categories.
        :type used_by_entity: :class:`str`
        :param used_by_entity: The field on which the results will be filtered.
        :rtype: :class:`list` of :class:`str`
        :return: The :class:`list` of resource identifiers for the categories in the
            system that are used by ``used_by_entity``.
            The return value will contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Category``.
        """
        return self._invoke('list_used_categories',
                            {
                            'used_by_entity': used_by_entity,
                            })
    def add_to_used_by(self,
                       category_id,
                       used_by_entity,
                       ):
        """
        Adds the ``used_by_entity`` to the :attr:`CategoryModel.used_by`
        subscribers :class:`set` for the specified category. If the
        ``used_by_entity`` is already in the :class:`set`, then this becomes an
        idempotent no-op. To invoke this method, you need the modify
        :attr:`CategoryModel.used_by` privilege on the category.
        :type category_id: :class:`str`
        :param category_id: The identifier of the input category.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :type used_by_entity: :class:`str`
        :param used_by_entity: The name of the user to be added to the
            :attr:`CategoryModel.used_by` :class:`set`.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to add an entity to the
            :attr:`CategoryModel.used_by` field.
        """
        return self._invoke('add_to_used_by',
                            {
                            'category_id': category_id,
                            'used_by_entity': used_by_entity,
                            })
    def remove_from_used_by(self,
                            category_id,
                            used_by_entity,
                            ):
        """
        Removes the ``used_by_entity`` from the :attr:`CategoryModel.used_by`
        subscribers :class:`set`. If the ``used_by_entity`` is not using this
        category, then this becomes a no-op. To invoke this method, you need
        the modify :attr:`CategoryModel.used_by` privilege on the category.
        :type category_id: :class:`str`
        :param category_id: The identifier of the input category.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :type used_by_entity: :class:`str`
        :param used_by_entity: The name of the user to be removed from the
            :attr:`CategoryModel.used_by` :class:`set`.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to remove an entity from the
            :attr:`CategoryModel.used_by` field.
        """
        return self._invoke('remove_from_used_by',
                            {
                            'category_id': category_id,
                            'used_by_entity': used_by_entity,
                            })
    def revoke_propagating_permissions(self,
                                       category_id,
                                       ):
        """
        Revokes all propagating permissions on the given category. You should
        then attach a direct permission with tagging privileges on the given
        category. To invoke this method, you need category related privileges
        (direct or propagating) on the concerned category.
        :type category_id: :class:`str`
        :param category_id: The identifier of the input category.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Category``.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the category for the given ``category_id`` does not exist in the
            system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to revoke propagating permissions
            on the category.
        """
        return self._invoke('revoke_propagating_permissions',
                            {
                            'category_id': category_id,
                            })
class Tag(VapiInterface):
"""
The ``Tag`` class provides methods to create, read, update, delete, and
enumerate tags.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _TagStub)
class CreateSpec(VapiStruct):
"""
The ``Tag.CreateSpec`` class describes a tag.
Use the :func:`Tag.create` method to create a tag defined by the create
specification.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
name=None,
description=None,
category_id=None,
tag_id=None,
):
"""
:type name: :class:`str`
:param name: The display name of the tag. The name must be unique within its
category.
:type description: :class:`str`
:param description: The description of the tag.
:type category_id: :class:`str`
:param category_id: The unique identifier of the parent category in which this tag will
be created.
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.cis.tagging.Category``. When methods return a value of
this class as a return value, the attribute will be an identifier
for the resource type: ``com.vmware.cis.tagging.Category``.
:type tag_id: :class:`str` or ``None``
:param tag_id: This attribute was added in vSphere API 6.7
When clients pass a value of this class as a parameter, the
attribute must be an identifier for the resource type:
``com.vmware.cis.tagging.Tag``. When methods return a value of this
class as a return value, the attribute will be an identifier for
the resource type: ``com.vmware.cis.tagging.Tag``.
If None an identifier will be generated by the server
"""
self.name = name
self.description = description
self.category_id = category_id
self.tag_id = tag_id
VapiStruct.__init__(self)
CreateSpec._set_binding_type(type.StructType(
'com.vmware.cis.tagging.tag.create_spec', {
'name': type.StringType(),
'description': type.StringType(),
'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
'tag_id': type.OptionalType(type.IdType()),
},
CreateSpec,
False,
None))
class UpdateSpec(VapiStruct):
"""
The ``Tag.UpdateSpec`` class describes the updates to be made to an
existing tag.
Use the :func:`Tag.update` method to modify a tag. When you call the
method, you specify the tag identifier. You obtain the tag identifier when
you call the :func:`Tag.create` method. You can also retrieve an identifier
by using the :func:`Tag.list` method.
.. tip::
The arguments are used to initialize data attributes with the same
names.
"""
def __init__(self,
name=None,
description=None,
):
"""
:type name: :class:`str` or ``None``
:param name: The display name of the tag.
If None the name will not be modified.
:type description: :class:`str` or ``None``
:param description: The description of the tag.
If None the description will not be modified.
"""
self.name = name
self.description = description
VapiStruct.__init__(self)
UpdateSpec._set_binding_type(type.StructType(
'com.vmware.cis.tagging.tag.update_spec', {
'name': type.OptionalType(type.StringType()),
'description': type.OptionalType(type.StringType()),
},
UpdateSpec,
False,
None))
def create(self,
create_spec,
):
"""
Creates a tag. To invoke this method, you need the create tag privilege
on the input category.
:type create_spec: :class:`Tag.CreateSpec`
:param create_spec: Specification for the new tag to be created.
:rtype: :class:`str`
:return: The identifier of the created tag.
The return value will be an identifier for the resource type:
``com.vmware.cis.tagging.Tag``.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
if the :attr:`Tag.CreateSpec.name` provided in the ``create_spec``
is the name of an already existing tag in the input category.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if any of the input information in the ``create_spec`` is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the category for in the given ``create_spec`` does not exist in
the system.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have the privilege to create tag.
"""
return self._invoke('create',
{
'create_spec': create_spec,
})
def get(self,
tag_id,
):
"""
Fetches the tag information for the given tag identifier. To invoke
this method, you need the read privilege on the tag in order to view
the tag info.
:type tag_id: :class:`str`
:param tag_id: The identifier of the input tag.
The parameter must be an identifier for the resource type:
``com.vmware.cis.tagging.Tag``.
:rtype: :class:`TagModel`
:return: The :class:`TagModel` that corresponds to ``tag_id``.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the tag for the given ``tag_id`` does not exist in the system.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if the user does not have the privilege to read the tag.
"""
return self._invoke('get',
{
'tag_id': tag_id,
})
def update(self,
tag_id,
update_spec,
):
"""
Updates an existing tag. To invoke this method, you need the edit
privilege on the tag.
:type tag_id: :class:`str`
:param tag_id: The identifier of the input tag.
The parameter must be an identifier for the resource type:
``com.vmware.cis.tagging.Tag``.
:type update_spec: :class:`Tag.UpdateSpec`
:param update_spec: Specification to update the tag.
:raise: :class:`com.vmware.vapi.std.errors_client.AlreadyExists`
if the :attr:`Tag.UpdateSpec.name` provided in the ``update_spec``
is the name of an already existing tag in the same category.
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
if any of the input information in the ``update_spec`` is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the tag for the given ``tag_id`` does not exist in the system.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have the privilege to update the tag.
"""
return self._invoke('update',
{
'tag_id': tag_id,
'update_spec': update_spec,
})
def delete(self,
tag_id,
):
"""
Deletes an existing tag. To invoke this method, you need the delete
privilege on the tag.
:type tag_id: :class:`str`
:param tag_id: The identifier of the input tag.
The parameter must be an identifier for the resource type:
``com.vmware.cis.tagging.Tag``.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the tag for the given ``tag_id`` does not exist in the system.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have the privilege to delete the tag.
"""
return self._invoke('delete',
{
'tag_id': tag_id,
})
def list(self):
"""
Enumerates the tags in the system. To invoke this method, you need read
privilege on the individual tags. The :class:`list` will only contain
tags for which you have read privileges.
:rtype: :class:`list` of :class:`str`
:return: The :class:`list` of resource identifiers for the tags in the
system.
The return value will contain identifiers for the resource type:
``com.vmware.cis.tagging.Tag``.
"""
return self._invoke('list', None)
def list_used_tags(self,
used_by_entity,
):
"""
Enumerates all tags for which the ``used_by_entity`` is part of the
:attr:`TagModel.used_by` subscribers :class:`set`. To invoke this
method, you need the read privilege on the individual tags.
:type used_by_entity: :class:`str`
:param used_by_entity: The field on which the results will be filtered.
:rtype: :class:`list` of :class:`str`
:return: The :class:`list` of resource identifiers for the tags in the
system that are used by ``used_by_entity``.
The return value will contain identifiers for the resource type:
``com.vmware.cis.tagging.Tag``.
"""
return self._invoke('list_used_tags',
{
'used_by_entity': used_by_entity,
})
def list_tags_for_category(self,
category_id,
):
"""
Enumerates all tags for the given category. To invoke this method, you
need the read privilege on the given category and the individual tags
in that category.
:type category_id: :class:`str`
:param category_id: The identifier of the input category.
The parameter must be an identifier for the resource type:
``com.vmware.cis.tagging.Category``.
:rtype: :class:`list` of :class:`str`
:return: The :class:`list` of resource identifiers for the tags in the given
input category.
The return value will contain identifiers for the resource type:
``com.vmware.cis.tagging.Tag``.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
if the category for the given ``category_id`` does not exist in the
system.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
if you do not have the privilege to read the category.
"""
return self._invoke('list_tags_for_category',
{
'category_id': category_id,
})
def add_to_used_by(self,
                   tag_id,
                   used_by_entity,
                   ):
    """
    Adds the ``used_by_entity`` to the :attr:`TagModel.used_by`
    subscribers :class:`set` of the given tag. If the entity is already
    a member of the :class:`set`, the call is a no-op. To invoke this
    method, you need the modify :attr:`TagModel.used_by` privilege on
    the tag.

    :type  tag_id: :class:`str`
    :param tag_id: The identifier of the input tag.
        The parameter must be an identifier for the resource type:
        ``com.vmware.cis.tagging.Tag``.
    :type  used_by_entity: :class:`str`
    :param used_by_entity: The name of the user to be added to the
        :attr:`TagModel.used_by` :class:`set`.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the tag for the given ``tag_id`` does not exist in the system.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have the privilege to add an entity to the
        :attr:`TagModel.used_by` field.
    """
    # Both arguments travel in a single operation-input struct.
    payload = {
        'tag_id': tag_id,
        'used_by_entity': used_by_entity,
    }
    return self._invoke('add_to_used_by', payload)
def remove_from_used_by(self,
                        tag_id,
                        used_by_entity,
                        ):
    """
    Removes the ``used_by_entity`` from the :attr:`TagModel.used_by`
    subscribers set of the given tag. If the entity is not currently
    using this tag, the call is a no-op. To invoke this method, you
    need the modify :attr:`TagModel.used_by` privilege on the tag.

    :type  tag_id: :class:`str`
    :param tag_id: The identifier of the input tag.
        The parameter must be an identifier for the resource type:
        ``com.vmware.cis.tagging.Tag``.
    :type  used_by_entity: :class:`str`
    :param used_by_entity: The name of the user to be removed from the
        :attr:`TagModel.used_by` :class:`set`.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the tag for the given ``tag_id`` does not exist in the system.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have the privilege to remove an entity from the
        :attr:`TagModel.used_by` field.
    """
    # Mirror image of add_to_used_by: same payload shape, different op.
    payload = {
        'tag_id': tag_id,
        'used_by_entity': used_by_entity,
    }
    return self._invoke('remove_from_used_by', payload)
def revoke_propagating_permissions(self,
                                   tag_id,
                                   ):
    """
    Revokes all propagating permissions on the given tag. Afterwards you
    should attach a direct permission with tagging privileges on the
    tag. To invoke this method, you need tag related privileges (direct
    or propagating) on the concerned tag.

    :type  tag_id: :class:`str`
    :param tag_id: The identifier of the input tag.
        The parameter must be an identifier for the resource type:
        ``com.vmware.cis.tagging.Tag``.
    :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
        if the tag for the given ``tag_id`` does not exist in the system.
    :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
        if you do not have the privilege to revoke propagating
        permissions on the tag.
    """
    # Single-argument delegation to the server-side operation.
    payload = {'tag_id': tag_id}
    return self._invoke('revoke_propagating_permissions', payload)
class TagAssociation(VapiInterface):
    """
    The ``TagAssociation`` class provides methods to attach, detach, and query
    tags.
    """

    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        # All methods below delegate to this generated stub.
        VapiInterface.__init__(self, config, _TagAssociationStub)

    class BatchResult(VapiStruct):
        """
        The ``TagAssociation.BatchResult`` class describes the result of performing
        the same method on several tags or objects in a single invocation. This
        class was added in vSphere API 6.5

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """

        def __init__(self,
                     success=None,
                     error_messages=None,
                    ):
            """
            :type  success: :class:`bool`
            :param success: This is true if the batch method completed without any errors.
                Otherwise it is false and all or some methods have failed. This
                attribute was added in vSphere API 6.5
            :type  error_messages: :class:`list` of :class:`com.vmware.vapi.std_client.LocalizableMessage`
            :param error_messages: The :class:`list` of error messages. This attribute was added in
                vSphere API 6.5
            """
            self.success = success
            self.error_messages = error_messages
            VapiStruct.__init__(self)

    # Register the wire-format binding type for BatchResult (generated code;
    # the literal type name must match the server-side definition exactly).
    BatchResult._set_binding_type(type.StructType(
        'com.vmware.cis.tagging.tag_association.batch_result', {
            'success': type.BooleanType(),
            'error_messages': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'LocalizableMessage')),
        },
        BatchResult,
        False,
        None))

    class TagToObjects(VapiStruct):
        """
        The ``TagAssociation.TagToObjects`` class describes a tag and its related
        objects. Use the :func:`TagAssociation.list_attached_objects_on_tags`
        method to retrieve a :class:`list` with each element containing a tag and
        the objects to which it is attached. This class was added in vSphere API
        6.5

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """

        def __init__(self,
                     tag_id=None,
                     object_ids=None,
                    ):
            """
            :type  tag_id: :class:`str`
            :param tag_id: The identifier of the tag. This attribute was added in vSphere API
                6.5
                When clients pass a value of this class as a parameter, the
                attribute must be an identifier for the resource type:
                ``com.vmware.cis.tagging.Tag``. When methods return a value of this
                class as a return value, the attribute will be an identifier for
                the resource type: ``com.vmware.cis.tagging.Tag``.
            :type  object_ids: :class:`list` of :class:`com.vmware.vapi.std_client.DynamicID`
            :param object_ids: The identifiers of the related objects. This attribute was added in
                vSphere API 6.5
            """
            self.tag_id = tag_id
            self.object_ids = object_ids
            VapiStruct.__init__(self)

    # Binding type registration for TagToObjects (generated code).
    TagToObjects._set_binding_type(type.StructType(
        'com.vmware.cis.tagging.tag_association.tag_to_objects', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
            'object_ids': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
        },
        TagToObjects,
        False,
        None))

    class ObjectToTags(VapiStruct):
        """
        The ``TagAssociation.ObjectToTags`` class describes an object and its
        related tags. Use the :func:`TagAssociation.list_attached_tags_on_objects`
        method to retrieve a :class:`list` with each element containing an object
        and the tags attached to it. This class was added in vSphere API 6.5

        .. tip::
            The arguments are used to initialize data attributes with the same
            names.
        """

        def __init__(self,
                     object_id=None,
                     tag_ids=None,
                    ):
            """
            :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
            :param object_id: The identifier of the object. This attribute was added in vSphere
                API 6.5
            :type  tag_ids: :class:`list` of :class:`str`
            :param tag_ids: The identifiers of the related tags. This attribute was added in
                vSphere API 6.5
                When clients pass a value of this class as a parameter, the
                attribute must contain identifiers for the resource type:
                ``com.vmware.cis.tagging.Tag``. When methods return a value of this
                class as a return value, the attribute will contain identifiers for
                the resource type: ``com.vmware.cis.tagging.Tag``.
            """
            self.object_id = object_id
            self.tag_ids = tag_ids
            VapiStruct.__init__(self)

    # Binding type registration for ObjectToTags (generated code).
    ObjectToTags._set_binding_type(type.StructType(
        'com.vmware.cis.tagging.tag_association.object_to_tags', {
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
            'tag_ids': type.ListType(type.IdType()),
        },
        ObjectToTags,
        False,
        None))

    def attach(self,
               tag_id,
               object_id,
               ):
        """
        Attaches the given tag to the input object. The tag needs to meet the
        cardinality (:attr:`CategoryModel.cardinality`) and associability
        (:attr:`CategoryModel.associable_types`) criteria in order to be
        eligible for attachment. If the tag is already attached to the object,
        then this method is a no-op and an error will not be thrown. To invoke
        this method, you need the attach tag privilege on the tag and the read
        privilege on the object.

        :type  tag_id: :class:`str`
        :param tag_id: The identifier of the input tag.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the tag for the given ``tag_id`` does not exist in the system.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidArgument`
            if the input tag is not eligible to be attached to this object or
            if the ``object_id`` is not valid.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to attach the tag or do not have
            the privilege to read the object.
        """
        return self._invoke('attach',
                            {
                            'tag_id': tag_id,
                            'object_id': object_id,
                            })

    def attach_multiple_tags_to_object(self,
                                       object_id,
                                       tag_ids,
                                       ):
        """
        Attaches the given tags to the input object. If a tag is already
        attached to the object, then the individual method is a no-op and an
        error will not be added to
        :attr:`TagAssociation.BatchResult.error_messages`. To invoke this
        method, you need the read privilege on the object and the attach tag
        privilege on each tag. This method was added in vSphere API 6.5

        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :type  tag_ids: :class:`list` of :class:`str`
        :param tag_ids: The identifiers of the input tags.
            The parameter must contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :rtype: :class:`TagAssociation.BatchResult`
        :return: The outcome of the batch method and the :class:`list` of error
            messages (:attr:`TagAssociation.BatchResult.error_messages`)
            describing attachment failures.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the object.
        """
        return self._invoke('attach_multiple_tags_to_object',
                            {
                            'object_id': object_id,
                            'tag_ids': tag_ids,
                            })

    def attach_tag_to_multiple_objects(self,
                                       tag_id,
                                       object_ids,
                                       ):
        """
        Attaches the given tag to the input objects. If a tag is already
        attached to the object, then the individual method is a no-op and an
        error will not be added to
        :attr:`TagAssociation.BatchResult.error_messages`. To invoke this
        method, you need the attach tag privilege on the tag and the read
        privilege on each object. This method was added in vSphere API 6.5

        :type  tag_id: :class:`str`
        :param tag_id: The identifier of the input tag.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :type  object_ids: :class:`list` of :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_ids: The identifiers of the input objects.
        :rtype: :class:`TagAssociation.BatchResult`
        :return: The outcome of the batch method and the :class:`list` of error
            messages (:attr:`TagAssociation.BatchResult.error_messages`)
            describing attachment failures.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the tag for the given ``tag_id`` does not exist in the system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the attach tag privilege on the tag.
        """
        return self._invoke('attach_tag_to_multiple_objects',
                            {
                            'tag_id': tag_id,
                            'object_ids': object_ids,
                            })

    def detach(self,
               tag_id,
               object_id,
               ):
        """
        Detaches the tag from the given object. If the tag is already removed
        from the object, then this method is a no-op and an error will not be
        thrown. To invoke this method, you need the attach tag privilege on the
        tag and the read privilege on the object.

        :type  tag_id: :class:`str`
        :param tag_id: The identifier of the input tag.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the tag for the given ``tag_id`` does not exist in the system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to detach the tag or do not have
            the privilege to read the given object.
        """
        return self._invoke('detach',
                            {
                            'tag_id': tag_id,
                            'object_id': object_id,
                            })

    def detach_multiple_tags_from_object(self,
                                         object_id,
                                         tag_ids,
                                         ):
        """
        Detaches the given tags from the input object. If a tag is already
        removed from the object, then the individual method is a no-op and an
        error will not be added to
        :attr:`TagAssociation.BatchResult.error_messages`. To invoke this
        method, you need the read privilege on the object and the attach tag
        privilege each tag. This method was added in vSphere API 6.5

        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :type  tag_ids: :class:`list` of :class:`str`
        :param tag_ids: The identifiers of the input tags.
            The parameter must contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :rtype: :class:`TagAssociation.BatchResult`
        :return: The outcome of the batch method and the :class:`list` of error
            messages (:attr:`TagAssociation.BatchResult.error_messages`)
            describing detachment failures.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the object.
        """
        return self._invoke('detach_multiple_tags_from_object',
                            {
                            'object_id': object_id,
                            'tag_ids': tag_ids,
                            })

    def detach_tag_from_multiple_objects(self,
                                         tag_id,
                                         object_ids,
                                         ):
        """
        Detaches the given tag from the input objects. If a tag is already
        removed from the object, then the individual method is a no-op and an
        error will not be added to
        :attr:`TagAssociation.BatchResult.error_messages`. To invoke this
        method, you need the attach tag privilege on the tag and the read
        privilege on each object. This method was added in vSphere API 6.5

        :type  tag_id: :class:`str`
        :param tag_id: The identifier of the input tag.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :type  object_ids: :class:`list` of :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_ids: The identifiers of the input objects.
        :rtype: :class:`TagAssociation.BatchResult`
        :return: The outcome of the batch method and the :class:`list` of error
            messages (:attr:`TagAssociation.BatchResult.error_messages`)
            describing detachment failures.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the tag for the given tag does not exist in the system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the attach tag privilege on the tag.
        """
        return self._invoke('detach_tag_from_multiple_objects',
                            {
                            'tag_id': tag_id,
                            'object_ids': object_ids,
                            })

    def list_attached_objects(self,
                              tag_id,
                              ):
        """
        Fetches the :class:`list` of attached objects for the given tag. To
        invoke this method, you need the read privilege on the input tag. Only
        those objects for which you have the read privilege will be returned.

        :type  tag_id: :class:`str`
        :param tag_id: The identifier of the input tag.
            The parameter must be an identifier for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :rtype: :class:`list` of :class:`com.vmware.vapi.std_client.DynamicID`
        :return: The :class:`list` of attached object identifiers.
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            if the tag for the given ``tag_id`` does not exist in the system.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the tag.
        """
        return self._invoke('list_attached_objects',
                            {
                            'tag_id': tag_id,
                            })

    def list_attached_objects_on_tags(self,
                                      tag_ids,
                                      ):
        """
        Fetches the :class:`list` of :class:`TagAssociation.TagToObjects`
        describing the input tag identifiers and the objects they are attached
        to. To invoke this method, you need the read privilege on each input
        tag. The :attr:`TagAssociation.TagToObjects.object_ids` will only
        contain those objects for which you have the read privilege. This
        method was added in vSphere API 6.5

        :type  tag_ids: :class:`list` of :class:`str`
        :param tag_ids: The identifiers of the input tags.
            The parameter must contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :rtype: :class:`list` of :class:`TagAssociation.TagToObjects`
        :return: The :class:`list` of the tag identifiers to all object identifiers
            that each tag is attached to.
        """
        return self._invoke('list_attached_objects_on_tags',
                            {
                            'tag_ids': tag_ids,
                            })

    def list_attached_tags(self,
                           object_id,
                           ):
        """
        Fetches the :class:`list` of tags attached to the given object. To
        invoke this method, you need the read privilege on the input object.
        The :class:`list` will only contain those tags for which you have the
        read privileges.

        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :rtype: :class:`list` of :class:`str`
        :return: The :class:`list` of all tag identifiers that correspond to the
            tags attached to the given object.
            The return value will contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the object.
        """
        return self._invoke('list_attached_tags',
                            {
                            'object_id': object_id,
                            })

    def list_attached_tags_on_objects(self,
                                      object_ids,
                                      ):
        """
        Fetches the :class:`list` of :class:`TagAssociation.ObjectToTags`
        describing the input object identifiers and the tags attached to each
        object. To invoke this method, you need the read privilege on each
        input object. The :attr:`TagAssociation.ObjectToTags.tag_ids` will only
        contain those tags for which you have the read privilege. This method
        was added in vSphere API 6.5

        :type  object_ids: :class:`list` of :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_ids: The identifiers of the input objects.
        :rtype: :class:`list` of :class:`TagAssociation.ObjectToTags`
        :return: The :class:`list` of the object identifiers to all tag identifiers
            that are attached to that object.
        """
        return self._invoke('list_attached_tags_on_objects',
                            {
                            'object_ids': object_ids,
                            })

    def list_attachable_tags(self,
                             object_id,
                             ):
        """
        Fetches the :class:`list` of attachable tags for the given object,
        omitting the tags that have already been attached. Criteria for
        attachability is calculated based on tagging cardinality
        (:attr:`CategoryModel.cardinality`) and associability
        (:attr:`CategoryModel.associable_types`) constructs. To invoke this
        method, you need the read privilege on the input object. The
        :class:`list` will only contain those tags for which you have read
        privileges.

        :type  object_id: :class:`com.vmware.vapi.std_client.DynamicID`
        :param object_id: The identifier of the input object.
        :rtype: :class:`list` of :class:`str`
        :return: The :class:`list` of tag identifiers that are eligible to be
            attached to the given object.
            The return value will contain identifiers for the resource type:
            ``com.vmware.cis.tagging.Tag``.
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            if you do not have the privilege to read the object.
        """
        return self._invoke('list_attachable_tags',
                            {
                            'object_id': object_id,
                            })
class _CategoryStub(ApiInterfaceStub):
    """
    Generated stub for the ``com.vmware.cis.tagging.category`` service.

    Declares, for every operation, the input struct type, the map of
    declared error types, validator lists, and (optional) REST metadata,
    then registers them with :class:`ApiInterfaceStub`. The literal type
    names and resource-type strings must match the server-side interface
    definition exactly.
    """

    def __init__(self, config):
        # properties for create operation
        create_input_type = type.StructType('operation-input', {
            'create_spec': type.ReferenceType(__name__, 'Category.CreateSpec'),
        })
        create_error_dict = {
            'com.vmware.vapi.std.errors.already_exists':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        create_input_value_validator_list = [
        ]
        create_output_validator_list = [
        ]
        create_rest_metadata = None

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = None

        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
            'update_spec': type.ReferenceType(__name__, 'Category.UpdateSpec'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.already_exists':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = None

        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = None

        # properties for list operation (no input parameters, no declared errors)
        list_input_type = type.StructType('operation-input', {})
        list_error_dict = {}
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = None

        # properties for list_used_categories operation
        list_used_categories_input_type = type.StructType('operation-input', {
            'used_by_entity': type.StringType(),
        })
        list_used_categories_error_dict = {}
        list_used_categories_input_value_validator_list = [
        ]
        list_used_categories_output_validator_list = [
        ]
        list_used_categories_rest_metadata = None

        # properties for add_to_used_by operation
        add_to_used_by_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
            'used_by_entity': type.StringType(),
        })
        add_to_used_by_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        add_to_used_by_input_value_validator_list = [
        ]
        add_to_used_by_output_validator_list = [
        ]
        add_to_used_by_rest_metadata = None

        # properties for remove_from_used_by operation
        remove_from_used_by_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
            'used_by_entity': type.StringType(),
        })
        remove_from_used_by_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        remove_from_used_by_input_value_validator_list = [
        ]
        remove_from_used_by_output_validator_list = [
        ]
        remove_from_used_by_rest_metadata = None

        # properties for revoke_propagating_permissions operation
        revoke_propagating_permissions_input_type = type.StructType('operation-input', {
            'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
        })
        revoke_propagating_permissions_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        revoke_propagating_permissions_input_value_validator_list = [
        ]
        revoke_propagating_permissions_output_validator_list = [
        ]
        revoke_propagating_permissions_rest_metadata = None

        # Map operation names to their full metadata; none of these are tasks.
        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType(__name__, 'CategoryModel'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.VoidType(),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_used_categories': {
                'input_type': list_used_categories_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': list_used_categories_error_dict,
                'input_value_validator_list': list_used_categories_input_value_validator_list,
                'output_validator_list': list_used_categories_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'add_to_used_by': {
                'input_type': add_to_used_by_input_type,
                'output_type': type.VoidType(),
                'errors': add_to_used_by_error_dict,
                'input_value_validator_list': add_to_used_by_input_value_validator_list,
                'output_validator_list': add_to_used_by_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'remove_from_used_by': {
                'input_type': remove_from_used_by_input_type,
                'output_type': type.VoidType(),
                'errors': remove_from_used_by_error_dict,
                'input_value_validator_list': remove_from_used_by_input_value_validator_list,
                'output_validator_list': remove_from_used_by_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'revoke_propagating_permissions': {
                'input_type': revoke_propagating_permissions_input_type,
                'output_type': type.VoidType(),
                'errors': revoke_propagating_permissions_error_dict,
                'input_value_validator_list': revoke_propagating_permissions_input_value_validator_list,
                'output_validator_list': revoke_propagating_permissions_output_validator_list,
            },
        }
        # REST metadata is currently unset (None) for every operation.
        rest_metadata = {
            'create': create_rest_metadata,
            'get': get_rest_metadata,
            'update': update_rest_metadata,
            'delete': delete_rest_metadata,
            'list': list_rest_metadata,
            'list_used_categories': list_used_categories_rest_metadata,
            'add_to_used_by': add_to_used_by_rest_metadata,
            'remove_from_used_by': remove_from_used_by_rest_metadata,
            'revoke_propagating_permissions': revoke_propagating_permissions_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.cis.tagging.category',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class _TagStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'create_spec': type.ReferenceType(__name__, 'Tag.CreateSpec'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.already_exists':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = None
# properties for get operation
get_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
})
get_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = None
# properties for update operation
update_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
'update_spec': type.ReferenceType(__name__, 'Tag.UpdateSpec'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.already_exists':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'AlreadyExists'),
'com.vmware.vapi.std.errors.invalid_argument':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = None
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = None
# properties for list operation
list_input_type = type.StructType('operation-input', {})
list_error_dict = {}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = None
# properties for list_used_tags operation
list_used_tags_input_type = type.StructType('operation-input', {
'used_by_entity': type.StringType(),
})
list_used_tags_error_dict = {}
list_used_tags_input_value_validator_list = [
]
list_used_tags_output_validator_list = [
]
list_used_tags_rest_metadata = None
# properties for list_tags_for_category operation
list_tags_for_category_input_type = type.StructType('operation-input', {
'category_id': type.IdType(resource_types='com.vmware.cis.tagging.Category'),
})
list_tags_for_category_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
list_tags_for_category_input_value_validator_list = [
]
list_tags_for_category_output_validator_list = [
]
list_tags_for_category_rest_metadata = None
# properties for add_to_used_by operation
add_to_used_by_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
'used_by_entity': type.StringType(),
})
add_to_used_by_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
add_to_used_by_input_value_validator_list = [
]
add_to_used_by_output_validator_list = [
]
add_to_used_by_rest_metadata = None
# properties for remove_from_used_by operation
remove_from_used_by_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
'used_by_entity': type.StringType(),
})
remove_from_used_by_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
remove_from_used_by_input_value_validator_list = [
]
remove_from_used_by_output_validator_list = [
]
remove_from_used_by_rest_metadata = None
# properties for revoke_propagating_permissions operation
revoke_propagating_permissions_input_type = type.StructType('operation-input', {
'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
})
revoke_propagating_permissions_error_dict = {
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
}
revoke_propagating_permissions_input_value_validator_list = [
]
revoke_propagating_permissions_output_validator_list = [
]
revoke_propagating_permissions_rest_metadata = None
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType(__name__, 'TagModel'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ListType(type.IdType()),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'list_used_tags': {
'input_type': list_used_tags_input_type,
'output_type': type.ListType(type.IdType()),
'errors': list_used_tags_error_dict,
'input_value_validator_list': list_used_tags_input_value_validator_list,
'output_validator_list': list_used_tags_output_validator_list,
'task_type': TaskType.NONE,
},
'list_tags_for_category': {
'input_type': list_tags_for_category_input_type,
'output_type': type.ListType(type.IdType()),
'errors': list_tags_for_category_error_dict,
'input_value_validator_list': list_tags_for_category_input_value_validator_list,
'output_validator_list': list_tags_for_category_output_validator_list,
'task_type': TaskType.NONE,
},
'add_to_used_by': {
'input_type': add_to_used_by_input_type,
'output_type': type.VoidType(),
'errors': add_to_used_by_error_dict,
'input_value_validator_list': add_to_used_by_input_value_validator_list,
'output_validator_list': add_to_used_by_output_validator_list,
'task_type': TaskType.NONE,
},
'remove_from_used_by': {
'input_type': remove_from_used_by_input_type,
'output_type': type.VoidType(),
'errors': remove_from_used_by_error_dict,
'input_value_validator_list': remove_from_used_by_input_value_validator_list,
'output_validator_list': remove_from_used_by_output_validator_list,
'task_type': TaskType.NONE,
},
'revoke_propagating_permissions': {
'input_type': revoke_propagating_permissions_input_type,
'output_type': type.VoidType(),
'errors': revoke_propagating_permissions_error_dict,
'input_value_validator_list': revoke_propagating_permissions_input_value_validator_list,
'output_validator_list': revoke_propagating_permissions_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'get': get_rest_metadata,
'update': update_rest_metadata,
'delete': delete_rest_metadata,
'list': list_rest_metadata,
'list_used_tags': list_used_tags_rest_metadata,
'list_tags_for_category': list_tags_for_category_rest_metadata,
'add_to_used_by': add_to_used_by_rest_metadata,
'remove_from_used_by': remove_from_used_by_rest_metadata,
'revoke_propagating_permissions': revoke_propagating_permissions_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.cis.tagging.tag',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=True)
class _TagAssociationStub(ApiInterfaceStub):
    # Auto-generated client-side stub for the
    # com.vmware.cis.tagging.tag_association vAPI service.  For every
    # operation it declares the wire-level input struct type, the declared
    # error types, (empty) input/output validator lists and REST metadata,
    # then registers everything with ApiInterfaceStub.
    def __init__(self, config):
        # ``config`` is the vAPI stub configuration passed through to
        # ApiInterfaceStub.__init__ below.
        # properties for attach operation
        attach_input_type = type.StructType('operation-input', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
        })
        attach_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.invalid_argument':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidArgument'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        attach_input_value_validator_list = [
        ]
        attach_output_validator_list = [
        ]
        attach_rest_metadata = None
        # properties for attach_multiple_tags_to_object operation
        attach_multiple_tags_to_object_input_type = type.StructType('operation-input', {
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
            'tag_ids': type.ListType(type.IdType()),
        })
        attach_multiple_tags_to_object_error_dict = {
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        attach_multiple_tags_to_object_input_value_validator_list = [
        ]
        attach_multiple_tags_to_object_output_validator_list = [
        ]
        attach_multiple_tags_to_object_rest_metadata = None
        # properties for attach_tag_to_multiple_objects operation
        attach_tag_to_multiple_objects_input_type = type.StructType('operation-input', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
            'object_ids': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
        })
        attach_tag_to_multiple_objects_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        attach_tag_to_multiple_objects_input_value_validator_list = [
        ]
        attach_tag_to_multiple_objects_output_validator_list = [
        ]
        attach_tag_to_multiple_objects_rest_metadata = None
        # properties for detach operation
        detach_input_type = type.StructType('operation-input', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
        })
        detach_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        detach_input_value_validator_list = [
        ]
        detach_output_validator_list = [
        ]
        detach_rest_metadata = None
        # properties for detach_multiple_tags_from_object operation
        detach_multiple_tags_from_object_input_type = type.StructType('operation-input', {
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
            'tag_ids': type.ListType(type.IdType()),
        })
        detach_multiple_tags_from_object_error_dict = {
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        detach_multiple_tags_from_object_input_value_validator_list = [
        ]
        detach_multiple_tags_from_object_output_validator_list = [
        ]
        detach_multiple_tags_from_object_rest_metadata = None
        # properties for detach_tag_from_multiple_objects operation
        detach_tag_from_multiple_objects_input_type = type.StructType('operation-input', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
            'object_ids': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
        })
        detach_tag_from_multiple_objects_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        detach_tag_from_multiple_objects_input_value_validator_list = [
        ]
        detach_tag_from_multiple_objects_output_validator_list = [
        ]
        detach_tag_from_multiple_objects_rest_metadata = None
        # properties for list_attached_objects operation
        list_attached_objects_input_type = type.StructType('operation-input', {
            'tag_id': type.IdType(resource_types='com.vmware.cis.tagging.Tag'),
        })
        list_attached_objects_error_dict = {
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        list_attached_objects_input_value_validator_list = [
        ]
        list_attached_objects_output_validator_list = [
        ]
        list_attached_objects_rest_metadata = None
        # properties for list_attached_objects_on_tags operation
        list_attached_objects_on_tags_input_type = type.StructType('operation-input', {
            'tag_ids': type.ListType(type.IdType()),
        })
        list_attached_objects_on_tags_error_dict = {}
        list_attached_objects_on_tags_input_value_validator_list = [
        ]
        list_attached_objects_on_tags_output_validator_list = [
        ]
        list_attached_objects_on_tags_rest_metadata = None
        # properties for list_attached_tags operation
        list_attached_tags_input_type = type.StructType('operation-input', {
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
        })
        list_attached_tags_error_dict = {
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        list_attached_tags_input_value_validator_list = [
        ]
        list_attached_tags_output_validator_list = [
        ]
        list_attached_tags_rest_metadata = None
        # properties for list_attached_tags_on_objects operation
        list_attached_tags_on_objects_input_type = type.StructType('operation-input', {
            'object_ids': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
        })
        list_attached_tags_on_objects_error_dict = {}
        list_attached_tags_on_objects_input_value_validator_list = [
        ]
        list_attached_tags_on_objects_output_validator_list = [
        ]
        list_attached_tags_on_objects_rest_metadata = None
        # properties for list_attachable_tags operation
        list_attachable_tags_input_type = type.StructType('operation-input', {
            'object_id': type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID'),
        })
        list_attachable_tags_error_dict = {
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
        }
        list_attachable_tags_input_value_validator_list = [
        ]
        list_attachable_tags_output_validator_list = [
        ]
        list_attachable_tags_rest_metadata = None
        # Registry mapping each operation name to its declared input type,
        # output type, errors and validators.  None of the operations here
        # is task-based (TaskType.NONE).
        operations = {
            'attach': {
                'input_type': attach_input_type,
                'output_type': type.VoidType(),
                'errors': attach_error_dict,
                'input_value_validator_list': attach_input_value_validator_list,
                'output_validator_list': attach_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'attach_multiple_tags_to_object': {
                'input_type': attach_multiple_tags_to_object_input_type,
                'output_type': type.ReferenceType(__name__, 'TagAssociation.BatchResult'),
                'errors': attach_multiple_tags_to_object_error_dict,
                'input_value_validator_list': attach_multiple_tags_to_object_input_value_validator_list,
                'output_validator_list': attach_multiple_tags_to_object_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'attach_tag_to_multiple_objects': {
                'input_type': attach_tag_to_multiple_objects_input_type,
                'output_type': type.ReferenceType(__name__, 'TagAssociation.BatchResult'),
                'errors': attach_tag_to_multiple_objects_error_dict,
                'input_value_validator_list': attach_tag_to_multiple_objects_input_value_validator_list,
                'output_validator_list': attach_tag_to_multiple_objects_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'detach': {
                'input_type': detach_input_type,
                'output_type': type.VoidType(),
                'errors': detach_error_dict,
                'input_value_validator_list': detach_input_value_validator_list,
                'output_validator_list': detach_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'detach_multiple_tags_from_object': {
                'input_type': detach_multiple_tags_from_object_input_type,
                'output_type': type.ReferenceType(__name__, 'TagAssociation.BatchResult'),
                'errors': detach_multiple_tags_from_object_error_dict,
                'input_value_validator_list': detach_multiple_tags_from_object_input_value_validator_list,
                'output_validator_list': detach_multiple_tags_from_object_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'detach_tag_from_multiple_objects': {
                'input_type': detach_tag_from_multiple_objects_input_type,
                'output_type': type.ReferenceType(__name__, 'TagAssociation.BatchResult'),
                'errors': detach_tag_from_multiple_objects_error_dict,
                'input_value_validator_list': detach_tag_from_multiple_objects_input_value_validator_list,
                'output_validator_list': detach_tag_from_multiple_objects_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_attached_objects': {
                'input_type': list_attached_objects_input_type,
                'output_type': type.ListType(type.ReferenceType('com.vmware.vapi.std_client', 'DynamicID')),
                'errors': list_attached_objects_error_dict,
                'input_value_validator_list': list_attached_objects_input_value_validator_list,
                'output_validator_list': list_attached_objects_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_attached_objects_on_tags': {
                'input_type': list_attached_objects_on_tags_input_type,
                'output_type': type.ListType(type.ReferenceType(__name__, 'TagAssociation.TagToObjects')),
                'errors': list_attached_objects_on_tags_error_dict,
                'input_value_validator_list': list_attached_objects_on_tags_input_value_validator_list,
                'output_validator_list': list_attached_objects_on_tags_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_attached_tags': {
                'input_type': list_attached_tags_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': list_attached_tags_error_dict,
                'input_value_validator_list': list_attached_tags_input_value_validator_list,
                'output_validator_list': list_attached_tags_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_attached_tags_on_objects': {
                'input_type': list_attached_tags_on_objects_input_type,
                'output_type': type.ListType(type.ReferenceType(__name__, 'TagAssociation.ObjectToTags')),
                'errors': list_attached_tags_on_objects_error_dict,
                'input_value_validator_list': list_attached_tags_on_objects_input_value_validator_list,
                'output_validator_list': list_attached_tags_on_objects_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list_attachable_tags': {
                'input_type': list_attachable_tags_input_type,
                'output_type': type.ListType(type.IdType()),
                'errors': list_attachable_tags_error_dict,
                'input_value_validator_list': list_attachable_tags_input_value_validator_list,
                'output_validator_list': list_attachable_tags_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # Per-operation REST metadata (all None: this service binds over the
        # default vAPI REST transport without custom routing).
        rest_metadata = {
            'attach': attach_rest_metadata,
            'attach_multiple_tags_to_object': attach_multiple_tags_to_object_rest_metadata,
            'attach_tag_to_multiple_objects': attach_tag_to_multiple_objects_rest_metadata,
            'detach': detach_rest_metadata,
            'detach_multiple_tags_from_object': detach_multiple_tags_from_object_rest_metadata,
            'detach_tag_from_multiple_objects': detach_tag_from_multiple_objects_rest_metadata,
            'list_attached_objects': list_attached_objects_rest_metadata,
            'list_attached_objects_on_tags': list_attached_objects_on_tags_rest_metadata,
            'list_attached_tags': list_attached_tags_rest_metadata,
            'list_attached_tags_on_objects': list_attached_tags_on_objects_rest_metadata,
            'list_attachable_tags': list_attachable_tags_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.cis.tagging.tag_association',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=True)
class StubFactory(StubFactoryBase):
    """Factory exposing the tagging service stubs (Category, Tag,
    TagAssociation) to vAPI client code."""
    _attrs = {
        'Category': Category,
        'Tag': Tag,
        'TagAssociation': TagAssociation,
    }
|
[
"taro.murata.1992@gmail.com"
] |
taro.murata.1992@gmail.com
|
033a87d68fe01730b90c3c184a037d141c074b83
|
31e32761e3572f8adeb690053ebfcc26390a87b5
|
/pyCollections/search.py
|
1b92abd3135f09fd9fb590687653426f48fa4beb
|
[] |
no_license
|
sanshitsharma/pySamples
|
738b95c758d65e3360f3ee7221591d7b78c7ba1d
|
ce06f1e38e0c7a142af26e8883c81b7a5dfc7edc
|
refs/heads/master
| 2021-06-05T19:01:40.279148
| 2021-02-02T21:52:31
| 2021-02-02T21:52:31
| 110,218,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
#!/usr/bin/python
class Search:
    """Binary search over a sorted sequence.

    ``binarySearch`` returns the index of the value when present, or
    ``-(insertion) - 1`` where *insertion* is the index at which the value
    would have to be inserted to keep the sequence sorted.
    """

    def binarySearch(self, arr, val):
        """Search *val* in the ascending-sorted sequence *arr*.

        :param arr: sequence of items in sorted order (may be None/empty)
        :param val: value to be searched
        :return: index of val if found in arr;
                 else (-(insertion) - 1), the encoded insertion point
        """
        # An absent/empty sequence has insertion point 0, encoded as -1.
        # (The original only matched None and [] exactly; `not arr` also
        # covers empty tuples and strings.)
        if not arr:
            return -1
        return self.__binSearch(arr, val, 0, len(arr) - 1)

    def __binSearch(self, a, val, l, h):
        # Recursive helper restricted to the inclusive slice a[l..h].
        if l <= h:
            # Bug fix: floor division keeps `mid` an int on both Python 2
            # and Python 3; the original '/' yields a float under Python 3
            # and breaks indexing.
            mid = (l + h) // 2
            if val == a[mid]:
                return mid
            elif val < a[mid]:
                # The insertion point is `mid` when the left neighbour is
                # smaller (or we are already at the slice's left edge).
                if (mid > l and a[mid - 1] < val) or mid == l:
                    return -mid - 1
                return self.__binSearch(a, val, l, mid - 1)
            else:  # val > a[mid]
                # Symmetric case: insertion point is mid + 1.
                if (mid < h and a[mid + 1] > val) or mid == h:
                    return -(mid + 1) - 1
                return self.__binSearch(a, val, mid + 1, h)
        # Unreachable for well-formed sorted input: every branch above
        # returns before the range empties.
        raise ValueError('value', val, 'not found in array')
if __name__ == "__main__":
#a = [1, 2, 3, 5, 6, 7, 8, 9]
#val = 4
a = [2, 3, 4, 10, 40]
val = 5
print Search().binarySearch(a, val)
|
[
"sansshar@cisco.com"
] |
sansshar@cisco.com
|
a6f137a79a1e01fabe2fd1eb2e860908d545ca92
|
1ab5f49acd587523be1d74111cf7e5aef340c12f
|
/analysis/regular_array_monte_carlo.py
|
305734c3fd0c3b08a362ebb5a886ffadd6ddc1af
|
[
"MIT"
] |
permissive
|
AndrewKirby2/data_synthesis
|
b2837d302eb914bc84d1be4aa740caef32a00c41
|
656858137a348fd5dcb57bcd04bdfece2b9eac1b
|
refs/heads/main
| 2023-04-24T01:08:28.457141
| 2021-05-17T16:32:22
| 2021-05-17T16:32:22
| 351,389,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
"""Monte carlo sampling of regular arrays and then plot
(x1,y1,x2,y2,x3,y3) for 3 most important turbines"""
import sys
import matplotlib.pyplot as plt
sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')
from regular_array_sampling.functions import regular_array_monte_carlo
num_iters = 10000
fig = plt.figure(figsize=(12.0, 5.0))
turbine1 = fig.add_subplot(1, 3, 1)
turbine1.set_xlim([0, 30])
turbine1.set_ylim([-5, 5])
turbine1.set_xlabel('x_1 (D m)')
turbine1.set_ylabel('y_1 (D m)')
turbine2 = fig.add_subplot(1, 3, 2)
turbine1.set_xlim([0, 30])
turbine1.set_ylim([-5, 5])
turbine2.set_xlabel('x_2 (D m)')
turbine2.set_ylabel('y_2 (D m)')
turbine3 = fig.add_subplot(1, 3, 3)
turbine1.set_xlim([0, 30])
turbine1.set_ylim([-5, 5])
turbine3.set_xlabel('x_3 (D m)')
turbine3.set_ylabel('y_3 (D m)')
turbine_coords = regular_array_monte_carlo(num_iters)
turbine1.scatter(turbine_coords[:, 0], turbine_coords[:, 1])
turbine2.scatter(turbine_coords[:, 2], turbine_coords[:, 3])
turbine3.scatter(turbine_coords[:, 4], turbine_coords[:, 5])
plt.savefig('analysis/regular_array_monte_carlo_plots/regular_array_monte_carlo10000.png')
|
[
"andrew.kirby@trinity.ox.ac.uk"
] |
andrew.kirby@trinity.ox.ac.uk
|
ca8d11df99a0ea2ff359eea46a0a99cd4babfd09
|
3aae82b0a2a09e9ff278adf21dfa7c31299f3930
|
/tut_travello/migrations/0001_initial.py
|
25c92ff84e10a99fa855efaf431b38b0edae06cc
|
[] |
no_license
|
aakriti1435/Django-Basic-Functionality-of-Travelling-website
|
6a2460991b3044f852410e60af154cab9274152d
|
88c36c24abf480377d66af87c767aba9b5bf1918
|
refs/heads/master
| 2022-08-12T05:28:46.212976
| 2020-05-21T16:57:47
| 2020-05-21T16:57:47
| 265,903,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
# Generated by Django 2.0.6 on 2019-12-24 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Destination table for
    # the travel app (name, image, description, price, offer flag).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('img', models.ImageField(upload_to='pics')),
                ('des', models.TextField()),
                ('price', models.IntegerField()),
                ('offer', models.BooleanField(default=False)),
            ],
        ),
    ]
|
[
"writetoaakriti07@gmail.com"
] |
writetoaakriti07@gmail.com
|
1b7223ca1585505983c20d08e3c6eab1fb12ce82
|
339406ea63e171fb2203ad3db15688e2a809a638
|
/app/utils/search.py
|
65488b2dc73fd810e2ca7fa9d8a38ee76f38f1de
|
[] |
no_license
|
halcyonbrowser/backend
|
78410ae89df2fc6c4bb4f958a7ee1cd01a1ad7ac
|
d4b74d5f9d0cf6b017cb7c59a4e742b261f45e24
|
refs/heads/master
| 2021-01-11T18:47:28.819705
| 2017-01-23T05:42:09
| 2017-01-23T05:42:09
| 79,625,005
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 936
|
py
|
from os import environ
from googleapiclient.discovery import build
def search(search_term):
    """
    Search Google for the uttered search term.

    Uses the Google Custom Search JSON API; credentials come from the
    GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables.

    :param search_term: string - search term
    :return: iterable of SearchResultsList entries (at most 20)
    """
    def google_search(term, api_key, cse_id, **kwargs):
        # Build a client per call; execute() returns the parsed JSON
        # response, i.e. plain dicts.
        service = build("customsearch", "v1", developerKey=api_key)
        res = service.cse().list(q=term, cx=cse_id, **kwargs).execute()
        # Bug fix: a query with no hits omits the 'items' key entirely;
        # the original res['items'] raised KeyError in that case.
        return res.get('items', [])

    results = google_search(
        search_term,
        environ.get("GOOGLE_API_KEY", ""),
        environ.get("GOOGLE_CSE_ID", ""),
    )

    def mapper(result):
        # Bug fix: each result is a dict (parsed JSON), so attribute access
        # such as result.cacheId raised AttributeError -- use key lookups.
        # .get() is used for cacheId since not every result carries one.
        return {
            "doc_id": result.get("cacheId"),
            "title": result.get("title"),
            "text": result.get("snippet"),
            "type": "search result",
            "entity": "organization"
        }

    return map(mapper, results[:20])
|
[
"alastairparagas@gmail.com"
] |
alastairparagas@gmail.com
|
5b52532b4bfca7921cb744f8d364b9af04668166
|
ef0a6de88bef600cef5f6fb48fd35b904ad24b06
|
/Serverv2.py
|
cd76beffddcca9d64caf0aaa10f4a37ea9e62825
|
[] |
no_license
|
nicolosinapitupulu/18213008-18213041
|
ac3472a633eacb119e631ca278b6283034d77ba4
|
5aeca77b627f14913b4b62027c068ce07b9570a6
|
refs/heads/master
| 2021-01-17T09:09:43.717942
| 2016-04-28T14:29:30
| 2016-04-28T14:29:30
| 42,163,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,394
|
py
|
# Name/NIM : Nicholas Posma Nasution/18213008 - Indra Nicolosi Waskita/18213041
# Day, Date : Wednesday, 16 September 2015
# File : Serverv2.py
import SocketServer, threading, sys, os
class MyTCPHandler(SocketServer.BaseRequestHandler):
    # Python 2 request handler: serves the contents of .txt files found in
    # the local "Text" directory to each connected client.
    def handle(self):
        # Called once per accepted connection by SocketServer.
        print str(self.client_address[0]) + " connected"
        menu = "\n"
        filename = ""
        counter = 0
        # Build a numbered menu of the available .txt files.
        for file in os.listdir("Text"):
            if(file.endswith(".txt")):
                counter = counter + 1
                menu = menu + str(counter) + ". " + file + "\n"
                filename = filename + file + "\n"
        self.request.send(menu.rstrip("\n"))
        # menu becomes a flat token list: [number, name, number, name, ...]
        menu = menu.split()
        while 1:
            self.msg = self.request.recv(1024)
            if(not self.msg):
                # Empty recv => peer closed the connection.
                break
            if(self.msg == "@bye"):
                print str(self.client_address[0]) + " disconnected"
                break
            print str(self.client_address[0]) + " wrote: " + self.msg
            if(self.msg.isdigit()):
                if(int(self.msg) <= counter) and (int(self.msg) >= 1):
                    # Token index (n*2 - 1) is the filename of menu entry n.
                    # NOTE(review): this indexing assumes filenames contain
                    # no spaces -- verify the Text/ directory contents.
                    file = open("Text/" + menu[(int(self.msg) * 2) - 1], "r")
                    self.request.send(file.read().rstrip('\n'))
                else:
                    self.request.send("text unavailable")
            else:
                self.request.send("please input number!")
if __name__ == "__main__":
if(len(sys.argv) < 3):
print "Usage : phyton server.py localhost port"
sys.exit()
host = sys.argv[1]
port = int(sys.argv[2])
server = SocketServer.TCPServer((host, port), MyTCPHandler)
server.serve_forever()
|
[
"nicolosinapitupulu@gmail.com"
] |
nicolosinapitupulu@gmail.com
|
cf4fb3e62cf28791bbe1e41cd0efebf220cb2c7a
|
548430b511497e623b2119f6f5531ddc4deca693
|
/PatternDetection_8_November/TreeFunctions.py
|
d40b330542902fcd5f6ec2f8ca96e48fac71a1a9
|
[] |
no_license
|
xiziwang/aa-database-builder
|
e388d842456bed5bc77b0a05815599e979141843
|
8f185a872560fa01bd020fe5d40ab8cceeee72e7
|
refs/heads/master
| 2021-01-19T16:03:01.378226
| 2017-06-13T21:24:46
| 2017-06-13T21:24:46
| 88,241,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,583
|
py
|
from Classes import PatternString, TreeNode, Sequence, MLStripper, URLInfo
def readPageInString(filename):
    # Read a whole HTML page into one lowercase string with <br> tags
    # replaced by spaces, ready for the tag parser.
    # NOTE(review): the file is opened in binary mode but the result is
    # manipulated with str literals -- this assumes Python 2 str semantics
    # (bytes == str); under Python 3 the .replace calls would fail.
    with open (filename, "rb") as myfile:
        pagetext=myfile.read()
    pagetext=pagetext.lower()
    # Line-break tags carry no tree structure; drop them so FindTags does
    # not treat them as open/close tag pairs.
    pagetext=pagetext.replace("<br>"," ")
    pagetext=pagetext.replace("<br/>"," ")
    #pagetext=pagetext.replace("<a","")
    #pagetext=pagetext[500:]
    '''pagetext=pagetext.replace("<abbr>","")
    pagetext=pagetext.replace("</abbr>","")
    pagetext=pagetext.replace("<font","")
    pagetext=pagetext.replace("</font>","")
    pagetext=pagetext.replace("<small","")
    pagetext=pagetext.replace("</small>","")
    pagetext=pagetext.replace("<select","")
    pagetext=pagetext.replace("</select>","")
    pagetext=pagetext.replace("<option","")
    pagetext=pagetext.replace("</option>","")
    pagetext=pagetext.replace("<b>","")
    pagetext=pagetext.replace("</b>","")
    pagetext=pagetext.replace("<i>","")
    pagetext=pagetext.replace("</i>","")
    pagetext=pagetext.replace("<center>","")
    pagetext=pagetext.replace("<hr","")'''
    return pagetext
def traverse(node):
    """Serialise the subtree rooted at *node* in level order.

    Each level contributes the comma-terminated tag names of its nodes
    followed by a '$$' separator, e.g. "html,$$head,body,$$".

    :param node: tree node exposing ``.tagname`` and ``.children``
    :return: the level-order traversal string
    """
    # Collect fragments in a list and join once at the end: the original
    # built the string with repeated += which is quadratic in the worst
    # case.
    parts = []
    level = [node]
    while level:
        next_level = []
        for current in level:
            parts.append(current.tagname + ",")
            next_level.extend(current.children)
        parts.append("$$")
        level = next_level
    return "".join(parts)
'''same as traverse but print the nodes'''
def traversePrint(node):
    # Same level-order walk as traverse(), but prints each level on its own
    # line instead of returning a string (Python 2 print statements).
    thislevel = [node]
    while len(thislevel)>0:
        nextlevel = list()
        for n in thislevel:
            print n.tagname+",",
            for c in n.children:
                nextlevel.append(c)
        # Bare print ends the current level's line.
        print
        thislevel = nextlevel
'''sets all the immediate subnodes of a node. for each node of the trees,
traverses all it's child nodes and saves these strings as subpages of the node'''
'''this global list contains all the patterns with their start and
end position to find out text inside the tags'''
def SetSubtrees(rootnode):
    """Populate every node's ``subpages`` with PatternString records for
    its children and return the flat list of all records.

    Walks the tree breadth-first; for each child node a PatternString is
    built from the child's level-order serialisation (via ``traverse``)
    and its start/end positions in the page text.

    :param rootnode: root of the parsed tag tree
    :return: list of every PatternString created
    """
    collected = []
    frontier = [rootnode]
    while frontier:
        next_frontier = []
        for parent in frontier:
            for child in parent.children:
                pattern = PatternString(
                    traverse(child), child.position, child.endposition, child)
                parent.subpages.append(pattern)
                collected.append(pattern)
                next_frontier.append(child)
        frontier = next_frontier
    return collected
'''def printall(rootnode):
traversalstring=""
thislevel = [rootnode]
while len(thislevel)>0:
nextlevel = list()
for n in thislevel:
print n.tagname
print n.subpages.patString
for c in n.children:
nextlevel.append(c)
print
thislevel = nextlevel'''
'''this function finds all the repetitive patterns in the page'''
def FindDuplicates(rootnode):
    """Collect every distinct subtree pattern in the page, with counts.

    Performs a level-order walk over the tree.  For each PatternString
    attached to a node's ``subpages``, a Sequence is created on first
    sight (its seqcount initialised to the number of '$' level separators
    in the pattern, matching the original behaviour) and its seqcount is
    incremented on every repeat sighting.

    :param rootnode: root of the parsed tag tree (subpages already set by
        SetSubtrees)
    :return: list of Sequence objects, one per distinct pattern string
    """
    taglist = []   # Sequence objects, one per distinct pattern (result)
    seen = {}      # pattern string -> its Sequence, for O(1) lookup
                   # (replaces the original O(n) list scans per child)
    thislevel = [rootnode]
    while thislevel:
        nextlevel = []
        for n in thislevel:
            for child in n.subpages:
                seq = seen.get(child.patString)
                if seq is None:
                    seq = Sequence(child.patString, child.patString.count("$"))
                    seen[child.patString] = seq
                    taglist.append(seq)
                else:
                    seq.seqcount += 1
            for c in n.children:
                nextlevel.append(c)
        thislevel = nextlevel
    return taglist
def FindTags(pagetext,current):
    # Single-pass tag parser: scans the page text character by character,
    # building a tree of TreeNode objects under `current`.  Records each
    # node's start offset (position) when its opening tag is seen and its
    # end offset (endposition) when the matching close is seen.
    i=0
    while i < len(pagetext):
        # Opening tag: '<' followed by a letter (skips comments/doctype).
        if pagetext[i]=='<' and ((pagetext[i+1]>="A" and pagetext[i+1]<="Z") or (pagetext[i+1]>="a" and pagetext[i+1]<="z")):
            # <br> carries no structure; skip it outright.
            if pagetext[i+1]=="b" and pagetext[i+2]=="r" and pagetext[i+3]==">":
                i=i+4
                continue
            # Collect the tag name up to '>' or the first attribute space.
            j=i+1
            tag=""
            while pagetext[j]!=">" and pagetext[j]!=" ":
                tag += pagetext[j]
                j+=1
            tagnode=TreeNode(tag)
            tagnode.position=i
            if current:
                current.add_child(tagnode)
                tagnode.set_parent(current)
            # Descend: the new node becomes the open element.
            current=tagnode
            i=j
        # Closing tag '</name>': record the end offset and pop back to the
        # parent.  NOTE(review): the end-tag name is collected but never
        # compared against current.tagname, so mismatched HTML silently
        # unbalances the tree -- confirm inputs are well-formed.
        if pagetext[i]=='<' and pagetext[i+1]=="/":
            j=i+2
            endtag=""
            while pagetext[j]!=">":
                endtag += pagetext[j]
                j+=1
            if current:
                current.endposition=j+1
                current=current.parent
            i=j
        # Self-closing tag '.../>' also ends the current element.
        if pagetext[i]=='/' and pagetext[i+1]==">":
            current.endposition=i
            i=i+1
            current=current.parent
        i += 1
|
[
"wang3570@umn.edu"
] |
wang3570@umn.edu
|
ee9a2614c35b885b1ec13fa6da9c0e3ef6a6a839
|
4482c4261d549eb531e74483ee9c70701bb1137c
|
/news/models.py
|
5b4a6e58ac2712ef8f4e14344f53651a86cd9da4
|
[] |
no_license
|
BensonGathu/Tribune
|
b332b6e4f487077b76323b0dd76e535301997814
|
41385f7a7697bcddda54fe3a4793c05d38fd7a96
|
refs/heads/master
| 2023-06-16T08:16:00.665054
| 2021-07-12T08:36:29
| 2021-07-12T08:36:29
| 385,175,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
from django.db import models
import datetime as dt
from django.contrib.auth.models import User
from tinymce.models import HTMLField
# Create your models here.
# class Editor(models.Model):
# first_name = models.CharField(max_length= 50)
# last_name = models.CharField(max_length=50)
# email = models.EmailField()
# phone_number = models.CharField(max_length=10,blank=True)
# def __str__(self):
# return self.first_name
# class Meta:
# ordering = ['first_name']
# def save_editor(self):
# self.save()
class tags(models.Model):
    # Tag label that can be attached to articles.  (Class name kept
    # lowercase: renaming it would break the ManyToManyField reference in
    # Article and the existing migrations.)
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name

    def save_tag(self):
        # Convenience wrapper around Model.save().
        self.save()
class Article(models.Model):
    # A news article written by an editor (Django auth User) and labelled
    # with any number of tags.
    title = models.CharField(max_length=60)
    # Rich-text body edited through tinymce.
    post = HTMLField()
    editor = models.ForeignKey(User,on_delete=models.CASCADE)
    tags = models.ManyToManyField(tags)
    # Set automatically when the row is first created.
    pub_date = models.DateTimeField(auto_now_add = True)
    article_image = models.ImageField(upload_to = 'articles/',default='SOME IMAGE')

    def __str__(self):
        return self.title

    def save_article(self):
        # Convenience wrapper around Model.save().
        self.save()

    @classmethod
    def todays_news(cls):
        # Articles published today (server-local date).
        today = dt.date.today()
        news = cls.objects.filter(pub_date__date = today)
        return news

    @classmethod
    def days_news(cls,date):
        # Articles published on the given date.
        news = cls.objects.filter(pub_date__date = date)
        return news

    @classmethod
    def search_by_title(cls,search_term):
        # Case-insensitive substring match on the title.
        news = cls.objects.filter(title__icontains=search_term)
        return news
class NewsLetterRecipients(models.Model):
    # Newsletter subscriber: display name plus delivery address.
    name = models.CharField(max_length=40)
    email = models.EmailField()
|
[
"bensongathu23@gmail.com"
] |
bensongathu23@gmail.com
|
e49a703c6d8b64cec1ef494d84c93c960e87f502
|
e165af04e29e028be5d9426601e003f88e227358
|
/Hubi_Discord/funciones/thispersondoesnotexist/helpers.py
|
889b0f064db288e5d93c7de1347a8e8a40513e7b
|
[
"MIT"
] |
permissive
|
teehanming/Hubi_AI
|
3412bff4c83fdc0b3084a168e62fde0e33b281bb
|
1a5a2301cc80d9eeb2ecc1d1d15f24a288e1e56f
|
refs/heads/main
| 2023-08-30T04:57:57.224525
| 2021-11-17T03:07:05
| 2021-11-17T03:07:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
import hashlib
from hashlib import algorithms_available
__all__ = ("get_checksum_from_picture", "algorithms_available", "save_picture")
def get_checksum_from_picture(picture: bytes, method: str = "md5") -> str:
    """Compute the hex digest of *picture* with the chosen hash algorithm.

    Valid algorithm names are those reported by the algorithms_available
    function; the name is matched case-insensitively.

    :param picture: picture content as bytes
    :param method: hashing method as string (optional, default=md5)
    :return: checksum as string
    """
    digest = hashlib.new(method.lower())
    digest.update(picture)
    return digest.hexdigest()
def save_picture(picture: bytes, file: str = None) -> int:
    """Write *picture* to *file* and return the number of bytes written.

    The picture must be provided as its content as bytes. The filename is a
    str with the absolute or relative path where to store it. When *file* is
    omitted, a name is derived from the picture's MD5 checksum with a
    ``.jpeg`` extension.

    :param picture: picture content as bytes
    :param file: filename as string, relative or absolute path (optional)
    :return: int returned by file.write
    """
    target = file if file is not None else get_checksum_from_picture(picture) + ".jpeg"
    with open(target, "wb") as out:
        return out.write(picture)
|
[
"noreply@github.com"
] |
teehanming.noreply@github.com
|
725c0c6f6b937e6ca9589f2b1068b6dfb8026256
|
8bb26300ef3db75053203e45b20e40de14830ead
|
/nccdc/myria/filter_distinct_query.py
|
68d30f29e999642084dddf1be8177e354a8a0712
|
[] |
no_license
|
uwescience/myria-demo
|
56b46664c8df696135e1c4bf6bd411096a649232
|
5b044b00d3df36b0e86e3d3c412819e52c5f50ed
|
refs/heads/master
| 2021-03-12T23:32:47.259819
| 2014-02-09T19:11:48
| 2014-02-09T19:11:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,226
|
py
|
#!/usr/bin/env python
"""
Filter out a relevant time range in the nccdc dataset.
Project out only the src and dst fields.
Eliminate duplicates.
Store the result in a new nccdc_filtered_distinct relation.
nccdc_filtered_distinct(src,dst) :- nccdc(src, dst, timestamp, _,_,_,_),
timestamp > 1366475761,
timestamp < 136647582
"""
import json
def pretty_json(obj):
    """Serialize *obj* as key-sorted, 4-space-indented JSON for readable plans."""
    options = {"sort_keys": True, "indent": 4, "separators": (",", ": ")}
    return json.dumps(obj, **options)
# Open interval of UNIX timestamps selected by the two filters in generate()
# (rows must satisfy timestamp > time_range[0] and timestamp < time_range[1]).
time_range = (1366475761, 1366475821)
# Source relation scanned by the plan (stored as public:adhoc:nccdc).
input_relation_name = 'nccdc'
def generate():
    """Build and return (as pretty-printed JSON) the two-fragment Myria plan
    for nccdc_filtered_distinct(src, dst).

    Fragment 1: scan `nccdc` -> filter timestamp (column 3) inside
    time_range -> project (src, dst) -> hash-shuffle on both columns.
    Fragment 2: consume the shuffle -> eliminate duplicates -> insert into
    public:adhoc:nccdc_filtered_distinct (overwriting any existing table).
    """
    # Leaf operator: full scan of the input relation.
    scan = {
        'op_type' : 'TableScan',
        'op_name' : 'Scan',
        "relation_key" : {
            "user_name" : "public",
            "program_name" : "adhoc",
            "relation_name" : input_relation_name,
        }
    }
    # Keep rows whose timestamp (column index 3) is strictly above the window start.
    filter1 = {
        'op_type' : 'Filter',
        'op_name' : 'Filter1',
        'arg_child' : 'Scan',
        'arg_predicate' : {
            'type': 'SimplePredicate',
            'arg_compare_index': 3,
            'arg_op': "GREATER_THAN",
            'arg_compare_value' : time_range[0]
        }
    }
    # ...and strictly below the window end.
    filter2 = {
        'op_type' : 'Filter',
        'op_name' : 'Filter2',
        'arg_child' : 'Filter1',
        'arg_predicate' : {
            'type': 'SimplePredicate',
            'arg_compare_index': 3,
            'arg_op': "LESS_THAN",
            'arg_compare_value' : time_range[1]
        }
    }
    # Keep only the first two columns: src (0) and dst (1).
    project = {
        'op_type' : 'Project',
        'op_name' : 'Project',
        'arg_child' : 'Filter2',
        'arg_field_list' : ["0","1"]
    }
    # Hash-partition on (src, dst) so identical pairs land on the same worker,
    # which lets the downstream DupElim work locally.
    producer = {
        "op_name": "Shuffle",
        "op_type": "ShuffleProducer",
        "arg_child": "Project",
        "arg_operator_id": "op1",
        "arg_pf": {
            "index": [
                "0",
                "1"
            ],
            'type' : 'MultiFieldHash'
        }
    }
    fragment1 = {
        'operators' : [scan, filter1, filter2, project, producer]
    }
    # Receiving side of the shuffle; schema matches the projected columns.
    consumer = {
        "op_name": "Consumer",
        "op_type": "ShuffleConsumer",
        "arg_operator_id": "op1",
        "arg_schema": {
            "column_names": [
                "src",
                "dst"
            ],
            "column_types": [
                "STRING_TYPE",
                "STRING_TYPE"
            ]
        }
    }
    dupelim = {
        'op_name' : 'DupElim',
        'op_type' : 'DupElim',
        'arg_child' : 'Consumer'
    }
    # Materialize the result, replacing any prior nccdc_filtered_distinct table.
    insert = {
        'op_type' : 'DbInsert',
        'op_name' : 'Insert',
        'arg_child' : 'DupElim',
        'arg_overwrite_table' : True,
        'relation_key' : {
            'user_name' : 'public',
            'program_name' : 'adhoc',
            'relation_name' : 'nccdc_filtered_distinct'
        }
    }
    fragment2 = {
        'operators' : [consumer, dupelim, insert]
    }
    # NOTE(review): the raw_datalog upper bound (136647582) differs from
    # time_range[1] (1366475821) — looks like a dropped digit in the comment
    # string; the executed plan uses time_range.
    return pretty_json({
        'fragments' : [fragment1, fragment2],
        'logical_ra' : '???',
        'raw_datalog' :
            'nccdc_filtered_distinct(src,dst) :- ' +
            'nccdc(src, dst, timestamp, _,_,_,_), ' +
            'timestamp > 1366475761, timestamp < 136647582'
    })
if __name__ == "__main__":
    # Parenthesized print: valid on both Python 2 and Python 3.
    # (The original bare `print generate()` is a SyntaxError on Python 3.)
    print(generate())
|
[
"whitaker@cs.washington.edu"
] |
whitaker@cs.washington.edu
|
405adb9607642dc4250bd4817cf6125d2083c5d9
|
686f3fec1aeff15c4040d15757c5c9be90d5da5e
|
/main_parallelized.py
|
4c3c3d2e430c8028dac5442c3cb3b9a9becf1799
|
[] |
no_license
|
jendawkins/cdiff_metabolomics
|
6ef3a9b572002591e9f5ddc91daa5660698b08dd
|
0f53949b0ce762a441ed3be741d9327ec094eeca
|
refs/heads/master
| 2022-11-10T11:16:34.306032
| 2020-06-26T01:11:25
| 2020-06-26T01:11:25
| 265,290,750
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,992
|
py
|
from dataLoaderCdiff import *
from ml_methods import *
import argparse
import numpy as np
import torch
from torch import nn
import pickle
def main_parallelized(ml, lamb, zip_ixs, net, optimizer, TRAIN, TRAIN_L, epochs, weighting, regularizer, inner_dic):
    """Inner cross-validation loop for one regularization strength `lamb`.

    For each (train, test) split in `zip_ixs`, re-initializes `net`, trains it
    with `ml.train_loop` (optionally class-weighted BCE loss), early-stops when
    the training loss plateaus, and collects per-fold test predictions.
    Records AUC, F1 and mean test loss for `lamb` into `inner_dic` and
    returns it.

    Assumes TRAIN is a pandas DataFrame and TRAIN_L an indexable label array
    — TODO confirm against callers.
    """
    y_test_vec = []
    y_guess_vec = []
    test_running_loss = 0
    for train_index, test_index in zip_ixs:
        # initialize net for each new dataset
        net.apply(ml.init_weights)
        X_train, X_test = TRAIN.iloc[train_index,:], TRAIN.iloc[test_index,:]
        y_train, y_test = TRAIN_L[train_index], TRAIN_L[test_index]
        # A scalar test_index (leave-one-out) needs extra wrapping to keep a batch dim.
        if isinstance(test_index, int):
            X_train, y_train, X_test, y_test = torch.FloatTensor(np.array(X_train)), torch.DoubleTensor(
                y_train), torch.FloatTensor([np.array(X_test)]), torch.DoubleTensor([[y_test]])
        else:
            X_train, y_train, X_test, y_test = torch.FloatTensor(np.array(X_train)), torch.DoubleTensor(
                y_train), torch.FloatTensor(np.array(X_test)), torch.DoubleTensor(y_test)
        if weighting:
            # Inverse-frequency class weights for imbalanced labels.
            weights = len(y_train) / (2 * np.bincount(y_train))
            criterion = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor(weights))
        else:
            criterion = nn.BCEWithLogitsLoss()
        y_test_per_epoch = []
        y_guess_per_epoch = []
        running_vec = []
        for epoch in range(epochs):
            out, loss = ml.train_loop(X_train, y_train, net,
                                      optimizer, criterion, lamb, regularizer)
            # evaluate on test set every 10 epochs
            if epoch % 10 ==0:
                test_out, test_loss, y_guess = ml.test_loop(
                    net, X_test, y_test, criterion)
                y_test_per_epoch.append(y_test)
                y_guess_per_epoch.append(y_guess)
                running_vec.append(loss)
            if len(running_vec) > 12:
                # Plateau test: training loss changed by <= 1e-4 between evaluations.
                bool_test = np.abs(running_vec[-1] - running_vec[-2])<=1e-4
            # perform early stopping if greater than 50 epochs and if either loss is increasing over the past 10 iterations or auc / f1 is decreasing over the past 10 iterations
            if (len(running_vec) > 12 and bool_test):
                y_test_vec.append(y_test_per_epoch[-2])
                y_guess_vec.append(y_guess_per_epoch[-2])
                test_running_loss += test_loss
                # add record of lambda and lowest loss or highest auc/f1 associated with that lambda at this epoch
                break
        # if len(y_test_vec) ==1:
        # NOTE(review): these appends run after every fold, so when early
        # stopping fires the same fold's predictions are recorded twice —
        # confirm this is intended.
        y_test_vec.append(y_test_per_epoch[-2])
        y_guess_vec.append(y_guess_per_epoch[-2])
        test_running_loss += test_loss
    # import pdb; pdb.set_trace()
    y_guess_mat = np.concatenate(y_guess_vec)
    y_pred_mat = np.argmax(y_guess_mat, 1)
    if len(y_test_vec) < y_guess_mat.shape[0]:
        y_test_vec = np.concatenate(y_test_vec)
    f1 = sklearn.metrics.f1_score(y_test_vec,y_pred_mat)
    try:
        fpr, tpr, _ = roc_curve(y_test_vec, y_guess_mat[:, 1].squeeze())
    except:
        import pdb; pdb.set_trace()
    roc_auc = auc(fpr, tpr)
    # Record the metrics for this lambda.
    if lamb not in inner_dic.keys():
        inner_dic[lamb] = {}
    inner_dic[lamb]['auc'] = roc_auc
    inner_dic[lamb]['f1'] = f1
    inner_dic[lamb]['loss'] = test_running_loss / (len(TRAIN_L)+1)
    return inner_dic
if __name__ == "__main__":
    # Entry point: runs the outer CV loop for one (data type, lambda) setting,
    # accumulating per-lambda metrics into a pickled inner_dic across jobs.
    # path = '/PHShome/jjd65/CDIFF/cdiff_metabolomics/outdir/'
    parser = argparse.ArgumentParser()
    # Load the raw metabolomics data and build the per-patient dictionary.
    cd = cdiffDataLoader()
    cd.make_pt_dict(cd.cdiff_raw)
    parser.add_argument("-dtype", "--data_type",
                        help="type of data", type=str)
    parser.add_argument("-lambda", "--lambda_val",
                        help="lambda to test", type=float)
    parser.add_argument("-reg", "--regularizer",
                        help="regularizer", type=int)
    parser.add_argument("-w", "--weighting",
                        help="weighting", type=str)
    parser.add_argument("-lr", "--learning_rate",
                        help="weighting", type=float)
    parser.add_argument("-epoch", "--epochs",
                        help="weighting", type=int)
    args = parser.parse_args()
    # argparse delivers strings; convert the weighting flag to a real bool.
    if args.weighting == 'False':
        weights = False
    if args.weighting == 'True':
        weights = True
    ml = mlMethods(cd.pt_info_dict, lag=1)
    data = ml.data_dict[args.data_type]
    labels = ml.targets_int[args.data_type]
    path = 'outputs_june25/'
    import os
    if not os.path.isdir(path):
        os.mkdir(path)
    ml.path = path
    # Reuse cached leave-one-out indices if present; otherwise build and cache them.
    try:
        with open(args.data_type + '_ixs.pkl', 'rb') as f:
            ixx = pickle.load(f)
    except:  # NOTE(review): bare except also swallows unpickling errors — confirm intended
        ixx = ml.leave_one_out_cv(data, labels)
        pickle.dump(ixx, open(path + "ixs.pkl", "wb"))
    outer_loops = len(ixx)
    for i in range(outer_loops):
        ix_in = ixx[i]
        ixtrain, ixtest = ix_in
        TRAIN, TRAIN_L, TEST, TEST_L = data.iloc[ixtrain,
                                                 :], labels[ixtrain], data.iloc[ixtest, :], labels[ixtest]
        # Results file name encodes the hyperparameter configuration.
        name = path + args.data_type + '_' + str(args.weighting) + '_' + str(
            args.regularizer) + str(args.learning_rate).replace('.', '') + str(args.epochs) + 'inner_dic.pkl'
        # After the first outer fold, continue accumulating into the saved dict.
        if i !=0:
            with open(name, 'rb') as f:
                inner_dic = pickle.load(f)
        else:
            print('couldnt find dic')
            inner_dic = {}
        zip_ixs = ml.leave_one_out_cv(TRAIN, TRAIN_L)
        net = LogRegNet(TRAIN.shape[1])
        optimizer = torch.optim.RMSprop(net.parameters(), lr=args.learning_rate)
        inner_dic = main_parallelized(ml, args.lambda_val, zip_ixs, net, optimizer, TRAIN,
                                      TRAIN_L, args.epochs, weights, args.regularizer, inner_dic)
        print(args.weighting)
        pickle.dump(inner_dic, open(name, "wb"))
        print('loop ' + str(i) +' Complete')
|
[
""
] | |
5101143cf9adf62f78c6823662968dad578cd625
|
9b7b43ff0283ac27731d17caf3cfa69d7a3a894e
|
/Modulos_propios/Sort_Json.py
|
a55d01a79e8494f1b8c72a5c12c013417720812e
|
[] |
no_license
|
eytati/Bot-Viajero
|
f97382056268af67cee5f19e10e4c38fa9d48cb2
|
551b8a83e9ecd1508c6c14df66149d33197ef465
|
refs/heads/master
| 2021-01-22T18:38:02.324525
| 2017-04-29T00:32:00
| 2017-04-29T00:32:11
| 85,096,256
| 0
| 0
| null | 2017-04-10T05:17:39
| 2017-03-15T16:32:33
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,622
|
py
|
from bson import ObjectId
from decimal import *
# Trap FloatOperation so any accidental mixing of Decimal and float below
# raises instead of silently losing precision.
c = getcontext()
c.traps[FloatOperation] = True
class Sort():
    """In-place quicksort for a list of dicts, ordered ascending by one key.

    ``sort_list(list, type)`` sorts ``list`` by the value under the dict key
    ``type`` and returns the same (mutated) list.  Parameter names ``list``
    and ``type`` shadow builtins but are kept for interface compatibility.
    """

    def sort_list(self, list, type):
        """Sort *list* in place by key *type* and return it.

        Fix: always return the list.  The original returned the value of
        sort_split, which is None whenever len(list) <= 1 (the recursion
        guard `first < last_one` fails and nothing was returned).
        """
        self.sort_split(list, 0, len(list) - 1, type)
        return list

    def sort_split(self, list, first, last_one, type):
        """Recursive quicksort step over list[first..last_one] (inclusive)."""
        if first < last_one:
            split = self.sort_division(list, first, last_one, type)
            self.sort_split(list, first, split - 1, type)
            self.sort_split(list, split + 1, last_one, type)
        return list

    def sort_division(self, list, first, last_one, type):
        """Partition around the pivot list[first]; return the pivot's final index."""
        compare_value = list[first].get(type)
        left_part = first + 1
        right_part = last_one
        done = False
        while not done:
            # Advance the left cursor past values <= pivot.
            value_1 = list[left_part].get(type)
            while left_part <= right_part and value_1 <= compare_value:
                left_part += 1
                if left_part < len(list):
                    value_1 = list[left_part].get(type)
            # Retreat the right cursor past values >= pivot.
            # (Fix: removed leftover debug print of value_2.)
            value_2 = list[right_part].get(type)
            while value_2 >= compare_value and right_part >= left_part:
                right_part -= 1
                if right_part < len(list):
                    value_2 = list[right_part].get(type)
            if right_part < left_part:
                done = True
            else:
                list[left_part], list[right_part] = list[right_part], list[left_part]
        # Place the pivot at its final position.
        list[first], list[right_part] = list[right_part], list[first]
        return right_part
|
[
"eytati050@hotmail.com"
] |
eytati050@hotmail.com
|
5bd0706e46f015ca95d85fbc4ceb92c1eaa899f7
|
ba402c1f8803774ecd7904410d03eb619d95e255
|
/SpiderDemo/settings.py
|
76b379eca12f2f19fc0db802d47039f842a3b30c
|
[] |
no_license
|
993739033/SpiderDemo
|
c131305a2d2207c5c1bf475fd05b966ecdb081ef
|
6678b0882bcc37a675382efaddbf726a7c5aa313
|
refs/heads/master
| 2020-05-22T14:06:11.669733
| 2019-05-13T08:21:20
| 2019-05-13T08:21:20
| 186,376,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,117
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for SpiderDemo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Project identifier used in logs and the default User-Agent.
BOT_NAME = 'SpiderDemo'

# Where Scrapy discovers spider classes / where `genspider` puts new ones.
SPIDER_MODULES = ['SpiderDemo.spiders']
NEWSPIDER_MODULE = 'SpiderDemo.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'SpiderDemo (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'SpiderDemo.middlewares.SpiderdemoSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'SpiderDemo.middlewares.SpiderdemoDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'SpiderDemo.pipelines.SpiderdemoPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"993739033"
] |
993739033
|
cd34f5a2cb4a47abb143e99ac221b392cc89bd8c
|
3d9f6e0d50daaf020bb56fe87226ee16469b9a14
|
/piconzero/parcourstest.py
|
cc1be5bdd35d41516e15e2d5c734bde54b2b0080
|
[] |
no_license
|
aureliengit/pc3
|
3fad8b7aa897f9c0a1825753d961dc6f3b0fed64
|
3d402a47bebca771a9da79bde3b28e4be51651db
|
refs/heads/master
| 2020-03-07T05:02:40.914928
| 2018-03-29T11:42:49
| 2018-03-29T11:42:49
| 127,283,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import time
import piconzero as pz
import hcsr04
# Robot obstacle-avoidance loop (Python 2: note the print statement below).
# Configure Picon Zero pins 2 and 3 as digital inputs (sensors).
pz.setInputConfig(2,0)
pz.setInputConfig(3,0)
pz.init()
hcsr04.init()
speed = 65   # reverse drive speed
i=2          # spin-direction toggle: even -> spin right, odd -> spin left
try:
    while True:
        # Both sensors low: spin, alternating direction each time via i%2.
        if ((pz.readInput(2)!=1) and (pz.readInput(3)!=1) and i%2==0):
            pz.stop()
            time.sleep(0.1)
            pz.spinRight(70)
            time.sleep(0.25)
        elif ((pz.readInput(2)!=1) and (pz.readInput(3)!=1) and i%2==1):
            pz.stop()
            time.sleep(0.1)
            pz.spinLeft(70)
            time.sleep(0.25)
        # Both sensors high: flip the spin direction for the next trigger.
        elif ((pz.readInput(2)==1) and (pz.readInput(3)==1)):
            i+=1
        # Mixed readings: keep reversing.
        else:
            pz.reverse(speed)
except KeyboardInterrupt:
    print "Au revoir"
finally:
    # Always release the motor controller and sonar on exit.
    pz.cleanup()
    hcsr04.cleanup()
|
[
"derume.aurelien@hotmail.fr"
] |
derume.aurelien@hotmail.fr
|
20f41c377812561ce60f33c3b551a357b9c374a6
|
90caec6d76d44718e74c6cfacc7c5b48bbf117f2
|
/BodyTracking/PoseModule.py
|
648e498def25dc773e34c42e3f1a5f7c09b68a52
|
[] |
no_license
|
cfdelv100/SampleProjects
|
bc002e440bd13f43d77bd3e67209ac575233388c
|
0ddda28d831cce65ee3ab581aa7b497b3573a811
|
refs/heads/main
| 2023-05-11T13:39:13.487180
| 2021-06-03T00:38:05
| 2021-06-03T00:38:05
| 350,955,935
| 0
| 0
| null | 2021-06-03T00:38:06
| 2021-03-24T05:25:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
import cv2
import mediapipe as mp
import time
class poseDetector():
    """Thin wrapper around MediaPipe Pose for finding body landmarks in BGR images."""

    def __init__(self, mode=False, upBody=False, smooth=True, detectionCon=0.5, trackCon=0.5):
        # MediaPipe Pose configuration, kept on the instance for reference.
        # assumes these map positionally to mp Pose's constructor parameters
        # (static mode, upper-body only, smoothing, confidences) — TODO confirm
        # against the installed mediapipe version.
        self.mode = mode
        self.upBody = upBody
        self.smooth = smooth
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackCon)

    def findPose(self, img, draw=True):
        """Run pose detection on a BGR frame; optionally draw the skeleton.

        Must be called before findPosition — it stores the detection in
        self.results.  Returns the (possibly annotated) image.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mediapipe is fed RGB
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return img

    def findPosition(self, img, draw=True):
        """Return [[id, x_px, y_px], ...] for the landmarks of the last findPose call.

        Raises AttributeError if findPose has not been called yet
        (self.results is only set there).
        """
        lmList = []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                # lm.x / lm.y are fractions of the frame; scale to pixels.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return lmList
def main():
    """Demo loop: run pose detection on a sample video and overlay the FPS."""
    cap = cv2.VideoCapture('PoseVideos/charlesnguyen.mp4')
    previousTime = 0
    detector = poseDetector()
    while True:
        ret, img = cap.read()
        # NOTE(review): `ret` is never checked; cap.read() yields (False, None)
        # at end of video, which would crash findPose — confirm intended.
        img = detector.findPose(img)
        lmList = detector.findPosition(img, draw=False)
        if len(lmList) != 0:
            print(len(lmList[4]))
            # cv2.circle(img, (lmList[4][1], lmList[4][2]), 15, (0, 0, 255), cv2.FILLED) # Tracked position of body 4.
        # Frame rate from wall-clock delta between iterations.
        currentTime = time.time()
        fps = 1 / (currentTime - previousTime)
        previousTime = currentTime
        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(10)


if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
cfdelv100.noreply@github.com
|
48cc54e2ed38c28567a4b36ff7d62f57959f1c2f
|
3776b41b59ae354da984afd0c765627e0a05bfb1
|
/pathredux.py
|
a18e7956d396685a723cf291929196dcb265e9e8
|
[] |
no_license
|
jamesstocktonj1/Maze-Solving
|
118906d36299cddffcd2149dd6a64aa8ac790894
|
2279a1f9502d98ddfdf1dc970097ace1cfef8162
|
refs/heads/main
| 2023-06-29T06:49:12.093941
| 2021-07-18T15:41:09
| 2021-07-18T15:41:09
| 385,873,611
| 1
| 0
| null | 2021-07-18T15:41:10
| 2021-07-14T08:49:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
#path reduction algorithm
#helps reduce the complexity of the maze for the solving algorithm to be run next
#James Stockton
#12/07/21
from pathfinding import *
def deleteToNode(maze_grid, x, y):
    """Walk from the dead end at (x, y) back to the nearest junction node.

    Returns the list of [x, y] squares that form the dead-end corridor and
    should be turned into walls.  The loop only exits via the return below;
    `atNode` is never set to True.
    NOTE(review): if findNextPaths ever returns an empty list this raises
    IndexError at nextSquares[0] — confirm the maze always has an exit path.
    """
    #array of the different squares to delete
    listToDelete = [[x, y]]
    atNode = False
    #repeats until finds a node
    while not atNode:
        #get the previous coord
        prevSquare = listToDelete[len(listToDelete) - 1]
        nextSquares = findNextPaths(maze_grid, prevSquare[0], prevSquare[1])
        #if not the first node (dead end) then delete previous square to stop repeating
        if len(listToDelete) > 1:
            if listToDelete[len(listToDelete) - 2] in nextSquares:
                nextSquares.remove(listToDelete[len(listToDelete) - 2])
        #if node is not caught then this error will show up
        if len(nextSquares) > 1:
            print("REDUCTION ERROR\nThere are more than one squares to reduce")
        #if the next square is a node (3+ open neighbours) then return
        if numNewPaths(maze_grid, nextSquares[0][0], nextSquares[0][1]) > 2:
            return listToDelete
        #add the next square to the list to delete
        else:
            listToDelete.append(nextSquares[0])
def path_reduction(maze_grid):
    """Collapse every dead-end corridor of the maze into walls (0).

    Scans the interior of the grid; each open square (1) with exactly one
    reachable neighbour is a dead end, and the corridor leading from it back
    to the nearest junction is blackened.  Returns the mutated grid.
    """
    rows = len(maze_grid) - 1
    cols = len(maze_grid[0]) - 1
    for row in range(1, rows):
        for col in range(1, cols):
            # Dead end: exactly one onward path, and the square itself is open.
            if numNewPaths(maze_grid, col, row) == 1 and maze_grid[row][col] == 1:
                corridor = deleteToNode(maze_grid, col, row)
                if len(corridor) != 0:
                    for sq_x, sq_y in corridor:
                        maze_grid[sq_y][sq_x] = 0  # blacken the reduced square
    return maze_grid
|
[
"james.stocktonj@gmail.com"
] |
james.stocktonj@gmail.com
|
59e2b6b8e27f96042528b420bb574c83bd95a482
|
a871d05b08d6d1a6f0e8d7a9e4295d76713a838a
|
/app/core/tests/test_models.py
|
bc87b48c462f159da9e3cda8a3ffe6ede167ec6c
|
[
"MIT"
] |
permissive
|
gorzelakp/recipe-app-api
|
b4581b1e6c6886c638a09a9731bfe643089bbee3
|
c20099f70d9cc07f0e683bb614f99260de507416
|
refs/heads/main
| 2023-03-08T07:33:00.246403
| 2021-02-25T20:31:29
| 2021-02-25T20:31:29
| 338,833,994
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,281
|
py
|
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@example.com', password='123456'):
    """Create and return a throwaway user for the model tests below."""
    return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the core models (user, Tag, Ingredient, Recipe)."""

    def test_create_user_with_email_successful(self):
        """Creating a user stores the email and a usable password."""
        email = 'email@example.com'
        password = '123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """The domain part of a new user's email is lower-cased."""
        email = 'email@EXAMPLE.com'
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Creating a user with no email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """create_superuser sets both is_superuser and is_staff."""
        user = get_user_model().objects.create_superuser(
            'email@example.com',
            '123'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    def test_tag_str(self):
        """Tag's string representation is its name."""
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Name')
        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """Ingredient's string representation is its name."""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber'
        )
        self.assertEqual(str(ingredient), ingredient.name)

    def test_recipe_str(self):
        """Recipe's string representation is its title."""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Jakis tytul potrawy',
            time_minutes=5,
            price=5.00
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Recipe images are stored under uploads/recipe/<uuid>.jpg."""
        uuid = 'test-uuid'
        mock_uuid.return_value = uuid
        file_path = models.recipe_image_file_path(None, 'myimage.jpg')
        exp_path = f'uploads/recipe/{uuid}.jpg'
        self.assertEqual(file_path, exp_path)
|
[
"gorzelak92@gmail.com"
] |
gorzelak92@gmail.com
|
0508696cc1419df1700fd7e46faafa7ec029f799
|
a13d208eaf40c8370e2e0bbdd03b54a31fd31f47
|
/qaysen/urls.py
|
19b2a8bb7badbff324dda857e0d6aee1e31e8b12
|
[] |
no_license
|
Qaysen/SitioWeb
|
135450cabd235327073df0d3eb17c060c09b0b39
|
4376ebb8e45ce779200167c6991e0f2ca3af68b8
|
refs/heads/master
| 2021-01-18T15:15:17.292907
| 2013-01-10T21:30:15
| 2013-01-10T21:30:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Populate the admin registry from all installed apps (Django 1.x style).
admin.autodiscover()

# URL routes; string view paths and patterns() are the pre-Django-1.10 idiom.
urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'sitioweb.views.inicio'),
    # url(r'^qaysen/', include('qaysen.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
[
"edwinpgm@outlook.com"
] |
edwinpgm@outlook.com
|
97f335343cd2e6bfe7a48eeec8b2db867c671f1b
|
23c873a248b018426d4f49a91d88805e8af7eecb
|
/CatLaser/app/admin.py
|
6376b36286d36ec8bcf0fe1d47b92f143529ef17
|
[] |
no_license
|
SimonF89/catLaser
|
77e60394fe5dfa6639e68fe2a50edc52c134b3ce
|
791dea3fed72e9e6dbb8f976c871b4421b114127
|
refs/heads/master
| 2021-10-18T03:56:37.690462
| 2019-02-13T19:22:35
| 2019-02-13T19:22:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,903
|
py
|
from django.contrib import admin
from .models import Point, Edge, Playground, PointTypes, LaserPosition
from .forms import EdgeForm
class PointInline(admin.TabularInline):
    """Inline point editor for PlaygroundAdmin, limited to corner/run points."""
    model = Point
    extra = 0
    min_num = 3  # presumably at least three points are needed to form the playground shape — confirm

    def get_queryset(self, request):
        # Only corner and run_point types are editable here; other PointTypes
        # are filtered out of the inline.
        qs = super(PointInline, self).get_queryset(request)
        return qs.filter(type=PointTypes.corner) | qs.filter(type=PointTypes.run_point)
class EdgeInline(admin.TabularInline):
    """Read-only inline listing of a playground's edges (all fields locked)."""
    model = Edge
    readonly_fields = ["A", "B", "M", "Vr", "Nr"]
    extra = 0
class LaserPositionInline(admin.TabularInline):
    """Inline editor for the playground's laser position(s)."""
    model = LaserPosition
class PlaygroundAdmin(admin.ModelAdmin):
    """Playground admin: bounding-box fields read-only, laser/point/edge inlines,
    and a customInit() pass after every save."""
    search_fields = ['name']
    readonly_fields = ('active','minX','minY','maxX','maxY')
    list_display = ('name','active','minX','minY','maxX','maxY')
    inlines = [LaserPositionInline,PointInline,EdgeInline]

    def response_add(self, request, playground_obj):
        # Run customInit only here, after the object AND all inlines are saved
        # (see the reference link at the bottom of this module).
        playground_obj.customInit(playground_obj)
        return super(PlaygroundAdmin, self).response_add(request, playground_obj)

    def response_change(self, request, playground_obj):
        # Same post-save hook for the change (edit) flow.
        playground_obj.customInit(playground_obj)
        return super(PlaygroundAdmin, self).response_change(request, playground_obj)
class EdgeAdmin(admin.ModelAdmin):
    """Standalone Edge admin using the custom EdgeForm."""
    form = EdgeForm
# Model registrations: Point and LaserPosition use the default admin;
# Edge and Playground use the custom admin classes above.
admin.site.register(Point)
admin.site.register(LaserPosition)
admin.site.register(Edge, EdgeAdmin)
admin.site.register(Playground, PlaygroundAdmin)
#http://igorsobreira.com/2011/02/12/change-object-after-saving-all-inlines-in-django-admin.html
#class FooAdmin(admin.ModelAdmin):
# inlines = [RelatedInline]
# def response_add(self, request, new_object):
# obj = self.after_saving_model_and_related_inlines(new_object)
# return super(FooAdmin, self).response_add(request, obj)
# def response_change(self, request, obj):
# obj = self.after_saving_model_and_related_inlines(obj)
# return super(FooAdmin, self).response_change(request, obj)
# def after_saving_model_and_related_inlines(self, obj):
# print obj.related_set.all()
# # now we have what we need here... :)
# return obj
#class StepInline(admin.TabularInline):
# """Choice objects can be edited inline in the Poll editor."""
# model = Step
# extra = 3
#
#
#class TodoAdmin(admin.ModelAdmin):
# """Definition of the Poll editor."""
# fieldsets = [
# (None, {'fields': ['name', 'description', 'status']}),
# ('Date information', {'fields': ['creationDate']}),
# ('Additional Information', {'fields': ['author', 'executor', 'reviewer']})
# ]
# inlines = [StepInline]
# list_display = ('name', 'description', 'status', 'creationDate', 'author', 'executor', 'reviewer')
# list_filter = ['creationDate']
# search_fields = ['name']
# date_hierarchy = 'creationDate'
#
#admin.site.register(Todo, TodoAdmin)
|
[
"fritz@fzi.de"
] |
fritz@fzi.de
|
a9fbb33df727682c11e1b15540ae7077153dae8b
|
0646e6797453deefc17529df135c95ff9cad24a0
|
/Utoutou.py
|
012c489d9d0c8e0ea574c62b63584705339cac81
|
[] |
no_license
|
CzvZzz/Utoutou
|
82833449e0c79af99f06493ae5979c033ae4b746
|
af34794c247a0b5c3a10615fa6bcdab2e43ae5b2
|
refs/heads/master
| 2023-01-18T18:53:29.983920
| 2020-11-24T05:31:23
| 2020-11-24T05:31:23
| 315,527,445
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,227
|
py
|
import os
import time
import functions as ft
import random
import shutil
def run_app(size=10, *houzhui):
    """Watch for newly inserted USB drives and copy their files to C:\\.

    Runs forever.  Every 3 seconds the current drive list is compared with the
    one captured at startup; each extra drive is treated as a USB stick and
    handed to ft.usb_copy, which copies into a randomly named folder
    C:\\file_save-<n>.  A drive is only copied again if its directory size
    changed since the last copy.

    :param size: size limit forwarded to ft.usb_copy.  Fix: given a default
        of 10 (the value from the author's commented-out prototype) so the
        no-argument call in the __main__ guard no longer raises TypeError.
    :param houzhui: optional file-extension filter such as '.txt'
    """
    old_dirsize = []      # directory sizes already copied; used to skip unchanged drives
    base_disklist = []
    new_disklist = []
    if houzhui:
        # Keep only the first extension argument.
        # NOTE(review): the later `*houzhui` call unpacks this string
        # character by character — confirm ft.usb_copy's expected signature.
        houzhui = houzhui[0]
    # Drives present at startup; anything beyond these counts as a USB stick.
    base_disklist = ft.get_disklist()
    while(1):
        new_disklist = ft.get_disklist()
        if len(new_disklist) > len(base_disklist):
            for usb in new_disklist[len(base_disklist):]:
                file_save = 'C:\\file_save-' + str(random.randint(0, 1000))  # destination folder
                new_dirsize = ft.get_dirsize((usb + '\\'))
                if new_dirsize not in old_dirsize:
                    print('开始复制......')
                    ft.usb_copy((usb + '\\'), file_save, size, *houzhui)
                    old_dirsize.append(new_dirsize)
                    print('复制完成')
                else:
                    print('U盘无变化')
        else:
            print('暂时没U盘')
        time.sleep(3)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): run_app is invoked with no arguments although its first
    # parameter `size` is declared without a default at the definition above
    # — confirm the intended size/extension values.
    run_app()
|
[
"2225627922@qq.com"
] |
2225627922@qq.com
|
183457489b00900ae147a6eda4f919cc44b791ac
|
85e9ef01ed21fedcd84590bf78d6bd7d3128c2c5
|
/src/boxme_user/tests.py
|
84abb83f816672151e2ce21a1b7fd714ddd60667
|
[
"BSD-2-Clause"
] |
permissive
|
vituocgia/boxme-users
|
fcf0d00f18d5466103717fc815d9d05f5354f82e
|
0e8238b7852e697643a35c8e306d63446f27a6d2
|
refs/heads/master
| 2020-03-08T20:27:50.548747
| 2018-04-06T10:47:45
| 2018-04-06T10:47:45
| 128,382,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,564
|
py
|
"""BoxmeUser tests."""
from mock import patch
import os
import re
from unittest import skipIf, skipUnless
import django
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.core import mail
from django.core import management
from django.core.urlresolvers import reverse
from django.db import connection
from django.forms.fields import Field
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
from .forms import BoxmeUserChangeForm, BoxmeUserCreationForm
try:
from django.contrib.auth.middleware import SessionAuthenticationMiddleware
except ImportError:
# Only available from Django 1.7, ignore the tests otherwise
SessionAuthenticationMiddleware = None
class UserTest(TestCase):
    """Tests of the custom email-based user model's behaviour."""

    # Credentials shared by the helper and tests below.
    user_email = 'newuser@localhost.local'
    user_password = '1234'

    def create_user(self):
        """Create and return a new user with self.user_email as login and self.user_password as password."""
        return get_user_model().objects.create_user(self.user_email, self.user_password)

    def test_user_creation(self):
        """A freshly created user gets the right email, timestamps and flags."""
        # Create a new user saving the time frame
        right_now = timezone.now().replace(microsecond=0)  # MySQL doesn't store microseconds
        with patch.object(timezone, 'now', return_value=right_now):
            self.create_user()
        # Check user exists and email is correct
        self.assertEqual(get_user_model().objects.all().count(), 1)
        self.assertEqual(get_user_model().objects.all()[0].email, self.user_email)
        # Check date_joined and last_login dates
        self.assertEqual(get_user_model().objects.all()[0].date_joined, right_now)
        self.assertEqual(get_user_model().objects.all()[0].last_login, right_now)
        # Check flags
        self.assertTrue(get_user_model().objects.all()[0].is_active)
        self.assertFalse(get_user_model().objects.all()[0].is_staff)
        self.assertFalse(get_user_model().objects.all()[0].is_superuser)

    def test_user_get_full_name(self):
        """get_full_name falls back to the email address."""
        user = self.create_user()
        self.assertEqual(user.get_full_name(), self.user_email)

    def test_user_get_short_name(self):
        """get_short_name falls back to the email address."""
        user = self.create_user()
        self.assertEqual(user.get_short_name(), self.user_email)

    def test_email_user(self):
        """email_user sends exactly one message with the given fields."""
        # Email definition
        subject = "Email Subject"
        message = "Email Message"
        from_email = 'from@normal.com'
        user = self.create_user()
        # Test that no message exists
        self.assertEqual(len(mail.outbox), 0)
        # Send test email
        user.email_user(subject, message, from_email)
        # Test that one message has been sent
        self.assertEqual(len(mail.outbox), 1)
        # Verify that the email is correct
        self.assertEqual(mail.outbox[0].subject, subject)
        self.assertEqual(mail.outbox[0].body, message)
        self.assertEqual(mail.outbox[0].from_email, from_email)
        self.assertEqual(mail.outbox[0].to, [user.email])

    def test_email_user_kwargs(self):
        """email_user forwards send_mail keyword arguments unchanged."""
        # valid send_mail parameters
        kwargs = {
            "fail_silently": False,
            "auth_user": None,
            "auth_password": None,
            "connection": None,
        }
        user = get_user_model()(email='foo@bar.com')
        user.email_user(
            subject="Subject here",
            message="This is a message", from_email="from@domain.com", **kwargs)
        # Test that one message has been sent.
        self.assertEqual(len(mail.outbox), 1)
        # Verify that test email contains the correct attributes:
        message = mail.outbox[0]
        self.assertEqual(message.subject, "Subject here")
        self.assertEqual(message.body, "This is a message")
        self.assertEqual(message.from_email, "from@domain.com")
        self.assertEqual(message.to, [user.email])
class UserManagerTest(TestCase):
    """Tests of the custom user manager: creation flags and email normalization."""

    def test_create_user(self):
        """create_user without a password yields an active, non-staff user."""
        email_lowercase = 'normal@normal.com'
        user = get_user_model().objects.create_user(email_lowercase)
        self.assertEqual(user.email, email_lowercase)
        self.assertFalse(user.has_usable_password())
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_superuser(self):
        """create_superuser sets the staff and superuser flags."""
        email_lowercase = 'normal@normal.com'
        password = 'password1234$%&/'
        user = get_user_model().objects.create_superuser(email_lowercase, password)
        self.assertEqual(user.email, email_lowercase)
        self.assertTrue(user.check_password, password)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)

    def test_user_creation_is_active(self):
        """is_active can be overridden at creation time."""
        # Create deactivated user
        email_lowercase = 'normal@normal.com'
        password = 'password1234$%&/'
        user = get_user_model().objects.create_user(email_lowercase, password, is_active=False)
        self.assertFalse(user.is_active)

    def test_user_creation_is_staff(self):
        """is_staff can be overridden at creation time."""
        # Create staff user
        email_lowercase = 'normal@normal.com'
        password = 'password1234$%&/'
        user = get_user_model().objects.create_user(email_lowercase, password, is_staff=True)
        self.assertTrue(user.is_staff)

    def test_create_user_email_domain_normalize_rfc3696(self):
        """Only the domain part is lower-cased, even with '@' in the local part."""
        # According to http://tools.ietf.org/html/rfc3696#section-3
        # the "@" symbol can be part of the local part of an email address
        returned = get_user_model().objects.normalize_email(r'Abc\@DEF@EXAMPLE.com')
        self.assertEqual(returned, r'Abc\@DEF@example.com')

    def test_create_user_email_domain_normalize(self):
        """The domain part is lower-cased in the simple case."""
        returned = get_user_model().objects.normalize_email('normal@DOMAIN.COM')
        self.assertEqual(returned, 'normal@domain.com')

    def test_create_user_email_domain_normalize_with_whitespace(self):
        """Whitespace in the local part is preserved; domain still lower-cased."""
        returned = get_user_model().objects.normalize_email('email\ with_whitespace@D.COM')
        self.assertEqual(returned, 'email\ with_whitespace@d.com')

    def test_empty_username(self):
        """An empty email raises ValueError with the expected message."""
        self.assertRaisesMessage(
            ValueError,
            'The given email must be set',
            get_user_model().objects.create_user, email=''
        )
@skipIf(django.VERSION < (1, 7, 0), 'Migrations not available in this Django version')
class MigrationsTest(TestCase):
    """Migration hygiene plus the Django-1.7-only last_login monkey patch."""

    def test_makemigrations_no_changes(self):
        # Models and shipped migrations must be in sync.
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import StringIO  # Python 3
        # Capture stdout to inspect what makemigrations reports.
        with patch('sys.stdout', new_callable=StringIO) as captured:
            management.call_command('makemigrations', 'boxme_user', dry_run=True)
            self.assertEqual(captured.getvalue(), 'No changes detected in app \'boxme_user\'\n')

    @skipUnless(django.VERSION[:2] == (1, 7), 'Monkey patch only applied to Django 1.7')
    def test_monkey_patch_model_field(self):
        # The patched last_login field must accept NULL / blank values.
        last_login = get_user_model()._meta.get_field('last_login')
        self.assertTrue(last_login.null)
        self.assertTrue(last_login.blank)

    @skipUnless(django.VERSION[:2] == (1, 7), 'Monkey patch only applied to Django 1.7')
    def test_monkey_patch_db_column(self):
        # Introspect the real table to confirm the column itself is nullable.
        cursor = connection.cursor()
        columns = connection.introspection.get_table_description(cursor, get_user_model()._meta.db_table)
        last_login_col = next(col for col in columns if col.name == 'last_login')  # pragma: no cover
        self.assertTrue(last_login_col.null_ok)

    @skipUnless(django.VERSION[:2] == (1, 7), 'Monkey patch only applied to Django 1.7')
    def test_monkey_patch_side_effects(self):
        # The patch must not leak onto Django's own AbstractBaseUser.
        from django.contrib.auth.models import AbstractBaseUser
        last_login = AbstractBaseUser._meta.get_field('last_login')
        self.assertFalse(last_login.null)
        self.assertFalse(last_login.blank)
@skipIf(SessionAuthenticationMiddleware is None, "SessionAuthenticationMiddleware not available in this version")
class TestSessionAuthenticationMiddleware(TestCase):
    """Session behaviour around password changes, with and without
    SessionAuthenticationMiddleware enabled in MIDDLEWARE_CLASSES."""

    def setUp(self):
        self.user_email = 'test@example.com'
        self.user_password = 'test_password'
        self.user = get_user_model().objects.create_user(
            self.user_email,
            self.user_password)
        self.middleware_auth = AuthenticationMiddleware()
        self.middleware_session_auth = SessionAuthenticationMiddleware()
        self.assertTrue(self.client.login(
            username=self.user_email,
            password=self.user_password,
        ))
        # Hand-built request carrying the logged-in client's session.
        self.request = HttpRequest()
        self.request.session = self.client.session

    def _run_auth_middlewares(self):
        # Push the stored request through both middlewares, as Django would.
        self.middleware_auth.process_request(self.request)
        self.middleware_session_auth.process_request(self.request)

    def test_changed_password_doesnt_invalidate_session(self):
        # Without session verification enabled, a password change must NOT
        # log the user out.
        session_key = self.request.session.session_key
        self._run_auth_middlewares()
        self.assertIsNotNone(self.request.user)
        self.assertFalse(self.request.user.is_anonymous())
        self.user.set_password('new_password')
        self.user.save()
        self._run_auth_middlewares()
        self.assertIsNotNone(self.request.user)
        self.assertFalse(self.request.user.is_anonymous())
        self.assertEqual(session_key, self.request.session.session_key)

    def test_changed_password_invalidates_session_with_middleware(self):
        session_key = self.request.session.session_key
        with self.modify_settings(MIDDLEWARE_CLASSES={'append': ['django.contrib.auth.middleware.SessionAuthenticationMiddleware']}):
            # With the middleware enabled, a password change makes the user
            # anonymous ...
            self.user.set_password('new_password')
            self.user.save()
            self._run_auth_middlewares()
            self.assertIsNotNone(self.request.user)
            self.assertTrue(self.request.user.is_anonymous())
            # ... and flushes the session.
            self.assertNotEqual(session_key, self.request.session.session_key)
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class BoxmeUserCreationFormTest(TestCase):
    """Validation and save behaviour of BoxmeUserCreationForm."""

    def setUp(self):
        get_user_model().objects.create_user('testclient@example.com', 'test123')

    def test_user_already_exists(self):
        """Registering an email that already exists must fail on 'email'."""
        data = {
            'email': 'testclient@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = BoxmeUserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["email"].errors,
                         [force_text(form.error_messages['duplicate_email'])])

    def test_invalid_data(self):
        """A malformed email address is rejected."""
        data = {
            'email': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = BoxmeUserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["email"].errors,
                         [_('Enter a valid email address.')])

    def test_password_verification(self):
        """Mismatched passwords surface as an error on 'password2'."""
        data = {
            'email': 'testclient@example.com',
            'password1': 'test123',
            'password2': 'test',
        }
        form = BoxmeUserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        """Both password fields are required."""
        data = {'email': 'testclient@example.com'}
        form = BoxmeUserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        # Supplying only password2 still leaves password1 required.
        data['password2'] = 'test123'
        form = BoxmeUserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    def test_success(self):
        """A valid submission saves and returns a persisted user."""
        data = {
            'email': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = BoxmeUserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<%s: jsmith@example.com>' % get_user_model().__name__)
        self.assertIsNotNone(u.pk)

    def test_success_without_commit(self):
        """save(commit=False) returns an unsaved instance with no primary key."""
        data = {
            'email': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = BoxmeUserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save(commit=False)
        self.assertEqual(repr(u), '<%s: jsmith@example.com>' % get_user_model().__name__)
        # BUG FIX: the original `assertIsNone(u.pk, None)` passed None as the
        # assertion *message* argument; assertIsNone takes a single value.
        self.assertIsNone(u.pk)
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class BoxmeUserChangeFormTest(TestCase):
    """Tests for BoxmeUserChangeForm, including password-hash display paths."""

    @staticmethod
    def _create_user_with_hash(email, raw_hash):
        # Store the hash directly, bypassing set_password, so the form sees
        # exactly this value in the database.
        user = get_user_model().objects.create_user(email)
        user.password = raw_hash
        user.save()
        return user

    def setUp(self):
        # One user per password-hash scenario exercised below.
        self._create_user_with_hash('testclient@example.com', 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
        get_user_model().objects.create_user('empty_password@example.com')
        self._create_user_with_hash('unmanageable_password@example.com', '$')
        self._create_user_with_hash('unknown_password@example.com', 'foo$bar')

    def test_username_validity(self):
        target = get_user_model().objects.get(email='testclient@example.com')
        form = BoxmeUserChangeForm({'email': 'not valid'}, instance=target)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors,
                         [_('Enter a valid email address.')])

    def test_bug_14242(self):
        # Regression test, introduced by adding an optimization to the
        # BoxmeUserChangeForm.
        class GroupsHelpTextForm(BoxmeUserChangeForm):
            def __init__(self, *args, **kwargs):
                super(GroupsHelpTextForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(BoxmeUserChangeForm.Meta):
                fields = ('groups',)

        # Instantiating the subclass must not blow up.
        GroupsHelpTextForm({})

    def test_unsuable_password(self):
        # (method name typo kept: renaming would change which tests are run)
        target = get_user_model().objects.get(email='empty_password@example.com')
        target.set_unusable_password()
        target.save()
        rendered = BoxmeUserChangeForm(instance=target).as_table()
        self.assertIn(_("No password set."), rendered)

    def test_bug_17944_empty_password(self):
        target = get_user_model().objects.get(email='empty_password@example.com')
        rendered = BoxmeUserChangeForm(instance=target).as_table()
        self.assertIn(_("No password set."), rendered)

    def test_bug_17944_unmanageable_password(self):
        target = get_user_model().objects.get(email='unmanageable_password@example.com')
        self.assertIn(
            _("Invalid password format or unknown hashing algorithm."),
            BoxmeUserChangeForm(instance=target).as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        target = get_user_model().objects.get(email='unknown_password@example.com')
        self.assertIn(
            _("Invalid password format or unknown hashing algorithm."),
            BoxmeUserChangeForm(instance=target).as_table())

    def test_bug_19133(self):
        """The change form does not return the password value."""
        target = get_user_model().objects.get(email='testclient@example.com')
        # Build POST data from the form's own initial values ...
        post_data = BoxmeUserChangeForm(instance=target).initial
        # ... then try to overwrite the read-only password field; the form
        # must stay valid and hand back the stored hash, not the new value.
        post_data['password'] = 'new password'
        form = BoxmeUserChangeForm(instance=target, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')

    def test_bug_19349_bound_password_field(self):
        target = get_user_model().objects.get(email='testclient@example.com')
        form = BoxmeUserChangeForm(data={}, instance=target)
        # ReadOnlyPasswordHashWidget needs the initial value to render a
        # bound password field correctly.
        self.assertEqual(form.initial['password'], form['password'].value())
class BoxmeUserAdminTest(TestCase):
    """Admin-site integration: app/model naming, changelist URLs and the
    password-change form for the configured AUTH_USER_MODEL."""

    def setUp(self):
        self.user_email = 'test@example.com'
        self.user_password = 'test_password'
        self.user = get_user_model().objects.create_superuser(
            self.user_email,
            self.user_password)
        # Expected names depend on which concrete user model is under test.
        if settings.AUTH_USER_MODEL == "boxme_user.BoxmeUser":
            self.app_name = "boxme_user"
            self.model_name = "BoxmeUser"
            self.model_verbose_name = "user"
            self.model_verbose_name_plural = "Users"
            # Django < 1.7 derived the verbose app name with underscores.
            self.app_verbose_name = "Custom_User" if django.VERSION[:2] < (1, 7) else "Custom User"
        if settings.AUTH_USER_MODEL == "test_boxme_user_subclass.MyCustomBoxmeUser":
            self.app_name = "test_boxme_user_subclass"
            self.model_name = "mycustomBoxmeUser"
            self.model_verbose_name = "MyCustomBoxmeUserVerboseName"
            self.model_verbose_name_plural = "MyCustomBoxmeUserVerboseNamePlural"
            self.app_verbose_name = "Test_Custom_User_Subclass" if django.VERSION[:2] < (1, 7) else "Test Custom User Subclass"

    def _login(self):
        # Every admin view below requires an authenticated superuser session.
        self.assertTrue(self.client.login(
            username=self.user_email,
            password=self.user_password,
        ))

    def test_url(self):
        self._login()
        resp = self.client.get(reverse("admin:app_list", args=(self.app_name,)))
        self.assertEqual(resp.status_code, 200)

    def test_app_name(self):
        self._login()
        resp = self.client.get(reverse("admin:app_list", args=(self.app_name,)))
        self.assertEqual(resp.context['app_list'][0]['name'], self.app_verbose_name)

    def test_model_name(self):
        self._login()
        resp = self.client.get(reverse("admin:%s_%s_changelist" % (self.app_name, self.model_name)))
        self.assertEqual(force_text(resp.context['title']), "Select %s to change" % self.model_verbose_name)

    def test_model_name_plural(self):
        self._login()
        resp = self.client.get(reverse("admin:app_list", args=(self.app_name,)))
        self.assertEqual(force_text(resp.context['app_list'][0]['models'][0]['name']), self.model_verbose_name_plural)

    def test_user_change_password(self):
        self._login()
        user_change_url = reverse('admin:%s_%s_change' % (self.app_name, self.model_name), args=(self.user.pk,))
        if django.VERSION[:2] < (1, 8):
            # The named URL does not exist yet on old Django; build it by hand.
            password_change_url = user_change_url + 'password/'
        else:
            password_change_url = reverse('admin:auth_user_password_change', args=(self.user.pk,))
        resp = self.client.get(user_change_url)
        # The password help_text should link to the change-password form.
        relative_href = re.search(
            r'you can change the password using <a href="([^"]*)">this form</a>',
            force_text(resp.content)
        ).groups()[0]
        self.assertEqual(
            os.path.normpath(user_change_url + relative_href),
            os.path.normpath(password_change_url)
        )
        # The linked page itself must resolve.
        self.assertEqual(
            self.client.get(password_change_url).status_code,
            200,
        )
|
[
"dotiendiep@gmail.com"
] |
dotiendiep@gmail.com
|
06a39520122965e6fb78d95ec197c866638a4c58
|
ebdd36e75bccc8c3f80e231b4b0409c2cceaea20
|
/UPTEES/pmids.py
|
b1ca3f99fc6e24d9f773b1b0c79b0d827f5f54b9
|
[] |
no_license
|
MaximumEntropy/UPSITE
|
c9865dd675fe8dfd821de9d6af84aa74f224605d
|
77b29ce00c3e0b35d566b6935871e4dda184ec79
|
refs/heads/master
| 2021-01-10T16:19:42.139505
| 2015-10-28T07:32:02
| 2015-10-28T07:32:02
| 45,011,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,193
|
py
|
'''
Created on Mar 1, 2014
@author: Adam
'''
# from pyquery import PyQuery as pq
import urllib2
import xml.etree.ElementTree as ET
import urllib
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
def get_xml(url):
    """Fetch *url* and return the raw response body, or None if *url* is falsy.

    Fixes in this revision: no longer shadows the builtin ``file``; the
    connection is closed even if read() raises; works on both Python 2
    (``urllib.urlopen``) and Python 3 (``urllib.request.urlopen``).  On
    Python 3 the body is bytes, which ``ET.fromstring`` accepts downstream.
    """
    if not url:
        return None
    try:
        from urllib import urlopen  # Python 2
    except ImportError:
        from urllib.request import urlopen  # Python 3
    response = urlopen(url)
    try:
        return response.read()
    finally:
        # Always release the connection.
        response.close()
def make_search_url(base_URL, q, articles, addition):
    """Build an NCBI esearch URL for PubMed.

    :param base_URL: eutils base URL (must end with '/').
    :param q: unused; kept for backward compatibility with existing callers.
    :param articles: maximum number of IDs to request (retmax).
    :param addition: the actual search term placed in the query.
    :return: the full esearch URL string.
    """
    # NOTE(review): `q` is never used -- the query comes from `addition`.
    # The unused local `title_abstract_add` ("[tiab]") has been removed.
    max_papers = "&retmax=%d" % articles
    search_url_add = "esearch.fcgi?db=pubmed&term=(%s)" % addition
    return base_URL + search_url_add + max_papers
def get_ID_list(xml, articles):
    """Extract up to *articles* PubMed IDs from an esearch XML response.

    Returns an empty list (after printing a notice) when the XML cannot be
    parsed at all.

    Fixes in this revision: reads ``element.text`` directly instead of
    ``ET.tostring(..., method='text')``, which returns bytes on Python 3 and
    made the subsequent str ``.replace`` crash; also catches ``ET.ParseError``
    and ``TypeError`` (xml is None) so malformed/missing input takes the
    empty-result path instead of raising.
    """
    try:
        root = ET.fromstring(xml)
        # One stripped ID string per <Id> element under <IdList>.
        ids = [(element.text or "").strip()
               for element in root.findall("./IdList/Id")]
    except (ET.ParseError, AttributeError, TypeError):
        ids = []
        print("No Papers with your query were found on pubmed")
    # Cap the result at the requested number of articles.
    return ids[:articles]
def make_fetch_url(base_URL, get_abstract_portion_URL, ids, articles):
    """Build an efetch URL for ids["papers_to_download"], or None if empty.

    :param base_URL: eutils base URL (must end with '/').
    :param get_abstract_portion_URL: query fragment selecting abstracts.
    :param ids: dict with a "papers_to_download" list of PubMed ID strings.
    :param articles: maximum number of records to fetch (retmax).

    Fix in this revision: the original else-branch computed the full URL with
    an empty ID list and then returned None anyway; that dead duplicate work
    is removed.
    """
    id_list = ids["papers_to_download"]
    if not id_list:
        return None
    max_papers = "&retmax=%d" % articles
    fetch_url_add = "efetch.fcgi?db=pubmed&id=%s" % ",".join(id_list)
    return base_URL + fetch_url_add + get_abstract_portion_URL + max_papers
def get_info_from_docs_xml(xml, ids):
    """Parse an efetch XML response into lists of IDs, titles and abstracts.

    :param xml: efetch XML document (str or bytes).
    :param ids: unused; kept for backward compatibility with existing callers.
    :return: dict with keys "fetched_id_list", "title_list", "abstract_list",
        and "authors_list" (the latter always empty, as in the original).

    Fix in this revision: the original used the Python-2-only name ``unicode``
    (a NameError on Python 3).  Non-ASCII characters are still dropped, as the
    original's ``encode('ascii', 'ignore')`` did.
    """
    root = ET.fromstring(xml)

    def _collect(path):
        # Gather .text for every matching element; None entries are kept to
        # preserve the original list lengths.
        values = []
        for node in root.findall(path):
            text = node.text
            if isinstance(text, str):
                # Strip non-ASCII (mirrors the original encode/ignore).
                text = text.encode('ascii', 'ignore').decode('ascii')
            values.append(text)
        return values

    id_list = _collect(".//ArticleId[@IdType='pubmed']")
    title_list = _collect(".//ArticleTitle")
    abstract_list = _collect(".//AbstractText")
    authors_list = []
    return {"fetched_id_list": id_list, "title_list": title_list, "abstract_list": abstract_list, "authors_list": authors_list}
def get_info_from_PubMed(q, num_articles, addition):  # Creates URL to search PubMed
    """Search PubMed for *addition* and return up to *num_articles* IDs.

    Returns an empty dict when the search URL would exceed common URL-length
    limits (~2000 chars) -- the inconsistent {}-vs-list return is kept because
    callers may rely on it.

    Fixes in this revision: the Python-2-only ``print`` statement is now a
    function call (valid on both 2 and 3); the unused local
    ``get_abstract_portion_URL`` was removed.
    """
    base_URL = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
    search_url = make_search_url(base_URL, q, num_articles, addition)
    if len(search_url) > 2000:
        print('---------------------URL TOO LONG-----------------------------')
        return {}
    id_xml_as_String = get_xml(search_url)
    return get_ID_list(id_xml_as_String, num_articles)
def main(q, num_articles, q_search_string, evaluation_mode):
    """Dispatch on evaluation_mode: [] for 'yes', a live PubMed lookup for 'no'."""
    if evaluation_mode == 'yes':
        # Evaluation mode skips the network round-trip entirely.
        pmid_list = []
    elif evaluation_mode == 'no':
        pmid_list = get_info_from_PubMed(q, num_articles, q_search_string)
    # NOTE: any other evaluation_mode raises UnboundLocalError here, exactly
    # as the original did.
    return pmid_list
if __name__=="__main__":
from optparse import OptionParser
optparser = OptionParser(description="Get XML from PubMed")
optparser.add_option("-q", "--query1", default='TERT', dest="q1", help="query1")
optparser.add_option("-w", "--query2", default='MDM2', dest="q2", help="query2")
optparser.add_option("-n", "--numpapers", default=2, dest="articles", help="Number of Pubmed Papers")
optparser.add_option("-o", "--output", default="/home/ubuntu/Documents/upsite/protein.txt", dest="output", help="output directory")
(options, args) = optparser.parse_args()
main(options.q1, options.q2, options.articles)
|
[
"sandeeps@andrew.cmu.edu"
] |
sandeeps@andrew.cmu.edu
|
485b8b9c841c27300eb40533324e9c12738f4376
|
48e81d5d338af60abf907bc6416cff74c3e771c8
|
/bokeh_app/myapp.py
|
b1b21c7bd45bb787c66743c35f276990d9264820
|
[] |
no_license
|
peterrayshen/monkey-reach
|
195db88f933da39783d484278c46ac058e8df6ff
|
a7608776c49b0c16975e81f4b895f4558fd0d9dc
|
refs/heads/master
| 2020-04-15T19:58:27.458913
| 2019-01-10T03:25:05
| 2019-01-10T03:25:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,752
|
py
|
from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column
from bokeh.models import ColumnDataSource, Span
from bokeh.models.widgets import Slider, Panel, Tabs
from bokeh.plotting import figure
import pandas as pd
# Glyph dimensions for the spike-raster rectangles.
rect_width = 0.003
rect_height = 0.7

# Session names, e.g. 'mt_s1' is Monkey T, session 1.
sessions = ['mm_s1', 'mt_s1', 'mt_s2', 'mt_s3']

session_exp_data = {}   # per-session experiment dataframes, keyed by session
all_session_plots = {}  # per-session plot/widget handles, filled in later

# Load the pickled dataframes for every session.
for name in sessions:
    session_exp_data[name] = {
        # per-trial metadata such as start/end times and target locations
        'trials_info': pd.read_pickle('data/{}/trials_info.pickle'.format(name)),
        # rows: [time(1000Hz), x_pos, y_pos]
        'cursor_pos': pd.read_pickle('data/{}/cursor_pos.pickle'.format(name)),
        # rows: [time(10Hz), population rate]
        'pop_rate': pd.read_pickle('data/{}/f_rate.pickle'.format(name)),
        # rows: [time(10Hz), burst probability]
        'burst_prob': pd.read_pickle('data/{}/burst_prob.pickle'.format(name)),
        # rows: [time(1000Hz), one 0/1 column per neuron; 1 == spike]
        'spikes': pd.read_pickle('data/{}/spikes.pickle'.format(name)),
    }
# listener for slider that updates time in experiment
def update_time(attrname, old, new):
    """Bokeh slider callback: re-center every plot on the new time value.

    Re-reads the active tab's time slider, then refreshes the cursor marker,
    the +/-2000-index windows of the population-rate, burst-probability and
    spike-raster data sources, and moves the "current time" vertical span on
    each plot.  Reads the module-level `sessions`, `tabs`, `all_session_plots`
    and `session_exp_data`.  `attrname`/`old`/`new` are the standard Bokeh
    on_change arguments; the slider value is re-read below instead of using
    `new`.  (Time units appear to be milliseconds -- confirm against the data.)
    """
    curr_session = sessions[tabs.active]
    # get plot objects for the currently active tab
    curr_time_slider = all_session_plots[curr_session]['time_slider']
    curr_source_pos = all_session_plots[curr_session]['source_pos']
    curr_source_spikes = all_session_plots[curr_session]['source_spikes']
    curr_source_pop_rate = all_session_plots[curr_session]['source_pop_rate']
    curr_source_burst_prob = all_session_plots[curr_session]['source_burst_prob']
    curr_pop_rate_vline = all_session_plots[curr_session]['pop_rate_vline']
    curr_burst_vline = all_session_plots[curr_session]['burst_vline']
    curr_spike_raster_vline = all_session_plots[curr_session]['spike_raster_vline']
    # get the session's dataframes
    curr_pos_df = session_exp_data[curr_session]['cursor_pos']
    curr_rate_df = session_exp_data[curr_session]['pop_rate']
    curr_burst_df = session_exp_data[curr_session]['burst_prob']
    curr_spikes = session_exp_data[curr_session]['spikes']
    # Get the current slider value (the new time to display)
    curr_time = curr_time_slider.value
    # update cursor position (single-point data source)
    new_pos = {}
    x_pos = curr_pos_df.loc[curr_time]['x']
    y_pos = curr_pos_df.loc[curr_time]['y']
    new_pos['x'] = [x_pos, ]
    new_pos['y'] = [y_pos, ]
    curr_source_pos.data = new_pos
    # update population rate plot with a +/-2000 window around curr_time
    new_pop_rate = dict()
    new_pop_rate['x'] = curr_rate_df.loc[curr_time-2000:curr_time+2000].index.tolist()
    new_pop_rate['y'] = curr_rate_df.loc[curr_time-2000:curr_time+2000].tolist()
    curr_source_pop_rate.data = new_pop_rate
    # update burst probability plot over the same window
    new_burst_prob = dict()
    new_burst_prob['x'] = curr_burst_df.loc[curr_time-2000:curr_time+2000].index.tolist()
    new_burst_prob['y'] = curr_burst_df.loc[curr_time-2000:curr_time+2000].tolist()
    curr_source_burst_prob.data = new_burst_prob
    new_spike_raster = dict()
    # update spike raster plot: for each unit, keep only times where it fired
    # (value == 1) and scale by unit index to place it on its raster row
    all_units = []
    curr_spikes_in_range = curr_spikes.loc[curr_time-2000:curr_time+2000]
    for e, col in enumerate(curr_spikes_in_range.columns):
        unit_spikes = curr_spikes_in_range.iloc[:, e]
        unit_spikes = unit_spikes[unit_spikes == 1]
        all_units.append(unit_spikes * (e + 1))
    all_units_series = pd.concat(all_units)
    new_spike_raster['x'] = all_units_series.index
    new_spike_raster['y'] = all_units_series
    curr_source_spikes.data = new_spike_raster
    # move the "current time" vertical markers on all three plots
    curr_pop_rate_vline.location = curr_time
    curr_burst_vline.location = curr_time
    curr_spike_raster_vline.location = curr_time
# listener for slider that updates trial #
def update_trial(attrname, old, new):
    """Bokeh slider callback: switch the active session to another trial.

    Re-bounds the time slider to the selected trial's start/end (converted
    from what appear to be seconds to milliseconds -- confirm) and redraws the
    four reach targets for that trial.  Setting the time slider's value also
    fires update_time, which refreshes the remaining plots.  Reads the
    module-level `sessions`, `tabs`, `all_session_plots` and
    `session_exp_data`; `attrname`/`old`/`new` are Bokeh's standard arguments.
    """
    curr_session = sessions[tabs.active]
    # get necessary data and plot objects for the active tab
    curr_time_slider = all_session_plots[curr_session]['time_slider']
    curr_trials_slider = all_session_plots[curr_session]['trials_slider']
    curr_trials_info = session_exp_data[curr_session]['trials_info']
    curr_source_targets = all_session_plots[curr_session]['source_targets']
    # slider is 1-based; trials_info rows are 0-based
    trial_index = curr_trials_slider.value - 1
    curr_start_time = curr_trials_info.loc[trial_index, 'start']
    curr_end_time = curr_trials_info.loc[trial_index, 'end']
    # update current time slider to match this trial's duration
    curr_time_slider.value = round(curr_start_time*1000)
    curr_time_slider.start = round(curr_start_time*1000)
    curr_time_slider.end = round(curr_end_time*1000)
    # update target positions for this trial
    new_targets = dict()
    target_x_cols = ['target1_x', 'target2_x', 'target3_x', 'target4_x']
    target_y_cols = ['target1_y', 'target2_y', 'target3_y', 'target4_y']
    target_x_list = curr_trials_info.loc[trial_index, target_x_cols].tolist()
    target_y_list = curr_trials_info.loc[trial_index, target_y_cols].tolist()
    new_targets['x'] = target_x_list
    new_targets['y'] = target_y_list
    curr_source_targets.data = new_targets
# plot initial data
# ---------------------------------------------------------------------------
# Build one tab of linked plots + sliders per session, initialised to trial 1,
# and stash references to every live object in `all_session_plots` so the
# slider callbacks above can update them in place.
for session_name in sessions:
    session_plot_data = {}
    curr_exp_data = session_exp_data[session_name]
    # unpack this session's dataframes
    curr_pos_df = curr_exp_data['cursor_pos']
    curr_rate_df = curr_exp_data['pop_rate']
    curr_burst_df = curr_exp_data['burst_prob']
    curr_spikes = curr_exp_data['spikes']
    # trial 1 start/end (appear to be seconds, scaled to ms indices -- confirm)
    start_time = curr_exp_data['trials_info'].iloc[0]['start']
    start_time = int(1000*start_time)
    end_time = curr_exp_data['trials_info'].iloc[0]['end']
    end_time = int(1000 * end_time)
    num_trials = curr_exp_data['trials_info'].shape[0]
    # initial spike raster: per unit, keep rows where it fired (== 1) and
    # multiply by (unit index + 1) so each unit gets its own raster row
    all_units = []
    for e, col in enumerate(curr_spikes.columns):
        unit_spikes = curr_spikes.loc[start_time:end_time].iloc[:, e]
        unit_spikes = unit_spikes[unit_spikes == 1]
        all_units.append(unit_spikes * (e + 1))
    all_units_series = pd.concat(all_units)
    spike_raster_x = all_units_series.index
    spike_raster_y = all_units_series
    target_x_cols = ['target1_x', 'target2_x', 'target3_x', 'target4_x']
    target_y_cols = ['target1_y', 'target2_y', 'target3_y', 'target4_y']
    # initial target locations (trial 1)
    targets_x = curr_exp_data['trials_info'].iloc[0][target_x_cols].tolist()
    targets_y = curr_exp_data['trials_info'].iloc[0][target_y_cols].tolist()
    # initial cursor location at the trial's start time
    cursor_x = curr_exp_data['cursor_pos'].loc[start_time]['x']
    cursor_y = curr_exp_data['cursor_pos'].loc[start_time]['y']
    # burst prob and firing rate timestamps/values in a +/-2000 window
    ts_frate = curr_rate_df.loc[start_time - 2000:start_time + 2000].index.tolist()
    values_frate = curr_rate_df.loc[start_time - 2000:start_time + 2000].tolist()
    ts_burst = curr_burst_df.loc[start_time - 2000:start_time + 2000].index.tolist()
    values_burst = curr_burst_df.loc[start_time - 2000:start_time + 2000].tolist()
    # Set up data sources (the callbacks mutate .data on these in place)
    source_targets = ColumnDataSource(data=dict(x=targets_x, y=targets_y))
    source_pos = ColumnDataSource(data=dict(x=[cursor_x, ], y=[cursor_y, ]))
    source_pop_rate = ColumnDataSource(data=dict(x=ts_frate, y=values_frate))
    source_burst_prob = ColumnDataSource(data=dict(x=ts_burst, y=values_burst))
    source_spike_raster = ColumnDataSource(data=dict(x=spike_raster_x, y=spike_raster_y))
    # Set up plots: cursor/target view plus three stacked time-series panels
    plot_pos = figure(plot_height=500, plot_width=500, x_range=(-10, 10), y_range=(-10, 10))
    plot_spike_raster = figure(plot_height=200, plot_width=400, x_axis_label='time (ms)',
                               y_axis_label='Neuron #')
    plot_pop_rate = figure(plot_height=150, plot_width=400, x_axis_label='time (ms)', y_axis_label='Pop. Rate (Hz)')
    plot_burst_prob = figure(plot_height=150, plot_width=400, x_axis_label='time (ms)', y_axis_label='Burst Probability %')
    plot_pos.asterisk('x', 'y', size=10, color='red', source=source_pos)
    plot_pos.circle('x', 'y', size=24, alpha=0.4, source=source_targets)
    plot_pop_rate.line('x', 'y', source=source_pop_rate)
    # dotted vertical spans mark the current time on each time-series plot
    pop_rate_vline = Span(location=start_time,
                          dimension='height', line_color='black',
                          line_dash='dotted', line_width=2, line_alpha=0.8)
    plot_pop_rate.add_layout(pop_rate_vline)
    plot_burst_prob.line('x', 'y', source=source_burst_prob)
    burst_vline = Span(location=start_time,
                       dimension='height', line_color='black',
                       line_dash='dotted', line_width=2, line_alpha=0.8)
    plot_burst_prob.add_layout(burst_vline)
    plot_spike_raster.rect('x', 'y', rect_width, rect_height, source=source_spike_raster)
    spike_raster_vline = Span(location=start_time,
                              dimension='height', line_color='black',
                              line_dash='dotted', line_width=2, line_alpha=0.8)
    plot_spike_raster.add_layout(spike_raster_vline)
    time_slider = Slider(title="time", value=start_time, start=start_time, end=end_time, step=100)
    trial_slider = Slider(title="Trial Number", value=1, start=1, end=num_trials, step=1)
    # Set up sliders and hook them to the callbacks defined above
    time_box = widgetbox(time_slider)
    trial_box = widgetbox(trial_slider)
    time_slider.on_change('value', update_time)
    trial_slider.on_change('value', update_trial)
    # set up layouts: position plot on the left, time-series column on the right
    plots = row(plot_pos, column(plot_spike_raster, plot_pop_rate, plot_burst_prob))
    session_plots = column(trial_box, plots, time_box)
    tab = Panel(child=session_plots, title=session_name)
    # store references of plot objects, for real time updating later
    session_plot_data['tab'] = tab
    session_plot_data['time_slider'] = time_slider
    session_plot_data['trials_slider'] = trial_slider
    session_plot_data['source_targets'] = source_targets
    session_plot_data['source_pos'] = source_pos
    session_plot_data['source_spikes'] = source_spike_raster
    session_plot_data['source_pop_rate'] = source_pop_rate
    session_plot_data['source_burst_prob'] = source_burst_prob
    session_plot_data['pop_rate_vline'] = pop_rate_vline
    session_plot_data['burst_vline'] = burst_vline
    session_plot_data['spike_raster_vline'] = spike_raster_vline
    all_session_plots[session_name] = session_plot_data
# Assemble one tab per session and register the document with the Bokeh server.
tabs_list = [plots['tab'] for plots in all_session_plots.values()]
tabs = Tabs(tabs=tabs_list)
curdoc().add_root(tabs)
curdoc().title = "Macaque Reach Experiment"
|
[
"peterrayshen@gmail.com"
] |
peterrayshen@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.