blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
223aba0f3a6f0830d35ca6c772b7bd4a586e3e03 | 5e7aee7be8f1e99129957bbd26b93a0a22638b56 | /py/model.py | e3bf74cc518d1bee778f0d12e3c3a9981afed8c8 | [
"MIT"
] | permissive | alexisperrier/rabotnik | fa0391ccc62bf1c203a227ffe83ac1c32c738821 | e629118a692ea65dc39bf323f74096eec6c120e5 | refs/heads/master | 2023-04-08T15:09:49.843081 | 2021-04-15T11:02:22 | 2021-04-15T11:02:22 | 277,247,779 | 0 | 0 | null | 2021-04-15T11:02:22 | 2020-07-05T06:53:47 | Python | UTF-8 | Python | false | false | 13,710 | py | '''
Contain classes for most major database tables
Each class offers methods to insert, update, upsert ...
For instance the class Channel has the following methods:
- create: inserts a new channel in table channel
- update: updates data for a given channel_id, data is from API
- update_from_feed: updates data for a given channel_id, data is from RSS feed
'''
from .text import *
from .job import *
import datetime
import pytz
import urllib
from xml.etree import ElementTree
import html
class Model(object):
    """Shared base class for the table-backed helper classes below.

    # TODO: remove — it carries no behavior and is not used by subclasses.
    """

    def __init__(self):
        # Intentionally stateless: subclasses expose classmethods only.
        pass
class Comment(Model):
    """Insert helper for the ``comments`` table."""

    @classmethod
    def create(cls,d):
        """Insert one comment row; duplicates on comment_id are ignored.

        d: object exposing comment_id, video_id, discussion_id, parent_id,
        author_name, author_channel_id, text, reply_count, like_count and
        published_at.
        Returns the number of rows inserted (0 or 1); returns 0 on any error.
        """
        try:
            # NOTE(review): field values are interpolated straight into the
            # SQL (f-string / $$-quoting) — vulnerable to SQL injection if a
            # field ever contains '$$' or a quote; parameterized queries are
            # the safe alternative.
            sql = f'''
            insert into comments (comment_id, video_id, discussion_id, parent_id,
            author_name, author_channel_id,
            text, reply_count, like_count,
            published_at, created_at, updated_at)
            values ('{d.comment_id}', '{d.video_id}', {d.discussion_id}, '{d.parent_id}',
            $${TextUtils.to_db(d.author_name)}$$, '{d.author_channel_id}',
            $${TextUtils.to_db(d.text)}$$, {d.reply_count}, {d.like_count},
            '{d.published_at}', now(), now())
            on conflict (comment_id) DO NOTHING
            '''
            job.execute(sql)
            # rowcount is 1 when inserted, 0 when the conflict clause fired.
            return job.db.cur.rowcount
        except:
            # Bare except deliberately swallows DB errors; caller only sees 0.
            return 0
class Discussion(Model):
    """Insert helper for the ``discussions`` table."""

    @classmethod
    def create(cls,d):
        """Insert one discussion row and return its generated id.

        Returns None on error.  NOTE(review): when the row already exists
        (ON CONFLICT ... DO NOTHING) nothing is RETURNed, fetchone() yields
        None, the [0] subscript raises, and the except path returns None —
        so "already exists" and "failed" are indistinguishable to callers.
        """
        try:
            sql = f'''
            insert into discussions (video_id, total_results, results_per_page, error, created_at, updated_at)
            values ('{d.video_id}', {d.total_results}, {d.results_per_page}, $${TextUtils.to_db(d.error)}$$, now(), now())
            on conflict (video_id) DO NOTHING
            RETURNING id;
            '''
            job.execute(sql)
            return job.db.cur.fetchone()[0]
        except:
            return None
class VideoStat(Model):
    """Insert helper for the ``video_stat`` time-series table."""

    @classmethod
    def create(cls, d):
        """Insert one statistics snapshot for a video.

        Mandatory columns come from d.video_id / d.source / d.viewed_at;
        each optional counter column is included only when the attribute is
        present on ``d``.  Duplicate (video_id, viewed_at) pairs are ignored.
        Returns rows inserted (0 or 1); returns 0 on any error.
        """
        try:
            fields = "video_id, source, viewed_at"
            values = f"'{d.video_id}', '{d.source}', '{d.viewed_at}'"
            for field in ['views','like_count','dislike_count','favorite_count','comment_count']:
                if hasattr(d, field):
                    # BUGFIX: was ``d[field]`` (subscription) immediately
                    # after a hasattr() check; for plain attribute objects
                    # that raised TypeError, which the bare except silently
                    # turned into "0 rows inserted".  getattr matches the
                    # hasattr guard.
                    val = int(getattr(d, field))
                    fields += f",{field}"
                    values += f", {val}"
            sql = f'''
            insert into video_stat as cs ({fields})
            values ({values})
            on conflict (video_id, viewed_at) DO NOTHING;
            '''
            job.execute(sql)
            return job.db.cur.rowcount
        except:
            # Best-effort insert: any failure is reported as 0 rows.
            return 0
class Channel(object):
    """Insert/update helpers for the ``channel`` table."""

    @classmethod
    def create(cls, channel_id, origin):
        """Insert a bare channel row; no-op when channel_id already exists.

        Returns rows inserted (0 or 1).
        """
        sql = f'''
        insert into channel (channel_id, origin)
        values ('{channel_id}','{origin}')
        on conflict (channel_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def update(cls, d):
        """Refresh channel metadata from an API payload ``d``.

        Returns the number of rows updated.
        """
        sql = f'''
        update channel set
        created_at = '{d.created_at}',
        title = $${TextUtils.to_db(d.title)}$$,
        description = $${TextUtils.to_db(d.description)}$$,
        thumbnail = '{d.thumbnail}',
        show_related = '{d.show_related}',
        custom_url = '{d.custom_url}',
        country = '{d.country}',
        retrieved_at = now()
        where channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def update_from_feed(cls, d):
        """Refresh activity fields from an RSS-feed payload ``d``.

        Returns the number of rows updated.
        """
        if d.activity is not None:
            str_activity = f"activity = '{d.activity}',"
        else:
            str_activity = "activity = null,"
        # BUGFIX: this guard previously re-tested d.activity (copy/paste),
        # so a feed with activity set but activity_score of None rendered
        # the literal "activity_score = None," into the SQL and broke the
        # statement.  Test the field that is actually being rendered.
        if d.activity_score is not None:
            str_activity_score = f"activity_score = {d.activity_score},"
        else:
            str_activity_score = "activity_score = null,"
        sql = f'''
        update channel set
        {str_activity}
        {str_activity_score}
        rss_next_parsing = NOW() + interval '{d.frequency}',
        retrieved_at = now()
        where channel_id = '{d.channel_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class ChannelTopic(Model):
    """Upsert helper for the ``topic`` table (one row per channel)."""

    @classmethod
    def upsert(cls,d):
        """Insert or refresh the topics for d.channel_id.

        A None topics value is stored as SQL NULL; otherwise the topics
        string is stored as-is.  Nothing is returned.
        """
        if d.topics is None:
            sql = f'''
            insert into topic as tpc (channel_id, topics, created_at)
            values ('{d.channel_id}',Null, now())
            on conflict (channel_id) do update
            set topics = Null, created_at = now()
            where tpc.channel_id = '{d.channel_id}'
            '''
        else:
            sql = f'''
            insert into topic as tpc (channel_id, topics, created_at)
            values ('{d.channel_id}','{d.topics}', now())
            on conflict (channel_id) do update
            set topics = '{d.topics}', created_at = now()
            where tpc.channel_id = '{d.channel_id}'
            '''
        job.execute(sql)
class ChannelStat(Model):
    """Upsert helper for the ``channel_stat`` table (one row per channel)."""

    @classmethod
    def upsert(cls,d):
        """Insert or refresh a channel's statistics.

        When d.hidden_subscribers_count is truthy the ``subscribers``
        column is deliberately omitted (the channel hides the count), so a
        previously stored value is left untouched.  Returns rows affected.
        """
        if d.hidden_subscribers_count:
            sql = f'''
            insert into channel_stat as cs
            (channel_id, views, videos, retrieved_at)
            values
            ('{d.channel_id}', {d.views}, {d.videos}, now())
            on conflict (channel_id) do update
            set views = {d.views},
            videos = {d.videos},
            retrieved_at = now()
            where cs.channel_id = '{d.channel_id}'
            '''
        else:
            sql = f'''
            insert into channel_stat as cs
            (channel_id, views, subscribers, videos, retrieved_at)
            values
            ('{d.channel_id}', {d.views}, {d.subscribers}, {d.videos}, now())
            on conflict (channel_id) do update
            set views = {d.views},
            subscribers = {d.subscribers},
            videos = {d.videos},
            retrieved_at = now()
            where cs.channel_id = '{d.channel_id}'
            '''
        job.execute(sql)
        return job.db.cur.rowcount
class IndexSearch(Model):
    """Upsert helper for the full-text-search column in ``augment``."""

    @classmethod
    def upsert(cls,d):
        """Store the French tsvector of d.refined_lemma for d.video_id.

        Returns the number of rows affected.
        """
        sql = f'''
        insert into augment as au (video_id, tsv_lemma, created_at)
        values ( '{d.video_id}', to_tsvector('french', $${TextUtils.to_db(d.refined_lemma)}$$), now() )
        on conflict (video_id) do update
        set tsv_lemma = to_tsvector('french', $${TextUtils.to_db(d.refined_lemma)}$$),
        created_at = now()
        where au.video_id = '{d.video_id}'
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class Video(Model):
    """Insert/update helpers for the ``video`` table."""

    @classmethod
    def update(cls, d):
        """Refresh full video metadata from an API payload ``d``.

        Returns rows updated; on failure prints the failing SQL, reconnects
        the DB session and returns 0.
        """
        sql = f'''
        update video set
        published_at = '{d.published_at}',
        channel_id = '{d.channel_id}',
        title = $${TextUtils.to_db(d.title)}$$,
        summary = $${TextUtils.to_db(d.summary)}$$,
        thumbnail = '{d.thumbnail}',
        category_id = {d.category_id},
        duration = '{d.duration}',
        caption = {d.caption},
        privacy_status = '{d.privacy_status}',
        tags = $${TextUtils.to_db(d.tags)}$$,
        pubdate = '{d.pubdate}',
        live_content = '{d.live_content}',
        default_audio_language = '{d.default_audio_language}',
        default_language = '{d.default_language}',
        wikitopics = $${TextUtils.to_db(d.wikitopics)}$$,
        seconds = {d.seconds},
        retrieved_at = now()
        where video_id = '{d.video_id}'
        '''
        try:
            job.execute(sql)
            return job.db.cur.rowcount
        except:
            # Dump the SQL for debugging and recover the connection.
            print("=="*20)
            print("FAILED")
            print(sql)
            print("=="*20)
            job.reconnect()
            return 0

    @classmethod
    def create_from_feed(cls, d):
        """Insert a video discovered via RSS; no-op on duplicate video_id."""
        sql = f'''
        insert into video
        (video_id,channel_id,title,summary,origin,published_at)
        values
        ('{d.video_id}', '{d.channel_id}',$${TextUtils.to_db(d.title)}$$,$${TextUtils.to_db(d.summary)}$$,'{d.origin}','{d.published_at}')
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def create_from_id(cls, video_id, origin):
        """Insert a bare video row; no-op on duplicate video_id."""
        sql = f'''
        insert into video
        (video_id,origin)
        values
        ('{video_id}', '{origin}')
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def bulk_create(cls, video_ids, origin):
        """Insert many bare video rows in a single statement.

        Returns rows inserted; 0 for an empty id list.
        """
        # BUGFIX: ``values`` was never initialised, so the first append
        # raised NameError and this method could never have worked.
        values = []
        for video_id in video_ids:
            values.append(f"('{video_id}', '{origin}')")
        # Guard the empty case: "values " with no tuples is invalid SQL.
        if not values:
            return 0
        # NOTE(review): unlike create_from_id there is no ON CONFLICT
        # clause here, so duplicate ids will raise — confirm intended.
        sql = f''' insert into video (video_id,origin) values {','.join(values)} '''
        job.execute(sql)
        return job.db.cur.rowcount
class Pipeline(Model):
    """Helpers for the per-item processing-state ``pipeline`` table.

    All methods take keyword arguments: ``idname`` is the id column name
    (e.g. 'video_id') and ``item_id`` its value.
    """

    @classmethod
    def update_status(cls, **kwargs):
        """Set the pipeline status for one item; returns rows updated."""
        sql = f" update pipeline set status = '{kwargs['status']}' where {kwargs['idname']}= '{kwargs['item_id']}' "
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def update_lang(cls, **kwargs):
        """Set detected language + confidence for one item; returns rows updated."""
        sql = f" update pipeline set lang = '{kwargs['lang']}', lang_conf = '{kwargs['lang_conf']}' where {kwargs['idname']}= '{kwargs['item_id']}' "
        job.execute(sql)
        return job.db.cur.rowcount

    @classmethod
    def create(cls, **kwargs):
        """Insert an 'incomplete' pipeline row; no-op when it exists."""
        sql = f'''
        insert into pipeline ({kwargs['idname']}, status)
        values ('{kwargs['item_id']}','incomplete')
        on conflict ({kwargs['idname']}) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class RelatedChannels(object):
    """Insert helper for the channel-to-channel ``related_channels`` edges."""

    @classmethod
    def insert(cls, **kwargs):
        """Record that kwargs['related_id'] is related to kwargs['channel_id'].

        Duplicate edges are ignored.  Returns rows inserted (0 or 1).
        """
        sql = f'''
        insert into related_channels (channel_id, related_id, retrieved_at)
        values ('{kwargs['channel_id']}','{kwargs['related_id']}',NOW())
        on conflict (channel_id, related_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class RecommendedVideos(object):
    """Insert helper for the ``video_recommendations`` edges."""

    @classmethod
    def insert(cls, d):
        """Record that d.tgt_video_id was recommended from d.src_video_id
        on d.harvest_date.  Duplicate (src, tgt, date) triples are ignored.
        Returns rows inserted (0 or 1).
        """
        sql = f'''
        insert into video_recommendations
        (src_video_id, tgt_video_id, harvest_date, tgt_video_harvested_at)
        values ('{d.src_video_id}','{d.tgt_video_id}', '{d.harvest_date}',NOW())
        on conflict (src_video_id, tgt_video_id, harvest_date) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class VideoScrape(Model):
    """Insert helper for the ``video_scrape`` completion log."""

    @classmethod
    def insert(cls,video_id):
        """Mark video_id as scraped today (Europe/Amsterdam calendar date).

        Duplicates are ignored.  Returns rows inserted (0 or 1).
        """
        completed_date = datetime.datetime.now(pytz.timezone('Europe/Amsterdam')).strftime("%Y-%m-%d")
        sql = f'''
        insert into video_scrape (video_id, completed_date, created_at)
        values ('{video_id}', '{completed_date}', now())
        on conflict (video_id) DO NOTHING;
        '''
        job.execute(sql)
        return job.db.cur.rowcount
class Caption(object):
    """Helpers for inspecting caption-track URLs and downloading captions."""

    @classmethod
    def get_lang(cls, url):
        """Return the 'lang' query parameter of *url*, or '' when absent."""
        params = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
        if ('lang' in params.keys()):
            return params['lang'][0]
        else:
            return ''

    @classmethod
    def get_asr(cls, url):
        """Classify a caption track from its 'kind' query parameter.

        Returns 'a_manual' (no kind), 'b_generated' (kind=asr) or
        'c_unknown' (any other kind).  The prefixes give a useful sort
        order when picking the best track.
        """
        params = urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
        if ('kind' in params.keys()):
            if (params['kind'][0] == 'asr'):
                return 'b_generated'
            else:
                return 'c_unknown'
        else:
            return 'a_manual'

    @classmethod
    def get_expire(cls, url):
        """Return the 'expire' query parameter (KeyError when absent)."""
        return urllib.parse.parse_qs(urllib.parse.urlparse(url).query)['expire'][0]

    @classmethod
    def get_captions(cls, caption_urls):
        """Download every caption track and return them as a DataFrame.

        caption_urls: iterable of rows (DataFrame with .iterrows()) exposing
        url, expire, caption_type and lang.  Each result row carries the
        HTTP status, body length, expiry timestamp, the de-tagged caption
        text (list of lines, or None on failure) and the track metadata.
        """
        HTML_TAG_REGEX = re.compile(r'<[^>]*>', re.IGNORECASE)
        captions = []
        # BUGFIX: the original created a Session, never used it, then built
        # a second throwaway Session for every single request.  Reuse one
        # session so connections are pooled across downloads.
        http_client = requests.Session()
        for i, u in caption_urls.iterrows():
            result = http_client.get(u.url)
            if (result.status_code == 200) and (len(result.text) > 0):
                # Strip XML/HTML markup and fold newlines into spaces.
                # (The original also did .replace("\'", "'") — a no-op,
                # since "\'" and "'" are the same string — now removed.)
                caption_text = [re.sub(HTML_TAG_REGEX, '', html.unescape(xml_element.text)).replace("\n", ' ')
                                for xml_element in ElementTree.fromstring(result.text)
                                if xml_element.text is not None
                                ]
            else:
                caption_text = None
            captions.append({
                'code': result.status_code,
                'len_': len(result.text),
                'expire': datetime.datetime.utcfromtimestamp(int(u.expire)).strftime('%Y-%m-%d %H:%M:%S'),
                'text': caption_text,
                'caption_type': u.caption_type,
                'lang': u.lang,
                'caption_url': u.url
            })
        captions = pd.DataFrame(captions)
        return captions
| [
"alexis.perrier@pm.me"
] | alexis.perrier@pm.me |
511ca25210aebdea239ec9c00fcca35b9da49554 | 71aaefa30760ecc699f533db24ebe353084cc8e0 | /src/tornado-webserver.py | 5cb8d891a7f973f9fb2c48ba6bffd64a68544abe | [] | no_license | rmessner/docker-deis-dashboard | e190fc4b450fbb8ab298cdb76eecd2f3d62a8b88 | 3e879bb12bd2f192bf4403a37f8c96fdcfb02b45 | refs/heads/master | 2021-01-01T19:42:54.864826 | 2014-12-03T17:44:17 | 2014-12-03T17:44:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
from dashboard import app

# Wrap the (WSGI) dashboard app so Tornado's async HTTP server can serve it.
http_server = HTTPServer(WSGIContainer(app))
# Bind port 80 on all interfaces — requires root/CAP_NET_BIND_SERVICE.
http_server.listen(80)
# Send Tornado's access/application logs to the console with formatting.
enable_pretty_logging()
# Start the event loop; blocks until the process is killed.
IOLoop.instance().start()
| [
"raphael.messner@gmail.com"
] | raphael.messner@gmail.com |
f173160d77e0d5bea220f33633e5bb63c9668916 | 50c20d107f98eb6c78553c9a0dcc20298df5958d | /courses/udacity/Intro to Machine Learning/svm/svm_author_id.py | bbf9b60353c3d39fb2b87320cb3e84193a894362 | [] | no_license | arnaldog12/Deep-Learning | a7b9dade336f9977109e8de4de8b65db35b711e3 | b0d46d93203394692cf9ba8d5628f8edc1589b6a | refs/heads/master | 2022-05-02T16:05:40.978508 | 2022-04-29T12:34:27 | 2022-04-29T12:34:27 | 98,334,247 | 90 | 51 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()

# Optional 1% subsample of the training set (course speed-up experiment).
# features_train = features_train[:len(features_train)/100]
# labels_train = labels_train[:len(labels_train)/100]

#########################################################
### your code goes here ###
# RBF-kernel SVM with a large C (low regularisation), per the exercise.
clf = SVC(C=10000.0, kernel='rbf')
t0 = time()
clf.fit(features_train, labels_train)
print("Training Time: {0:.3f}".format(time()-t0))
t1 = time()
pred = clf.predict(features_test)
print("Test Time: {0:.3f}".format(time()-t1))
# print(pred)
# Number of test emails predicted as Chris (label 1).
print(len(pred[pred == 1]))
# print(accuracy_score(pred, labels_test[10]))
#########################################################
| [
"arnaldo.g12@gmail.com"
] | arnaldo.g12@gmail.com |
8f641be213c38d2d7cad0bb6497df44984f4c44f | ffe5bc9851a57851e70fbe9cf71b532482ad5813 | /CountSheep.py | 1f2256081516bc894c05223ea5450f6988fe0614 | [] | no_license | BigBricks/PythonChallenges | 0320e786cb0ceac0dce8ed098b44b3890abf1654 | 9ab39b2dfe07b0a23a205ed91d56296ac9a75828 | refs/heads/master | 2020-05-05T00:39:12.315967 | 2019-10-30T04:10:51 | 2019-10-30T04:10:51 | 179,581,968 | 0 | 0 | null | 2019-10-30T04:10:51 | 2019-04-04T21:46:50 | Python | UTF-8 | Python | false | false | 102 | py | def count_sheeps(arrayOfSheeps):
# TODO May the force be with you
return arrayOfSheeps.count(True) | [
"bsa6.23.94@gmail.com"
] | bsa6.23.94@gmail.com |
71e8829afac3e0a0c65027c407736ec43eeb6262 | 0cba5529e387ba0f077b4e8ddeb96f914004f5df | /malaya/emotion.py | dcd419468d7b3fce6dc88b499f1cc790ea1925c7 | [
"MIT"
] | permissive | AsyrafAzlan/Malaya | dc78398ee6880578f40c5646a48882a5913217ae | 3d5166173cf74881f7a56fffaaf391813c55d4f1 | refs/heads/master | 2021-05-21T22:47:41.863857 | 2020-04-03T15:00:21 | 2020-04-03T15:00:21 | 252,841,526 | 1 | 0 | MIT | 2020-04-03T21:04:44 | 2020-04-03T21:04:44 | null | UTF-8 | Python | false | false | 1,861 | py | from malaya.supervised import softmax
from malaya.path import PATH_EMOTION, S3_PATH_EMOTION
from herpetologist import check_type
# Class labels emitted by every emotion model, in fixed order.
_emotion_label = ['anger', 'fear', 'joy', 'love', 'sadness', 'surprise']
# Transformer architectures with released emotion checkpoints.
_availability = [
    'bert',
    'tiny-bert',
    'albert',
    'tiny-albert',
    'xlnet',
    'alxlnet',
]
def available_transformer_model():
    """Return the names of the transformer architectures supported for
    emotion analysis (see :func:`transformer`)."""
    return _availability
def multinomial(**kwargs):
    """
    Load multinomial emotion model.

    Extra keyword arguments are forwarded to the loader (e.g. validate
    flags — see malaya.supervised.softmax.multinomial).

    Returns
    -------
    BAYES : malaya._models._sklearn_model.BAYES class
    """
    return softmax.multinomial(
        PATH_EMOTION, S3_PATH_EMOTION, 'emotion', _emotion_label, **kwargs
    )
@check_type
def transformer(model: str = 'xlnet', **kwargs):
    """
    Load Transformer emotion model.

    Parameters
    ----------
    model : str, optional (default='xlnet')
        Model architecture supported. Allowed values:

        * ``'bert'`` - BERT architecture from google.
        * ``'tiny-bert'`` - BERT architecture from google with smaller parameters.
        * ``'albert'`` - ALBERT architecture from google.
        * ``'tiny-albert'`` - ALBERT architecture from google with smaller parameters.
        * ``'xlnet'`` - XLNET architecture from google.
        * ``'alxlnet'`` - XLNET architecture from google + Malaya.

    Returns
    -------
    MODEL : Transformer class
    """
    model = model.lower()
    # BUGFIX: the original referenced undefined names (`size = size.lower()`
    # and `size=size, validate=validate` in the loader call), so every call
    # raised NameError.  Extra options now flow through **kwargs instead.
    if model not in _availability:
        raise Exception(
            'model not supported, please check supported models from malaya.emotion.available_transformer_model()'
        )
    return softmax.transformer(
        PATH_EMOTION,
        S3_PATH_EMOTION,
        'emotion',
        _emotion_label,
        model = model,
        **kwargs,
    )
| [
"husein.zol05@gmail.com"
] | husein.zol05@gmail.com |
f68c22a3ebcff8045d9ad3131f9b30a050725a36 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_filthiness.py | d1e4e34d83d291300555681e0bf38feb72c2e796 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py |
# class header
class _FILTHINESS():
def __init__(self,):
self.name = "FILTHINESS"
self.definitions = [u'the quality of being very dirty']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
830407e09552cfb2cb0473e85960160bfe3aa607 | c6ccee43794d7aa95c81eb30afa986db1853a765 | /djangomediapil/fields.py | 9ca64ab3a447900cbde106de365ecfdd6521d6dc | [] | no_license | giorgi94/django-media-pil | b14eba7a661953aabb94e6f6959cc8ef8c77ef36 | 63dd25ecf81b0ef2b0d682c5ffeaddc016ef0249 | refs/heads/master | 2020-04-08T02:14:28.004119 | 2019-03-09T15:02:32 | 2019-03-09T15:02:32 | 158,928,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | import os
import json
import datetime as dt
from django import forms
from django.db import models
from django.core import exceptions
from django.utils.translation import ugettext_lazy as _
from .mediaPIL import MediaPIL
from .widgets import ImagePILWidget
class ImagePILField(models.TextField):
    """Django TextField that stores image-processing settings as JSON.

    The stored blob holds pathway/point/quality/upload_to; legacy rows that
    contain only a bare pathway string are upgraded on read.
    """

    description = "Image PIL Field"

    def __init__(self, pathway="", point=(50, 50), quality=90,
                 upload_to=".", *args, **kwargs):
        self.blank = kwargs.get('blank', False)
        if pathway is None:
            pathway = ""
        # Settings serialized into the column when no value is supplied.
        self.default_kwargs = {
            'pathway': pathway,
            'point': point,
            'quality': quality,
            'upload_to': upload_to,
        }
        kwargs['default'] = json.dumps(
            self.default_kwargs, ensure_ascii=False)
        super().__init__(*args, **kwargs)

    def from_db_value(self, value, expression=None, connection=None):
        """Deserialize a DB value into the settings dict.

        BUGFIX: expression/connection now default to None so to_python()
        (which only has the raw value) can delegate here; previously that
        call raised TypeError for missing positional arguments.
        """
        try:
            if value is None:
                return self.default_kwargs
            if type(value) == str and '{' not in value:
                # Legacy format: the column held just the pathway string.
                kw = self.default_kwargs.copy()
                kw['pathway'] = value
                return kw
            return json.loads(value)
        except Exception:
            # Any malformed value falls back to the field defaults.
            return self.default_kwargs

    def clean(self, value, model_instance):
        """Require a non-empty pathway unless the field allows blank."""
        val = json.loads(value)
        if not val.get('pathway') and not self.blank:
            raise forms.ValidationError(
                _('This field is required'), code='invalid')
        return value

    def get_prep_value(self, value):
        """Serialize dict values to JSON; pass strings through unchanged."""
        if type(value) == str:
            return value
        return json.dumps(value, ensure_ascii=False)

    def value_to_string(self, obj):
        # NOTE(review): hard-codes the model attribute name "image" —
        # serialization breaks if the field is attached under another name.
        return self.get_prep_value(obj.image)

    def to_python(self, value):
        return self.from_db_value(value)

    def formfield(self, **kwargs):
        # Swap the admin's plain textarea for the custom image widget.
        widget = kwargs.get('widget')
        if 'AdminTextareaWidget' in str(widget):
            kwargs['widget'] = ImagePILWidget
        return super().formfield(**kwargs)
| [
"giorgik1994@gmail.com"
] | giorgik1994@gmail.com |
39c2bccbbad0084903df070044113fb7721f2cfb | 24fec5484a82c7705185b3c8ff41ad4f93b7feeb | /pizza_shop/mainapp/migrations/0002_alter_cart_final_price.py | 82f7224fe27b3aea90a802ee9e7d25c950eb805e | [] | no_license | rita-mazets/isp_lab3-4 | 89d0e5ecc853b7e1630f59d49b33c7dbafff0c8d | 182ad669ac49a37249003b082abba707bb6ebf64 | refs/heads/master | 2023-06-06T09:34:31.860928 | 2021-06-14T20:16:26 | 2021-06-14T20:16:26 | 370,748,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | # Generated by Django 3.2 on 2021-04-29 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='cart',
name='final_price',
field=models.DecimalField(decimal_places=2, default=0, max_digits=9, verbose_name='Общая цена'),
),
]
| [
"mmmazets@mail.ru"
] | mmmazets@mail.ru |
e528b43a3e6dd339eefe897b88c870322865d82a | 4e6202e6c44fcde360a5cd22972556df2c3af975 | /src/compas_testing/rhino/gom.py | eadc3787c9cac4e165fd1cfb0e080c57c3af809f | [
"MIT"
] | permissive | franaudo/compas_testing | ce60e3cde066ce484b38dba472214b5104d0499f | f5c443b3c5f420793efda1bd156319cf7b85556e | refs/heads/master | 2023-05-31T12:46:09.603946 | 2020-01-27T21:24:02 | 2020-01-27T21:24:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,990 | py | from compas.geometry import Point
from compas_rhino.artists import PointArtist
__author__ = 'Francesco Ranaudo'
__copyright__ = 'Copyright 2020, BLOCK Research Group - ETH Zurich'
__license__ = 'MIT License'
__email__ = 'ranaudo@arch.ethz.ch'
__all__ = ['draw_point_cloud_color',
]
def draw_point_cloud_color(points_history_coord, color_map, stage_index):
    """
    Draws point clouds for a chosen stage - in Rhino,
    Point color is defined by its displacement from initial position

    Parameters
    ----------
    points_history_coord : dictionary
        key: str - point geometric key
        value : list - a list of locations of a given point in three-dimensional space (XYZ coordinates of the point)

    color_map : dictionary
        key: str - point geometric key
        value : list of lists - RGB values for each point at each stage

    stage_index : int - the stages to be drawn

    """
    for key in points_history_coord.keys():
        # XYZ of this point at the requested stage.
        point = Point(points_history_coord[key][stage_index][0],
                      points_history_coord[key][stage_index][1],
                      points_history_coord[key][stage_index][2]
                      )
        # Geometric keys look like "(x,y,z)"; strip the parentheses to get
        # a valid Rhino object name.  Layer groups all points of one stage.
        p = PointArtist(point, name=key.strip("()"), color=color_map[key][stage_index], layer='Stage_'+str(stage_index))
        p.draw()
def draw_delaunay_mesh(point_cloud, color_map):
    """Draw a Delaunay mesh over the point cloud.  TODO: not implemented."""
    pass
# def draw_stages(points_history_coord, scaled_disp, start, stop):
# '''draw the point cloud for input stages'''
#
# """
# Draw point clouds for a sequence of stages - in Rhino,
# Point color is defined by its displacement from initial position
#
# Parameters
# ----------
# points_history_coord : dictionary
# key: *
# value : list - a list of locations of a given point in three-dimensional space (XYZ coordinates of the point)
#
# scaled_displ : dictionary
# key: *
# value : list - a list of scalars between 0 and 1
#
# start : the first stage to be drawn
#
# stop : the last stage to be drawn
#
# * condition : keys must be identical in points_history_coord and scaled_displ
#
# """
#
# for key, value in points_history_coord.items():
# for j in range(start, stop):
# point = Point(value[j][0], value[j][1], value[j][2])
# deformation = scaled_displ[key][j]
# rgb = ratio_to_rgb(deformation)
# p = PointArtist(point, name=key.strip("()"), color=rgb, layer='Stage8_' + str(j))
# p.draw()
#
# return p
#
#
# def point_trajectory(points_history, key, rgb=(255, 255, 255)):
# """
# Draw the locations in space of a point throughout the successive stages
#
# Parameters
# ----------
# points_history : dictionary
# key: string - the coordinates of a point in initial stage
# value : sequence - a sequence of tuples describing locations of a given point in three-dimensional space
# * tuple : distance to reference point, XYZ coordinates of the point, Stage of the point
#
# key : - key of the point that you want to draw
#
# color : the chosen color
#
# """
#
# values = points_history[key]
# for v in values:
# point = Point(v[0], v[1], v[2])
# p = PointArtist(point, name=key, color=rgb, layer=key.strip("()"))
# p.draw()
# return p
#
#
# def find_rhpoint_key():
# """
# Select a point on rhino and get its key in the points_history dictionary
# """
#
# points = select_points("select one point")
# coordinates = get_point_coordinates(points)
# name = get_object_names(points)
#
# parse = str(name[0])
# split = parse.split(",")
# key = '(' + split[0] + ',' + split[1] + ',' + split[2] + ')'
# return key
# ******************************************************************************
# Main
# ******************************************************************************
if __name__ == "__main__":
pass
| [
"ranaudo@arch.ethz.ch"
] | ranaudo@arch.ethz.ch |
0458ec665e53e9ec2babef6d8d8f4eb70518e005 | cb28edc8fecba9b12de7d5798ea8f04fd99cfce1 | /LIA_test.py | a6dcad4032e4749f4d5f30dfb1f0ca75ab1054db | [] | no_license | khoidnyds/Rosanlind | 5b597e267c6a9e9b54ed2c05f72537af8160b52b | 68902285e6ca462f8cebb21eae6016bc18d02518 | refs/heads/master | 2022-08-28T00:28:40.318066 | 2020-05-25T22:40:25 | 2020-05-25T22:40:25 | 266,893,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | import unittest
from LIA import Mendel2
class MyTestCase(unittest.TestCase):
    """Tests for Mendel2 (Rosalind LIA: probability of AaBb offspring)."""

    def test_seq1(self):
        # Rosalind sample dataset: k=2, N=1 -> probability ~0.684.
        nuc = Mendel2(2, 1)
        self.assertAlmostEqual(nuc.get_result(), 0.684, 3)

    def test_seq2(self):
        # Larger instance: k=7 generations, N=31 successes.
        nuc = Mendel2(7, 31)
        self.assertAlmostEqual(nuc.get_result(), 0.6142569731, 7)


if __name__ == '__main__':
    unittest.main()
| [
"khoidnyds@vt.edu"
] | khoidnyds@vt.edu |
37a0377ba0a802b58950a24f45ab331d51f47c8c | e147cf167bb9d0985b563d8aa5cad4d42b075004 | /app/libs/http.py | dbd5cf01d8bb9b646ff517c9b9e436333d241c0c | [] | no_license | pilu01/kk | b3f54b6343e178a184992af9f68a0a30f60aba9b | 02fde0c9374f3652cdd5b0458b9b878ca33a9fff | refs/heads/master | 2021-07-10T14:50:05.611996 | 2020-10-13T12:49:41 | 2020-10-13T12:49:41 | 207,334,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # -*- coding: utf-8 -*-
# @Time : 2020/9/14 15:31
# @Author : xhb
# @FileName: http.py
# @Software: PyCharm
import requests
class Http(object):
    """Thin convenience wrapper around ``requests`` GET calls."""

    def __init__(self, url):
        # Base URL kept on the instance.  Note that get() below is static
        # and takes its own url argument, so this value is informational.
        self.url = url

    @staticmethod
    def get(url, json_return=True):
        """GET *url* and return the parsed JSON body (or raw text when
        json_return is False).  Non-200 responses yield {} / ''."""
        response = requests.get(url)
        ok = response.status_code == 200
        if json_return:
            return response.json() if ok else {}
        return response.text if ok else ''
| [
"xinhb@vastio.com"
] | xinhb@vastio.com |
950a91e7f352536d8c73b04d04228a01c66a4fda | 13a80359dedbf4aae47ad47ae9ae7d6dbd1bcb16 | /chat_search.py | 8b901c54b040ef6ce9c7070e7ef8804002ae8ebf | [] | no_license | tiarafreddyandika/mitm_addon | bb52569a4ed86142cfd11ec33b8f73f00ddc6053 | 8246f5d9230c0b3c9f456dd907c142d7db773c17 | refs/heads/master | 2022-11-11T19:20:53.325345 | 2020-06-18T03:55:55 | 2020-06-18T03:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import mitmproxy
from base.base_simple_gql_request import BaseRequest
class ChatSearch(BaseRequest):
    """mitmproxy addon that intercepts the contactAndRepliesSearch GraphQL
    query and serves a canned response file instead of the real backend."""

    def __init__(self):
        super().__init__()

    @property
    def error_response_file(self) -> str:
        # Payload served when simulate_error is True (currently unused).
        return "./response/chat_attachment_error.json"

    @property
    def modified_response_file(self) -> str:
        # Canned payload served because modify_response is True.
        return "./response/chat_initial_search.json"

    @property
    def query_matcher(self) -> str:
        # Substring matched against the GraphQL request body.
        return "query contactAndRepliesSearch"

    @property
    def simulate_error(self) -> bool:
        return False

    @property
    def modify_response(self) -> bool:
        return True


# mitmproxy entry point: list of addon instances to load.
addons = [
    ChatSearch()
]
"alfon.lavinski@tokopedia.com"
] | alfon.lavinski@tokopedia.com |
b033e8f0b13e41e324b11e403739c993c52bbe7e | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Ajax/1809/Day02/1808/AjaxDemo02/run01.py | 35ac2bfbdbdab18d5da55f05332beae995cd1c85 | [] | no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,861 | py | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import json
import pymysql
pymysql.install_as_MySQLdb()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']="mysql://root:123456@localhost:3306/flask"
db = SQLAlchemy(app)
class Login(db.Model):
    """SQLAlchemy model for the ``login`` table."""
    __tablename__ = "login"
    id = db.Column(db.Integer,primary_key=True)
    # login account name
    lname = db.Column(db.String(30))
    # login password — NOTE(review): stored in plain text; should be hashed.
    lpwd = db.Column(db.String(30))
    # display/user name
    uname = db.Column(db.String(30))
    def to_dict(self):
        """Return this row as a plain JSON-serializable dict."""
        dic = {
            'id':self.id,
            'lname' : self.lname,
            'lpwd' : self.lpwd,
            'uname' : self.uname,
        }
        return dic
@app.route('/00-homework')
def homework():
    """Render the homework demo page."""
    return render_template('00-homework.html')

@app.route('/00-server')
def server00():
    """Ajax username-availability check (?lname=<name>)."""
    lname = request.args.get('lname')
    login=Login.query.filter_by(lname=lname).first()
    if login:
        # Response text (Chinese): "the user name already exists"
        return "用户名称已经存在"
    else:
        # Response text (Chinese): "passed"
        return "通过"

@app.route('/01-post')
def post():
    """Render the POST-ajax demo page."""
    return render_template("01-post.html")

@app.route('/01-server',methods=['POST'])
def server01():
    """Echo back the POSTed uname/uage form fields."""
    uname = request.form['uname']
    uage = request.form['uage']
    # Response text (Chinese): "the submitted uname is ...; uage is ..."
    return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)

@app.route('/02-form',methods=['GET','POST'])
def form():
    """GET renders the form page; POST echoes the submitted fields."""
    if request.method == 'GET':
        return render_template('02-form.html')
    else:
        uname = request.form['uname']
        uage = request.form['uage']
        return "传递过来的uname的值为:%s,传递过来的uage的值为:%s" % (uname,uage)
@app.route('/03-getlogin')
def getlogin():
    """Render the page that fetches all logins via ajax."""
    return render_template('03-getlogin.html')

@app.route('/03-server')
def server03():
    """Return every login row concatenated into one plain-text string."""
    logins = Login.query.all()
    str1 = ""
    for login in logins:
        # NOTE(review): fields are joined with no separator, so the client
        # cannot reliably split them back apart.
        str1 += str(login.id)
        str1 += login.lname
        str1 += login.lpwd
        str1 += login.uname
    return str1

@app.route('/04-json')
def json_views():
    """Render the JSON demo page."""
    return render_template("04-json.html")

@app.route('/04-server')
def server04():
    """Return a hard-coded list of people serialized as a JSON string."""
    # Earlier single-list / single-dict variants, kept for reference:
    # list = ["王老六","RapWang","隔壁老顽固"]
    # dic = {
    #     'name':'TeacherWang',
    #     'age' : 35,
    #     'gender' : 'Male',
    # }
    # jsonStr=json.dumps(dic)
    # NOTE(review): "list" shadows the builtin name.
    list = [
        {
            "name":"wangwc",
            "age":35,
            "gender":"Male",
        },
        {
            'name':'RapWang',
            'age':40,
            'gender':'Female',
        }
    ]
    jsonStr=json.dumps(list)
    return jsonStr

@app.route('/05-json-login')
def json_login():
    """Render the JSON-login demo page."""
    return render_template('05-json-login.html')

@app.route('/05-server')
def server05():
    """Return the Login row with id 1 as a JSON string."""
    # Fetch the Login record whose id is 1.
    login=Login.query.filter_by(id=1).first()
    jsonStr=json.dumps(login.to_dict())
    return jsonStr

if __name__ == "__main__":
    app.run(debug=True)
"huafengdongji@hotmail.com"
] | huafengdongji@hotmail.com |
5e38d99387ab0922958a03e78b4d12fe8b189012 | 20171d2c050a727d265bb551861326d723e43322 | /2019/05/sunny_with_a_chance_of_asteroids.py | f493be77d97d3e5ce6938fde33bbf9f7a57c4546 | [] | no_license | paisuhas/AdventOfCode | d7d0e708e7672d4ec68e63fe9ec40b3eb1de320c | 31bad46f44a7c657d12bb5718d9e0903950993ff | refs/heads/master | 2020-09-24T06:39:45.694404 | 2019-12-13T06:50:20 | 2019-12-13T06:50:20 | 225,690,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | #!/usr/bin/env python3
def decode(opcode):
    """Split an Intcode instruction into ([mode1, mode2], op).

    The two low digits are the operation code; the hundreds and thousands
    digits are the parameter modes of the first two operands.
    """
    op = opcode % 100
    modes = [(opcode // divisor) % 10 for divisor in (100, 1000)]
    return (modes, op)
def get_operands(pc, modes):
    """Fetch operand values for the instruction at *pc*.

    modes: one entry per operand; 1 = immediate (the value at pc+offset
    itself), 0 = position (the value at the address stored at pc+offset).
    Reads the module-level ``program`` list.
    """
    global program
    operands = []
    for offset, mode in enumerate(modes, 1):
        address = pc + offset if mode else program[pc+offset]
        operands.append(program[address])
    return operands
# Load the Intcode program: one comma-separated line of integers.
program = list(map(int, open("input.txt").readlines()[0].strip().split(',')))
next_pc = 0
# Opcode groups: add/mul, input/output, jump-if-true/false, less-than/equals.
three_op_instructions = [1, 2]
one_op_instructions = [3, 4]
jump_instructions = [5, 6]
comparison_instructions = [7, 8]
for pc, opcode in enumerate(program):
    # enumerate walks every cell; only act when pc matches the real
    # instruction pointer (skips operand cells and handles jumps).
    if pc == next_pc:
        modes, op = decode(opcode)
        if op in three_op_instructions:
            operands = get_operands(pc, modes)
            next_pc = pc + 4
            if op == 1:
                # opcode 1: addition
                result = sum(operands)
            else:
                assert(op == 2)
                # opcode 2: multiplication
                result = operands[0] * operands[1]
            # Third parameter is always a position (write address).
            program[program[pc + 3]] = result
        elif op in one_op_instructions:
            next_pc = pc + 2
            if op == 3:
                # opcode 3: input — hard-coded system id (5 for part 2).
                program[program[pc+1]] = 5 # 1 for Part 1
            else:
                assert(op == 4)
                # opcode 4: output, honoring the parameter mode.
                if modes[0]:
                    print(program[pc+1])
                else:
                    print(program[program[pc+1]])
        elif op in jump_instructions:
            operands = get_operands(pc, modes)
            # opcode 5: jump-if-true; opcode 6: jump-if-false.
            if (op == 5 and operands[0]) or (op == 6 and not operands[0]):
                next_pc = operands[1]
            else:
                next_pc = pc + 3
        elif op in comparison_instructions:
            next_pc = pc + 4
            operands = get_operands(pc, modes)
            # opcode 7: less-than; opcode 8: equals — store 1/0.
            if (op == 7 and operands[0] < operands[1]) or (op == 8 and operands[0] == operands[1]):
                program[program[pc+3]] = 1
            else:
                program[program[pc+3]] = 0
        else:
            # opcode 99: halt.
            assert(op == 99)
            break
"spai@cs.wisc.edu"
] | spai@cs.wisc.edu |
9cf8249155e099a4f2d638b13a734b6f3276c7d7 | d93cb231c2c2fdda9615d11d150045886608a8d7 | /server/admin.py | 1996b7b7dd7e4dde85e917c95b8ae503b6e31b5d | [
"Apache-2.0"
] | permissive | Kiesum/tfrs-1 | 1aea5245a4d80bdb36c637ab158f8ca2a45c68df | 12a8dff77c5b94687117e9a0822a1f1e28c453c5 | refs/heads/master | 2021-01-01T17:59:45.536698 | 2017-08-24T19:09:25 | 2017-08-24T19:09:25 | 98,216,933 | 0 | 0 | null | 2017-07-24T17:31:03 | 2017-07-24T17:31:02 | null | UTF-8 | Python | false | false | 2,394 | py | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.contrib import admin
from .models.Attachment import Attachment
from .models.Audit import Audit
from .models.Contact import Contact
from .models.CreditTrade import CreditTrade
from .models.CreditTradeLogEntry import CreditTradeLogEntry
from .models.FuelSupplier import FuelSupplier
from .models.Group import Group
from .models.GroupMembership import GroupMembership
from .models.History import History
from .models.LookupList import LookupList
from .models.Note import Note
from .models.Notification import Notification
from .models.NotificationEvent import NotificationEvent
from .models.Offer import Offer
from .models.Permission import Permission
from .models.Role import Role
from .models.RolePermission import RolePermission
from .models.User import User
from .models.UserFavourite import UserFavourite
from .models.UserRole import UserRole
admin.site.register(Attachment)
admin.site.register(Audit)
admin.site.register(Contact)
admin.site.register(CreditTrade)
admin.site.register(CreditTradeLogEntry)
admin.site.register(FuelSupplier)
admin.site.register(Group)
admin.site.register(GroupMembership)
admin.site.register(History)
admin.site.register(LookupList)
admin.site.register(Note)
admin.site.register(Notification)
admin.site.register(NotificationEvent)
admin.site.register(Offer)
admin.site.register(Permission)
admin.site.register(Role)
admin.site.register(RolePermission)
admin.site.register(User)
admin.site.register(UserFavourite)
admin.site.register(UserRole) | [
"gwalker@escapesystems.com"
] | gwalker@escapesystems.com |
d602bd37839f0456bfb7aabbb13cffddb3e2b1e3 | c4f2c58b2eb83f5bf672e82a39b6c96671f0eac1 | /iss.py | 64eeed4e973b979754e09aa5b058cac7b82be4c4 | [] | no_license | fthbrmnby/ISS-Position | 0bf36407c9384ebd02ed19e25c8687965a495bf4 | cf69b23fad97a8810b20e70ac5c144b0500c06e0 | refs/heads/master | 2021-01-19T22:34:36.257275 | 2017-07-31T07:46:57 | 2017-07-31T07:46:57 | 88,828,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
import urllib.request
import json
# basic map setup
globe = Basemap(projection='robin', resolution = 'c', lat_0=0, lon_0=0)
globe.drawcoastlines()
globe.drawcountries()
globe.fillcontinents(color="grey")
globe.drawmapboundary()
globe.drawmeridians(np.arange(0, 360, 30))
globe.drawparallels(np.arange(-90, 90, 30))
x,y = globe(0, 0)
point = globe.plot(x, y, 'ro', markersize=7)[0]
def init():
point.set_data([], [])
return point,
# animation function. This is called sequentially
def animate(i):
lons, lats = iss_position()
x, y = globe(lons, lats)
point.set_data(x, y)
return point,
# http://api.open-notify.org/iss-now.json
def iss_position():
resp = urllib.request.urlopen("http://api.open-notify.org/iss-now.json").read()
jsn = json.loads(resp.decode('utf-8'))
pos = jsn["iss_position"]
lon = pos["longitude"]
lat = pos["latitude"]
return (lon, lat)
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(plt.gcf(), animate, init_func=init,
interval=3000, blit=True)
plt.show()
| [
"fthbrmnby@gmail.com"
] | fthbrmnby@gmail.com |
35e5326d1aad1c103b3e76b9efefdd92864a2926 | 45edff14271724c5bf27e62e96eeb635840eae22 | /ML/ensemble_learning/util.py | d998161fe6c0a48ae7207841cc63d1e0147b0db8 | [] | no_license | DaiJitao/machine_learning | 1e41208dc94836a97e57a4b0f5778f8da2bb81d4 | 49e1db9ecbfbf886a11ce416eea402d214cf2049 | refs/heads/master | 2021-06-25T23:52:06.066315 | 2021-02-07T16:17:50 | 2021-02-07T16:17:50 | 209,712,507 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 347 | py |
"""
决策树常用的工具类:指标的计算、数据的加载
"""
import numpy as np
def load_data():
'''
根据《统计学习方法》第八章8.1.3产生数据.
:return:
'''
dataset_label = np.array([[0, 1], [1, 1], [2, 1], [3, -1], [4, -1], [5, -1], [6, 1], [7, 1], [8, 1], [9, -1]])
return dataset_label
| [
"hejinrong@news.cn"
] | hejinrong@news.cn |
1d33a72c9dcc9eadff29cb6b67e3cceb03f68a5f | 72aeb6cbad1a595e656c1469df3e27a1794d9542 | /Tarea/repaso/repaso/settings.py | dd312496f36add46bcc6cd0e3aa79d8963291163 | [] | no_license | monicanicole/DjangoRest-Angular | 87f3b83b2dd2ee035d943c04db8ebb4b3614ef3d | 1a2db5a0ae93a9fb7b800f3be0f5e92aab76aee3 | refs/heads/master | 2020-12-25T11:15:26.871064 | 2016-07-21T03:53:30 | 2016-07-21T03:53:30 | 63,833,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,352 | py | """
Django settings for repaso project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qa6ipqpw$4arzr!u%273@odw0^emkj^&p98r##0si=g4rl^ccf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'administrar',
'rest_framework',
'servicioweb',
'corsheaders',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
]
ROOT_URLCONF = 'repaso.urls'
CORS_ORIGIN_ALLOW_ALL = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,"template")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'repaso.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"monicanicole881@gmail.com"
] | monicanicole881@gmail.com |
2d62061754e25389e9568ddda07c4f09a77ac25f | db970f92ec15ff2a4221079d5b3c16c4000f3a2d | /tpot_secom_best.py | 1cf3dbfb67de19be280f5ab4a940e558e2d7a423 | [] | no_license | Ranga2904/AzureML_TS_SECOM | 390fab7b28b558bd76f5c78d14a1927f0f7197ca | 67aac95bd11a75beb1385365214aab5cfa0ba4a9 | refs/heads/main | 2023-03-19T03:24:39.160640 | 2021-03-14T15:24:27 | 2021-03-14T15:24:27 | 347,672,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from tpot.export_utils import set_param_recursive
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data['target'], random_state=1)
# Average CV score on the training set was: 0.9106825452925345
exported_pipeline = make_pipeline(
StackingEstimator(estimator=RandomForestClassifier(bootstrap=False, criterion="gini", max_features=0.3, min_samples_leaf=17, min_samples_split=12, n_estimators=100)),
StackingEstimator(estimator=ExtraTreesClassifier(bootstrap=True, criterion="entropy", max_features=0.4, min_samples_leaf=19, min_samples_split=20, n_estimators=100)),
ExtraTreesClassifier(bootstrap=True, criterion="gini", max_features=0.5, min_samples_leaf=1, min_samples_split=5, n_estimators=100)
)
# Fix random state for all the steps in exported pipeline
set_param_recursive(exported_pipeline.steps, 'random_state', 1)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"noreply@github.com"
] | Ranga2904.noreply@github.com |
ebf8f91c4cebdb610c8c71f2511f1d32c8984cf2 | 1099175fcf3dca6d1ae00e5729c954c7838d3cce | /main.py | a34799ae095bf748fbe1221af66112b93a4061f1 | [] | no_license | thenfserver/bot-py | d990e360c8c164d329b77a72096d567d60fef95e | a8e3cbd193679281e9a8cf5dd2ef53bae814363b | refs/heads/main | 2023-04-25T14:42:49.572423 | 2021-05-07T03:29:32 | 2021-05-07T03:29:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | import discord, os, time, random, datetime, asyncio, platform, youtube_dl
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
start_time = time.time()
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix =["nf!", "NF!", "Nf!", "nF!", "!"], case_insensitive=True, intents=intents)
TOKEN = ''
client.remove_command('help')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
print(f"Loaded cog.{filename[:-3]}")
@client.event
async def on_ready():
print(f"Succesfully signed in as {client.user.name} ({client.user.id}).")
channel = client.get_channel(743246479594094693)
embed = discord.Embed(description=f"{client.user.name} has booted on {time.ctime()}.",color=discord.Color.green())
await channel.send(embed=embed)
voice = client.get_channel(830314719356387359)
songsource = random.choice(os.listdir("/root/nf%20songs"))
source = FFmpegPCMAudio(songsource)
await voice.connect()
player = voice.play(source)
async def ch_pr():
await client.wait_until_ready()
fo = open("utils/lists/songs.txt", "r")
song = fo.readlines()
statuses = [f"{random.choice(song)} | nf!help", "nf.lnk.to/clouds"]
while not client.is_closed():
fo = open("utils/lists/songs.txt", "r")
song = fo.readlines()
statuses = [f"{random.choice(song)} | nf!help", "nf.lnk.to/clouds"]
status = random.choice(statuses)
await client.change_presence(activity=discord.Game(name=status))
await asyncio.sleep(30)
client.loop.create_task(ch_pr())
@client.command()
async def info(ctx):
""" The bot's info. """
current_time = time.time()
difference = int(round(current_time - start_time))
text = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(color=discord.Color.green())
embed.set_author(name=f"{client.user.name}'s Info")
embed.set_footer(text=f"Ping {round(client.latency * 1000)}ms | Uptime {text} | Version 2020.20.09")
embed.add_field(name="Developer", value=f"bread#7620", inline=True)
embed.add_field(name="Language", value=f"Python {platform.python_version()}")
embed.add_field(name="Libary", value=f"discord.py {discord.__version__}", inline=True)
embed.add_field(name="Users", value=f"`{len(set(client.get_all_members()))}`", inline=True)
embed.add_field(name="Github", value=f"[Click Here](https://github.com/IronCodez/nfrealbot/)")
embed.add_field(name="Status", value="[Click Here](https://stats.uptimerobot.com/L5ZkxcPQNB)")
await ctx.send(embed=embed)
@client.command()
async def uptime(ctx):
current_time = time.time()
difference = int(round(current_time - start_time))
text = str(datetime.timedelta(seconds=difference))
embed = discord.Embed(color=discord.Color.green(), description=text)
await ctx.send(embed=embed)
client.run(TOKEN, bot=True, reconnect=True)
| [
"noreply@github.com"
] | thenfserver.noreply@github.com |
bbae3698bee755a86e113f6ff4e7d52fe4f8a1ca | 7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a | /.history/DEBER_20210905000023.py | 9516b0bda58c56e4e39bbf9f8a08dc9dc32c935e | [
"MIT"
] | permissive | Alopezm5/PROYECTO-PARTE-1 | a1dce04009b24852c1c60e69bdf602ad3af0574b | bd7a8594edf08d41c6ca544cf6bac01ea4fcb684 | refs/heads/main | 2023-07-25T11:22:17.994770 | 2021-09-07T03:27:34 | 2021-09-07T03:27:34 | 403,670,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,447 | py | import os
class Empresa():
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("")
print("Empresa")
print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe=""):
self.nombre=nom
self.cedula=cedu
self.direccion=dire
self.telefono=tele
self.correo=email
self.estadocivil=estado
self.profesion=profe
def empleado(self):
self.nombre=input("Ingresar nombre del empleado: ")
self.cedula=int(input("Ingresar numero de cedula del empleado: "))
self.direccion=input("Ingresar la direccion del empleado: ")
self.telefono=int(input("Ingresar numero de contacto del empleado: "))
self.correo=input("Ingresar correo personal del empleado: ")
def empleadoObrero(self):
self.estadocivil=input("Ingresar estado civil del empleado: ")
def empleadoOficina(self):
self.profesion=input("Ingresar profesion del empleado: ")
def mostrarempleado(self):
print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
def __init__(self,dep=""):
self.departamento=dep
def departa(self):
self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
def mostrarDeparta(self):
print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso="",iess=0):
self.permisos=desper
self.valorhora=valhora
self.horastrabajadas=hotraba
self.valextra=extra
self.sueldo= suel
self.horasRecargo= hrecar
self.horasExtraordinarias=hextra
self.prestamo= pres
self.mesCuota= mcou
self.valor_hora= valho
self.sobretiempo=sobtiem
self.comEmpOficina = comofi
self.antiEmpObrero = antobre
self.iessEmpleado = iemple
self.cuotaPrestamo=cuopres
self.totdes = tot
self.liquidoRecibir = liquid
self.mesCuota=cuota
self.antiguedad=anti
self.comision=comi
self.fechaNomina=fNomina
self.fechaIngreso=fIngreso
self.iess=iess
def pagoNormal(self):
self.sueldo=float(input("Ingresar sueldo del trabajador: $ "))
self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
self.mesCuota=int(input("Ingresar meses a diferir el prestamo: "))
self.comision=float(input("Ingresar valor de la comsion: "))
self.antiguedad=int(input("Ingresar antiguedad: "))
self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
def pagoExtra(self):
self.horasRecargo=int(input("Ingresar horas de recargo: "))
self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
self.fechaNomina=float(input("Ingresar fecha de nomida (formato año-mes-dia): "))
self.fechaIngreso=float(input("Ingresar fecha de ingreso (formato año-mes-dia): "))
def calculoSueldo(self):
self.valor_hora=self.sueldo/240
self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
self.comEmpOficina = self.comision*self.sueldo
self.antiEmpObrero = self.antiguedad*(self.fechaNomina - self.fechaIngreso)/365*self.sueldo
self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
self.cuotaPrestamo=self.prestamo/self.mesCuota
self.toting = self.sueldo+self.sobretiempo+ self.comEmpOficina + self.antiEmpObrero
self.totdes = self.iessEmpleado + self.prestamo
self.liquidoRecibir = self.toting - self.totdes
def mostrarSueldo(self):
print("El empleado tiene un sueldo de ${}")
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo() | [
"85761855+Alopezm5@users.noreply.github.com"
] | 85761855+Alopezm5@users.noreply.github.com |
7dbe4f4d19fda2c3257bb3c276319a61d493f7d9 | a2558e0d92c6f9e3dcd6b12410ba824cb523075d | /app.py | 7d2bb02b84a5bb24ba3b367450d3bbe9100840fb | [] | no_license | agsorganics/agsorganicsbs | 0ea2240ebfd8651bb5a5b61819739d1571c7049c | 21404976ce2ef4d57cacbc093c3d9303ee604cb7 | refs/heads/master | 2022-09-08T21:12:22.435434 | 2020-06-06T08:39:07 | 2020-06-06T08:39:07 | 269,766,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from flask import Flask, render_template, url_for, request, redirect
import csv
app = Flask(__name__)
@app.route('/')
def hello_world():
return render_template('index.html')
@app.route('/<string:page_name>')
def html_page(page_name):
return render_template(page_name)
def write_to_csv(data):
with open('database.csv', mode='a') as database:
name = data["name"]
email = data["email"]
address = data["address"]
num = data["num"]
state = data["state"]
country = data["country"]
csv_writer = csv.writer(database, delimiter =',', quotechar ='"', quoting = csv.QUOTE_MINIMAL )
csv_writer.writerow([name,email,address,num,state,country])
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
if request.method == 'POST':
data = request.form.to_dict()
write_to_csv(data)
return redirect('/thanks.html')
else:
return 'try again'
| [
"eolisegun83@gmail.com"
] | eolisegun83@gmail.com |
0d3d361b190d5c8e94559f799d122150bbc60c1d | 0ba2975b23b7c15a804eb5a87490130c83c369f8 | /paper_experiment/compare_oversampling.py | 82420af7b26de0c1809a94789039b0c3cbbe29fe | [] | no_license | IqaHaziqah/on_the_way | eaf3cbda27d9c3935a2a849231133b8883cc3ba0 | bf3ad94535c046337eb0646ab355491f20475eaf | refs/heads/master | 2021-05-20T00:31:38.819902 | 2018-05-28T08:53:52 | 2018-05-28T08:53:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,138 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 8 20:01:28 2018
@author: zhouying
"""
import sys
sys.path.append('vae')
sys.path.append('distribution_ovsampling')
import pandas as pd
import numpy as np
import scipy.io as scio
from myutil2 import create_cross_validation,get_resultNB,compute
from vae6 import mnist_vae
from ndo import normal,smote
from sklearn.naive_bayes import GaussianNB
import sklearn
'''load the dataset'''
dataset = 'ionosphere'
mydata = scio.loadmat('MNIST_data\\UCI\\'+dataset+'.mat')
data = np.array(mydata['data'])
label = np.squeeze(mydata['label'])
para_o = pd.read_pickle('vae\\'+dataset+'.txt')
f1 = open('vae.txt','ab')
f2 = open('ndo.txt','ab')
f3 = open('smo.txt','ab')
result = create_cross_validation([data,label],1,10)
for i in range(1):
train,train_label,test,test_label = result[str(i)]
########vae
ov_vae,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
model = sklearn.neighbors.KNeighborsClassifier()
model.fit(train,np.arange(0,train_label.shape[0]))#求最近邻的编号
pre = model.predict(ov_vae)
info_0 = len(pre[train_label[pre]==0])#生成样本中0类标的个数
info_1 = len(pre[train_label[pre]==1])#生成样本中1类标的个数
pre = model.predict(ov_vae)
pre = np.array(list(set(pre)))
dive_0 = len(pre[train_label[pre]==0])#生成样本中不同的类标0的个数
dive_1 = len(pre[train_label[pre]==1])#生成样本中不同的类标1的个数
train_1 = np.concatenate((train,ov_vae),axis=0)
train_label1 = np.concatenate((train_label,np.ones(ov_vae.shape[0])),axis=0)
gnb = GaussianNB()
y_predne = gnb.fit(train_1,train_label1).predict(test)
y_pro = gnb.predict_proba(test)[:,1]
re = compute(test_label,y_predne,y_pro)
print(info_0,info_1,dive_0,dive_1)
print(re)
# np.savetxt(f1,[info_0,info_1,dive_0,dive_1],fmt='%d')
# np.savetxt(f1,np.array([re]),fmt='%.4f')
#######ndo
ov_ndo,_,_ = normal(train,100)
# ov_ndo,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
model = sklearn.neighbors.KNeighborsClassifier()
model.fit(train,np.arange(0,train_label.shape[0]))#求最近邻的编号
pre = model.predict(ov_ndo)
info_0 = len(pre[train_label[pre]==0])#生成样本中0类标的个数
info_1 = len(pre[train_label[pre]==1])#生成样本中1类标的个数
pre = model.predict(ov_ndo)
pre = np.array(list(set(pre)))
dive_0 = len(pre[train_label[pre]==0])
dive_1 = len(pre[train_label[pre]==1])
train_1 = np.concatenate((train,ov_ndo),axis=0)
train_label1 = np.concatenate((train_label,np.ones(ov_ndo.shape[0])),axis=0)
gnb = GaussianNB()
y_predne = gnb.fit(train_1,train_label1).predict(test)
y_pro = gnb.predict_proba(test)[:,1]
re = compute(test_label,y_predne,y_pro)
print(info_0,info_1,dive_0,dive_1)
print(re)
# np.savetxt(f2,[info_0,info_1,dive_0,dive_1],fmt='%d')
# np.savetxt(f2,np.array([re]),fmt='%.4f') #get_resultNB(1,result,ov_ndo)
#####smote
ov_smo,_,_ = smote(train)
# ov_smo,_,_ = mnist_vae(train[train_label==1],train.shape[0],para_o)
model = sklearn.neighbors.KNeighborsClassifier()
model.fit(train,np.arange(0,train_label.shape[0]))#求最近邻的编号
pre = model.predict(ov_smo)
info_0 = len(pre[train_label[pre]==0])#生成样本中0类标的个数
info_1 = len(pre[train_label[pre]==1])#生成样本中1类标的个数
pre = model.predict(ov_smo)
pre = np.array(list(set(pre)))
dive_0 = len(pre[train_label[pre]==0])
dive_1 = len(pre[train_label[pre]==1])
train_1 = np.concatenate((train,ov_smo),axis=0)
train_label1 = np.concatenate((train_label,np.ones(ov_smo.shape[0])),axis=0)
gnb = GaussianNB()
y_predne = gnb.fit(train_1,train_label1).predict(test)
y_pro = gnb.predict_proba(test)[:,1]
re = compute(test_label,y_predne,y_pro)
print(info_0,info_1,dive_0,dive_1)
print(re)
# np.savetxt(f3,[info_0,info_1,dive_0,dive_1],fmt='%d')
# np.savetxt(f3,np.array([re]),fmt='%.4f')
f1.close()
f2.close()
f3.close() | [
"442049887@qq.com"
] | 442049887@qq.com |
8fb9a5d00e42f21928065cf01922f2705535718d | 073d700d517cfc6f87911be9ec153e012e16398f | /cupcakery/urls.py | f6eb7084554302f6711a93af2ccf3652b2d32c92 | [] | no_license | Baranix/cupcakery | 76686127a1499dc56cf293cd1b37dd4cbd079205 | 052d873196398b55d6bd65f452f55c64414f5c83 | refs/heads/master | 2021-01-17T17:59:37.157488 | 2016-10-21T08:39:10 | 2016-10-21T08:39:10 | 70,790,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | """cupcakery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^store/', include('store.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"nikki.ebora@gmail.com"
] | nikki.ebora@gmail.com |
85559cad23bef27acfad5460f0f619b730ca763d | 93720fa8240ed31835d53480a1db31519e5f22ea | /src/contest/migrations/setup_keyspaces.py | 1c9c5af1b564e9ce90742925a7f1c1b588b62935 | [] | no_license | riccitensor/contest-py | 788075916bbc6d78c8280977d542f78446151bef | c32f0321bd5819df9658cbeeb368aa70f3245af2 | refs/heads/master | 2021-01-25T08:55:23.822311 | 2012-06-11T19:44:05 | 2012-06-11T19:44:05 | 9,649,980 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | '''
Created on 25.12.2011
@author: christian.winkelmann@plista.com
'''
from contest.config import config_global
from contest.config import config_local
import cql
class Setup_Keyspaces(object):
def __init__(self):
dbconn = cql.connect(config_local.cassandra_host, config_local.cassandra_port )
cursor = dbconn.cursor()
try:
cql_query = """ DROP KEYSPACE :keyspace; """
cursor.execute(cql_query, dict(keyspace = config_global.cassandra_default_keyspace))
except cql.ProgrammingError as programmingError:
print cql_query
print programmingError
try:
cql_query = """ CREATE KEYSPACE :keyspace WITH strategy_class = 'SimpleStrategy'
AND strategy_options:replication_factor = 1; """
cursor.execute(cql_query, dict(keyspace = config_global.cassandra_default_keyspace))
except cql.ProgrammingError as programmingError:
print cql_query
print programmingError
if __name__ == '__main__':
sK = Setup_Keyspaces() | [
"christian.winkelmann@plista.com"
] | christian.winkelmann@plista.com |
6d61713b8a648e26e0264e207a0bcba10f35cc49 | bcdd32e48435fdbcc717b300be34bccec188404f | /catkin_ws/src/camera_motor/Prediction.py | 8df13d9108796420a47c80b50af01f017da555de | [] | no_license | tuf22191/Senior_Design_Project_Spring_2017 | 8ce899a2138747f4aca9ec7f5e0f6d94dd1d56de | 21eff0ff5c2129b56c3d99ad01b9b49ffb2affbd | refs/heads/master | 2021-01-11T20:08:39.739420 | 2017-03-31T00:48:21 | 2017-03-31T00:48:21 | 79,049,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,980 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from camera_motor/Prediction.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
#import genpy
import std_msgs.msg
class Prediction(genpy.Message):
_md5sum = "f251b6023fb3143f56d892530c9c6948"
_type = "camera_motor/Prediction"
_has_header = False #flag to mark the presence of a Header object
_full_text = """std_msgs/Time msg_sent_time
float64 x_vel
float64 y_vel
std_msgs/Duration time_to_impact
================================================================================
MSG: std_msgs/Time
time data
================================================================================
MSG: std_msgs/Duration
duration data
"""
__slots__ = ['msg_sent_time','x_vel','y_vel','time_to_impact']
_slot_types = ['std_msgs/Time','float64','float64','std_msgs/Duration']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
msg_sent_time,x_vel,y_vel,time_to_impact
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Prediction, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.msg_sent_time is None:
self.msg_sent_time = std_msgs.msg.Time()
if self.x_vel is None:
self.x_vel = 0.
if self.y_vel is None:
self.y_vel = 0.
if self.time_to_impact is None:
self.time_to_impact = std_msgs.msg.Duration()
else:
self.msg_sent_time = std_msgs.msg.Time()
self.x_vel = 0.
self.y_vel = 0.
self.time_to_impact = std_msgs.msg.Duration()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_2I2d2i.pack(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.msg_sent_time is None:
self.msg_sent_time = std_msgs.msg.Time()
if self.time_to_impact is None:
self.time_to_impact = std_msgs.msg.Duration()
end = 0
_x = self
start = end
end += 32
(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs,) = _struct_2I2d2i.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_2I2d2i.pack(_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types

    Identical layout to deserialize(); the numpy module is accepted for API
    symmetry but unused because this message has no array fields.

    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    :raises: :exc:`genpy.DeserializationError` if the buffer is malformed
    """
    try:
      if self.msg_sent_time is None:
        self.msg_sent_time = std_msgs.msg.Time()
      if self.time_to_impact is None:
        self.time_to_impact = std_msgs.msg.Duration()
      end = 0
      _x = self
      start = end
      # 32 bytes = 2*4 (uint32) + 2*8 (float64) + 2*4 (int32); see _struct_2I2d2i.
      end += 32
      (_x.msg_sent_time.secs, _x.msg_sent_time.nsecs, _x.x_vel, _x.y_vel, _x.time_to_impact.secs, _x.time_to_impact.nsecs,) = _struct_2I2d2i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled (de)serialization formats shared by the methods above.
_struct_I = genpy.struct_I
# little-endian: 2x uint32, 2x float64, 2x int32 (32 bytes total)
_struct_2I2d2i = struct.Struct("<2I2d2i")
| [
"tuf22191@temple.edu"
] | tuf22191@temple.edu |
e8fd309c9d59ebfbf16e05e16b272c2b4b073b2a | c3470e984f3c27766f16da46dde1467004469c1f | /venv/lib/python3.7/base64.py | 551d86a0e7946c38b9135752f36068e2ff83adcf | [] | no_license | binwei-yu/zqweb | e647077b320d14988efda60af224a37e50cb19fc | 2e3036dd254230272614fafd375e5142aacdb9d5 | refs/heads/master | 2020-05-07T22:04:13.020298 | 2019-04-25T20:14:15 | 2019-04-25T20:14:15 | 180,927,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | /Users/zhouqi/anaconda3/lib/python3.7/base64.py | [
"zqcarlos@umich.edu"
] | zqcarlos@umich.edu |
6a730ff82c333d93882e1a954aba3e8f1b3fef01 | 5d3b79b7f823a7c66a61c065be83cced6073731d | /Basics/Tuple.py | 751640c9af345dd68ec4ec62ef35c22c54f63280 | [] | no_license | gtripti/PythonBasics | e4d548c34fdfd47c38f59a44a53750295a59b736 | 2a13178001888ce3093e253ed49203b958489472 | refs/heads/master | 2020-06-25T11:46:45.559838 | 2019-08-24T19:16:15 | 2019-08-24T19:16:15 | 199,299,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | t = (1,2,3)
l = [1,2,3]
print(type(t))
print(type(l))
print(len(t))
t = ('one',2)
# slicing and indexing
print(t[0])
print(t[-1])
# built-in methods
# 1. count
t = ('a','a','b')
print(t.count('a'))
# 2. index
print(t.index('a'))
# mutation is possible with a list but not with a tuple
l = [1,2,3]
print(l)
l[0] = 'NEW'
print(l)
print(t)
# BUG FIX: the bare `t[0] = 'NEW'` crashed the demo with an uncaught
# TypeError; catch it so the immutability lesson prints the error instead
# of terminating the script.
try:
    t[0] = 'NEW'
except TypeError as err:
    print(err)
"tripti.gupta97@gmail.com"
] | tripti.gupta97@gmail.com |
8cf0710d3d1e894a8ffc673a018df78e74505973 | 81e84e22e5d8ce033499d382c584a57acd8af1d3 | /seconde_app/views.py | 984937b337e251b2cfde0a28b1a4f59465096650 | [] | no_license | LakhanKumarGautam/travello | 7525f3edd352995d2c2d85f40043102df78c9153 | 8ca9be9f211039f38176b2cdd71a19ccb04a2304 | refs/heads/master | 2022-07-15T09:25:31.386645 | 2020-05-18T18:05:07 | 2020-05-18T18:05:07 | 259,970,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def home2(request):
    """Render the home2 template with a small greeting context."""
    context = {'hii_Lakhan': "this is my best friend"}
    return render(request, 'home2.html', context=context)
def add(request):
    """Read val1, val2 and val3 from the POSTed form, sum them as integers,
    and render the result page."""
    total = sum(int(request.POST[key]) for key in ('val1', 'val2', 'val3'))
    return render(request, 'result.html', {'result': total})
"50776528+LakhanKumarGautam@users.noreply.github.com"
] | 50776528+LakhanKumarGautam@users.noreply.github.com |
0f31bab85029d70a6e19843c3d32bb3f395b7394 | 395707d0df8dd0df9667401b4bde0b38960b1e24 | /prefect-experiments/flow-of-flows.py | 9994d5303773c859051b69a0523ae07968bcb18b | [] | no_license | agatagawad/prefect-experiments | 539114b68f2ffdb4ec3f856f3b8d90213683440a | 7e4175d02a2e94f5a9af94a97733bf3d0a1c864f | refs/heads/main | 2023-04-02T09:09:28.011771 | 2021-03-27T17:52:10 | 2021-03-27T17:52:10 | 352,137,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,853 | py |
# Example from https://docs.prefect.io/core/idioms/flow-to-flow.html
from prefect import Flow, task
# from prefect.core import task
from prefect.core.parameter import Parameter
from prefect.tasks.prefect import StartFlowRun
@task
def A_task1(val):
    """Prefect task: multiply the input by 10."""
    return 10*val
@task
def A_task2(val):
    """Prefect task: add 5 to the input."""
    return val + 5
# Build and register flow A: A_param -> A_task1 -> A_task2.
with Flow(name='A') as flow_A:
    A_param = Parameter('A_param', 2)
    x = A_task1(A_param)
    y = A_task2(x)
flow_A.register(project_name='examples')
@task
def B_task1(val):
    """Prefect task: multiply the input by 20."""
    return 20*val
@task
def B_task2(val):
    """Prefect task: add 15 to the input."""
    return val + 15
# Build and register flow B: B_param -> B_task1 -> B_task2.
with Flow(name='B') as flow_B:
    B_param = Parameter('B_param', 1)
    x = B_task1(B_param)
    y = B_task2(x)
flow_B.register(project_name='examples')
@task
def C_task1(val):
    """Prefect task: multiply the input by 20."""
    return 20*val
@task
def C_task2(val):
    """Prefect task: add 15 to the input."""
    return val + 15
# Build and register flow C: C_param -> C_task1 -> C_task2.
with Flow(name='C') as flow_C:
    C_param = Parameter('C_param', 1)
    x = C_task1(C_param)
    y = C_task2(x)
flow_C.register(project_name='examples')
@task
def D_task1(val):
    """Prefect task: multiply the input by 20."""
    return 20*val
@task
def D_task2(val):
    """Prefect task: add 15 to the input."""
    return val + 15
@task
def D_task3(x, y, val):
    """Prefect task: sum the two intermediate results with the parameter."""
    return x + y + val
# Build and register flow D: D_param feeds D_task1 and D_task3.
with Flow(name='D') as flow_D:
    # BUG FIX: the parameter was assigned to a variable named C_param while
    # the task calls below referenced D_param, which was undefined and raised
    # a NameError as soon as this flow was built.
    D_param = Parameter('D_param', 1)
    x = D_task1(D_param)
    y = D_task2(x)
    z = D_task3(x, y, D_param)
flow_D.register(project_name='examples')
# assumes you have registered the following flows in a project named "examples"
flow_a = StartFlowRun(flow_name="A", project_name="examples", wait=True)
flow_b = StartFlowRun(flow_name="B", project_name="examples", wait=True)
flow_c = StartFlowRun(flow_name="C", project_name="examples", wait=True)
flow_d = StartFlowRun(flow_name="D", project_name="examples", wait=True)
# Orchestration: A runs first, then B and C (both downstream of A), and D
# runs only after both B and C have finished.
with Flow("parent-flow") as flow:
    b = flow_b(upstream_tasks=[flow_a])
    c = flow_c(upstream_tasks=[flow_a])
    d = flow_d(upstream_tasks=[b, c])
flow.register(project_name='examples')
"agata.gawad@yher.be"
] | agata.gawad@yher.be |
082fce6cf017f2b1f42c80cd64d20110852737af | 5e709e364397d8e26a8c188057b544d44b9fa2d5 | /blog/migrations/0001_initial.py | 579fcc154447c379e858f2cdbda9709ceeaed6f2 | [] | no_license | cdavis0119/my-first-blog | 56136df863f04eacb283643884e7c638d0a0da7a | 8d526cf4bf68b7bf9e9e18e544b188baabc698f2 | refs/heads/master | 2021-01-01T17:19:53.969453 | 2017-07-22T22:06:35 | 2017-07-22T22:06:35 | 98,050,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-22 18:06
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the blog Post model with an
    author foreign key onto the swappable auth user model."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"courtneyldavis19@gmail.com"
] | courtneyldavis19@gmail.com |
46ca9958a730d18a7f5981a994caa4ea011f3532 | 75388db141483f6aa8994df4f97e83584b93e50e | /movie/movie_app/migrations/0001_initial.py | b7a48040292ddc91dcda7436e9623e4bcc3b592a | [] | no_license | bonrg/movies | 8a2c8b44b3525cce1633b163e4879a9c8d03c3d6 | e1703a3a887d87a5524e9d71c51700b83aa7e984 | refs/heads/master | 2022-11-29T06:26:01.097169 | 2020-03-25T07:26:55 | 2020-03-25T07:26:55 | 248,919,260 | 0 | 0 | null | 2022-11-22T05:24:58 | 2020-03-21T06:22:42 | HTML | UTF-8 | Python | false | false | 7,010 | py | # Generated by Django 3.0.4 on 2020-03-21 09:23
import datetime
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.fields
class Migration(migrations.Migration):
    """Initial movie_app schema: Actor, Category, Genre, Movie, RatingStar,
    Reviews, Rating and MovieShots."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Actor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('age', models.PositiveSmallIntegerField(default=0, verbose_name='Age')),
                ('description', models.TextField(verbose_name='Description')),
                ('image', models.ImageField(upload_to='actors/', verbose_name='Image')),
            ],
            options={
                'verbose_name': 'Actors and Producers',
                'verbose_name_plural': 'Actors and Producers',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='Category')),
                ('description', models.TextField(verbose_name='Description')),
                ('url', models.SlugField(max_length=160, unique=True)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('description', models.TextField(verbose_name='Description')),
                ('url', models.SlugField(max_length=160, unique=True)),
            ],
            options={
                'verbose_name': 'Genre',
                'verbose_name_plural': 'Genres',
            },
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='Title')),
                ('tagline', models.CharField(default='', max_length=100, verbose_name='Slogan')),
                ('description', models.TextField(verbose_name='Description')),
                ('poster', models.ImageField(upload_to='movies/', verbose_name='Poster')),
                ('year', models.PositiveSmallIntegerField(default=2019, verbose_name='Issue date')),
                ('country', models.CharField(max_length=30, verbose_name='Country')),
                ('world_premiere', models.DateField(default=datetime.date.today, verbose_name='Premiere in world')),
                ('budget', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Budget')),
                ('fees_in_usa', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Fees in USA')),
                ('fees_in_world', models.PositiveIntegerField(default=0, help_text='in dollars', verbose_name='Fees in world')),
                ('url', models.SlugField(max_length=160, unique=True)),
                ('draft', models.BooleanField(default=False, verbose_name='Draft')),
                ('actors', models.ManyToManyField(related_name='film_actor', to='movie_app.Actor', verbose_name='actors')),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='movie_app.Category', verbose_name='Category')),
                ('directors', models.ManyToManyField(related_name='film_director', to='movie_app.Actor', verbose_name='producer')),
                ('genres', models.ManyToManyField(to='movie_app.Genre', verbose_name='genres')),
            ],
            options={
                'verbose_name': 'Movie',
                'verbose_name_plural': 'Movies',
            },
        ),
        migrations.CreateModel(
            name='RatingStar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.PositiveSmallIntegerField(default=0, verbose_name='Value')),
            ],
            options={
                'verbose_name': 'Star rating',
                'verbose_name_plural': 'Stars rating',
            },
        ),
        migrations.CreateModel(
            name='Reviews',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('text', models.TextField(max_length=5000, verbose_name='Message')),
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.Movie', verbose_name='movie')),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='movie_app.Reviews', verbose_name='Parent')),
            ],
            options={
                'verbose_name': 'Review',
                'verbose_name_plural': 'Reviews',
            },
        ),
        migrations.CreateModel(
            name='Rating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(max_length=15, verbose_name='IP address')),
                # BUG FIX: on_delete was django.db.models.fields.CharField — a
                # field class, not a deletion handler. Use CASCADE, matching
                # the sibling 'star' foreign key below.
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.Movie', verbose_name='movie')),
                ('star', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.RatingStar', verbose_name='star')),
            ],
            options={
                'verbose_name': 'Rating',
                'verbose_name_plural': 'Ratings',
            },
        ),
        migrations.CreateModel(
            name='MovieShots',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='Title')),
                ('description', models.TextField(verbose_name='Description')),
                ('image', models.ImageField(upload_to='movie_shots/', verbose_name='Image')),
                ('movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movie_app.Movie', verbose_name='Movie')),
            ],
            options={
                'verbose_name': 'Shot on movie',
                'verbose_name_plural': 'Shots on movie',
            },
        ),
    ]
| [
"a.uderbay@kazdream.kz"
] | a.uderbay@kazdream.kz |
9f08f0ce81f15f2afdcd8017b72a7d1a9acf39fd | 41976606488ba795e201c05cccdc4c39a3015875 | /app/views/perfil_views.py | 74cf6feb731dcf167f44950f98c2a49d16374f39 | [] | no_license | andersonvaler/capstone-backend-Q3-python | c16158f52ef57b47cb0e36d6f929ee3ef1c41fbf | 8cf10943e2ca98939e55f53c0a60fad882e0bc32 | refs/heads/main | 2023-06-23T10:44:30.524647 | 2021-07-24T16:24:03 | 2021-07-24T16:24:03 | 389,144,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | from app.models.lojistas_model import Lojistas
from flask_jwt_extended import jwt_required
from app.models.clientes_model import Clientes
from flask import Blueprint, jsonify
# Blueprint collecting the profile (perfil) endpoints; registered by the app factory.
bp = Blueprint("bp_perfil", __name__)
@bp.get("/lojistas/<int:lojista_id>")
@jwt_required()
def get_lojista_id(lojista_id):
    """Fetch a single lojista by primary key; respond 404 when absent."""
    found = Lojistas.query.filter_by(id=lojista_id).first()
    if found is None:
        return {"Error": "Lojista não encontrado."}, 404
    return jsonify(found.serialized)
@bp.get("/clientes/<int:cliente_id>")
@jwt_required()
def get_cliente_id(cliente_id):
    """Fetch a single cliente by primary key; respond 404 when absent."""
    found = Clientes.query.filter_by(id=cliente_id).first()
    if found is None:
        return {"Error": "Cliente não encontrado."}, 404
    return jsonify(found.serialized)
@bp.get("/clientes")
@jwt_required()
def get_all_clientes():
    """Return every cliente in serialized form."""
    return jsonify([cliente.serialized for cliente in Clientes.query.all()])
@bp.get("/lojistas")
@jwt_required()
def get_all_lojistas():
    """Return every lojista in serialized form."""
    return jsonify([lojista.serialized for lojista in Lojistas.query.all()])
| [
"andersonvaler@gmail.com"
] | andersonvaler@gmail.com |
3dca43e0102cde8dac9752705559f1b75cccde3d | 8203e42d18ea718302d19029b1df8a344d3a4ad9 | /quality/views.py | 75475e725155f9ebfceda9adf97f6b5ae7c31e0a | [] | no_license | sbsimo/quality | 07c6774d352f753aa11edc441c232dd507b53bf9 | a463cca3b223e8b135b7079c4aec9623e8b149fd | refs/heads/master | 2021-01-25T08:55:16.111596 | 2012-06-14T14:12:34 | 2012-06-14T14:12:34 | 2,779,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,129 | py | from geonode import settings
from geonode.maps.views import _perms_info, MAP_LEV_NAMES, _perms_info_json, \
LAYER_LEV_NAMES, _describe_layer
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import HttpResponse
from geonode.maps.models import Map, MapLayer, Layer
import json
from django.template import RequestContext, loader, Context
from django.utils.translation import ugettext as _
#from cartography.models import Document
from django.contrib.auth.decorators import login_required
from geonode.maps.views import default_map_config
from django.views.decorators.csrf import csrf_exempt
from quality.models import Subtopic, LayerSubtopic, QualityMatrix
#imgtypes = ['jpg','jpeg','tif','tiff','png','gif']
#def documentdetail(request, docid):
# """
# The view that show details of each document
# """
# document = get_object_or_404(Document, pk=docid)
# map = document.maps.all()[0]
# if not request.user.has_perm('maps.view_map', obj=map):
# return HttpResponse(loader.render_to_string('401.html',
# RequestContext(request, {'error_message':
# _("You are not allowed to view this map.")})), status=401)
# return render_to_response("cartography/docinfo.html", RequestContext(request, {
# 'map': map,
# 'permissions_json': json.dumps(_perms_info(map, MAP_LEV_NAMES)),
# 'document': document,
# 'imgtypes': imgtypes
# }))
#def newmaptpl(request):
# config = default_map_config()[0]
# return render_to_response('cartography/newmaptpl.html',RequestContext(request, {'config':json.dumps(config)}))
#@login_required
#def upload_document(request,mapid=None):
# if request.method == 'GET':
# return render_to_response('cartography/document_upload.html',
# RequestContext(request,{'mapid':mapid,}),
# context_instance=RequestContext(request)
# )
# elif request.method == 'POST':
# mapid = str(request.POST['map'])
# file = request.FILES['file']
# title = request.POST['title']
# document = Document(title=title, file=file)
# document.save()
# document.maps.add(Map.objects.get(id=mapid))
# return HttpResponse(json.dumps({'success': True,'redirect_to':'/maps/' + str(mapid)}))
@csrf_exempt
def layerController(request, layername):
    """Dispatch layer sub-actions based on the raw query string.

    ``?describe`` / ``?remove`` / ``?update`` / ``?style`` select management
    actions; any other query string falls through to rendering the layer
    detail page after a permission check.

    NOTE(review): _removeLayer, _updateLayer and _changeLayerDefaultStyle are
    not among the imports visible at the top of this file — confirm they are
    defined or imported elsewhere.
    """
    DEFAULT_MAP_CONFIG, DEFAULT_BASE_LAYERS = default_map_config()
    layer = get_object_or_404(Layer, typename=layername)
    if (request.META['QUERY_STRING'] == "describe"):
        return _describe_layer(request,layer)
    if (request.META['QUERY_STRING'] == "remove"):
        return _removeLayer(request,layer)
    if (request.META['QUERY_STRING'] == "update"):
        return _updateLayer(request,layer)
    if (request.META['QUERY_STRING'] == "style"):
        return _changeLayerDefaultStyle(request,layer)
    else:
        # Default: render the layer page; 401 when the user lacks view rights.
        if not request.user.has_perm('maps.view_layer', obj=layer):
            return HttpResponse(loader.render_to_string('401.html',
                RequestContext(request, {'error_message':
                _("You are not permitted to view this layer")})), status=401)
        metadata = layer.metadata_csw()
        maplayer = MapLayer(name = layer.typename, ows_url = settings.GEOSERVER_BASE_URL + "wms")
        # center/zoom don't matter; the viewer will center on the layer bounds
        map = Map(projection="EPSG:900913")
        # One-to-one quality record attached to the layer (see quality.models).
        qualityRecord = layer.qualitymatrix
        return render_to_response('quality/layer.html', RequestContext(request, {
            "layer": layer,
            "metadata": metadata,
            "viewer": json.dumps(map.viewer_json(* (DEFAULT_BASE_LAYERS + [maplayer]))),
            "permissions_json": _perms_info_json(layer, LAYER_LEV_NAMES),
            "GEOSERVER_BASE_URL": settings.GEOSERVER_BASE_URL,
            "qualityRecord": qualityRecord
        }))
# Shown to users when a data upload fails for an unanticipated reason.
# NOTE(review): "contact and administrator" looks like a typo for
# "contact an administrator" — user-facing copy, change deliberately.
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact and administrator if the problem continues.")
def listSubtopics(request):
    """Render the page listing every subtopic in the database."""
    subtopics = Subtopic.objects.all()
    context = RequestContext(request, {'allSubs': subtopics})
    return render_to_response('quality/subtopics.html', context)
def ask4weights(request):
    """Render the quality-weights form for the selected subtopic.

    Expects the subtopic primary key in ``request.GET["subtopic"]``.
    Non-GET requests get a 401 page, matching the other views in this module.
    """
    if request.method == 'GET':
        # BUG FIX: request.GET.__getitem__("subtopic")[0] took the first
        # *character* of the value, truncating any primary key >= 10.
        # calculateBest below already reads the full value; do the same here.
        subtopic_pk = request.GET['subtopic']
        subtopic = Subtopic.objects.get(pk=subtopic_pk)
        return render_to_response('quality/ask4weights.html', RequestContext(request, {
            'subtopic': subtopic,
            'subtopic_pk': subtopic_pk,
        }))
    else:
        return HttpResponse(loader.render_to_string('401.html',
            RequestContext(request, {'error_message':
                _("You are not permitted to view this layer")})), status=401)
def calculateBest(request):
    """Rank a subtopic's layers by a user-weighted quality score.

    Reads twelve integer weights from the query string, computes for every
    layer linked to the subtopic a weighted and an unweighted sum over its
    QualityMatrix fields, and renders the ranking page.
    """
    if request.method == 'GET':
        # get the weights input by the client
        # (order matters: it must match the qualityVector field order below)
        weightVector = [request.GET.__getitem__("geographicExtent")]
        weightVector.append(request.GET.__getitem__("licensingConstraint"))
        weightVector.append(request.GET.__getitem__("scaleDenominator"))
        weightVector.append(request.GET.__getitem__("update"))
        weightVector.append(request.GET.__getitem__("temporalExtent"))
        weightVector.append(request.GET.__getitem__("fitness4use"))
        weightVector.append(request.GET.__getitem__("thematicRichness"))
        weightVector.append(request.GET.__getitem__("integration"))
        weightVector.append(request.GET.__getitem__("dataIntegrity"))
        weightVector.append(request.GET.__getitem__("positionalAccuracy"))
        weightVector.append(request.GET.__getitem__("thematicAccuracy"))
        weightVector.append(request.GET.__getitem__("completeness"))
        # get the subtopic and the set of related layersubtopics
        subtopic_id = request.GET.__getitem__("subtopic_pk")
        subtopic = Subtopic.objects.get(pk=subtopic_id)
        layersubtopics = subtopic.layersubtopic_set.all()
        # generate a list needed for storing the results of the total score calculation
        results = []
        # loop on layersubtopics in order to calculate the total score for each one
        # and store them into the newly created list
        for layersubtopic in layersubtopics:
            currentLayer = layersubtopic.layer
            qualityVector = QualityMatrix.objects.get(layer=currentLayer)
            # calculate the quality total score of the layer:
            # dot product of the quality vector with the client weights
            currentScore = 0
            unWeightedScore = 0
            currentScore = qualityVector.geographicExtent*int(weightVector[0]) +\
                qualityVector.licensingConstraint*int(weightVector[1])+\
                qualityVector.scaleDenominator*int(weightVector[2])+\
                qualityVector.update*int(weightVector[3])+\
                qualityVector.temporalExtent*int(weightVector[4])+\
                qualityVector.fitness4Use*int(weightVector[5])+\
                qualityVector.thematicRichness*int(weightVector[6])+\
                qualityVector.integration*int(weightVector[7])+\
                qualityVector.dataIntegrity*int(weightVector[8])+\
                qualityVector.positionalAccuracy*int(weightVector[9])+\
                qualityVector.thematicAccuracy*int(weightVector[10])+\
                qualityVector.completeness*int(weightVector[11])
            # plain sum of the quality fields, shown next to the weighted score
            unWeightedScore = qualityVector.geographicExtent +\
                qualityVector.licensingConstraint + qualityVector.scaleDenominator +\
                qualityVector.update + qualityVector.temporalExtent + \
                qualityVector.fitness4Use + qualityVector.thematicRichness + \
                qualityVector.integration + qualityVector.dataIntegrity + \
                qualityVector.positionalAccuracy + qualityVector.thematicAccuracy + \
                qualityVector.completeness
            curLayerId = layersubtopic.layer.id
            curLayerName = Layer.objects.get(id=curLayerId)
            results.append([curLayerName, currentScore, unWeightedScore])
        return render_to_response("quality/layerRanking.html", RequestContext(request, {
            "results" : results,
        }))
#        winnerLayer = Layer.objects.get(id=winner_layer_id)
#        layername = winnerLayer.typename
#        return redirect("/data/" + layername)
#        return layerController(request, layername)
#        return render_to_response('quality/temp.html', RequestContext(request, {
#            'weightVector': weightVector,
#            'layername': layername,
#        }))
    else:
        return HttpResponse(loader.render_to_string('401.html',
            RequestContext(request, {'error_message':
                _("You are not permitted to view this layer")})), status=401)
| [
"simone.blb@gmail.com"
] | simone.blb@gmail.com |
b06c0a336f7918f4804bc29c80b8474a18f07c42 | 3980219a237537ffbb2c1bdb25cbc606e2bc76dd | /teuthology/suite/placeholder.py | 4669c5faa101c6747fff433b1afbc57436791754 | [
"MIT"
] | permissive | dzedro/teuthology | 546ff04c906aaa8a846ff046a977ac55194e7494 | ed015732753d7564157f9f45c1fb1b868f88574d | refs/heads/master | 2020-03-17T21:03:07.971902 | 2018-06-04T12:29:23 | 2018-06-04T12:29:23 | 133,941,039 | 0 | 0 | MIT | 2018-05-18T10:38:30 | 2018-05-18T10:38:29 | null | UTF-8 | Python | false | false | 3,371 | py | import copy
class Placeholder(object):
    """
    A placeholder for use with substitute_placeholders. Simply has a 'name'
    attribute.
    """
    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # Makes unsubstituted placeholders easy to spot when a generated
        # config dict is printed or logged.
        return "Placeholder(%r)" % (self.name,)
def substitute_placeholders(input_dict, values_dict):
    """
    Replace any Placeholder instances with values named in values_dict. In the
    case of None values, the key is omitted from the result.

    Searches through nested dicts.

    :param input_dict: A dict which may contain one or more Placeholder
                       instances as values.
    :param values_dict: A dict, with keys matching the 'name' attributes of all
                        of the Placeholder instances in the input_dict, and
                        values to be substituted.
    :returns: The modified input_dict
    """
    input_dict = copy.deepcopy(input_dict)

    def _substitute(input_dict, values_dict):
        # BUG FIX: iterate over a snapshot of the items; deleting keys while
        # iterating a live dict view raises RuntimeError on Python 3.
        for key, value in list(input_dict.items()):
            if isinstance(value, dict):
                _substitute(value, values_dict)
            elif isinstance(value, Placeholder):
                if values_dict[value.name] is None:
                    del input_dict[key]
                    continue
                # If there is a Placeholder without a corresponding entry in
                # values_dict, we will hit a KeyError - we want this.
                input_dict[key] = values_dict[value.name]
        return input_dict

    return _substitute(input_dict, values_dict)
# Template for the config that becomes the base for each generated job config.
# Placeholder values are filled in by substitute_placeholders(); entries whose
# substituted value is None are dropped from the generated config.
dict_templ = {
    'branch': Placeholder('ceph_branch'),
    'sha1': Placeholder('ceph_hash'),
    'teuthology_branch': Placeholder('teuthology_branch'),
    'archive_upload': Placeholder('archive_upload'),
    'archive_upload_key': Placeholder('archive_upload_key'),
    'machine_type': Placeholder('machine_type'),
    'nuke-on-error': True,
    'os_type': Placeholder('distro'),
    'os_version': Placeholder('distro_version'),
    'overrides': {
        'admin_socket': {
            'branch': Placeholder('ceph_branch'),
        },
        'ceph': {
            # Debug levels for the ceph daemons under test.
            'conf': {
                'mon': {
                    'debug mon': 20,
                    'debug ms': 1,
                    'debug paxos': 20},
                'osd': {
                    'debug filestore': 20,
                    'debug journal': 20,
                    'debug ms': 1,
                    'debug osd': 25
                }
            },
            'log-whitelist': ['slow request'],
            'sha1': Placeholder('ceph_hash'),
        },
        'ceph-deploy': {
            'conf': {
                'client': {
                    'log file': '/var/log/ceph/ceph-$name.$pid.log'
                },
                'mon': {
                    'osd default pool size': 2
                }
            }
        },
        'install': {
            'ceph': {
                'sha1': Placeholder('ceph_hash'),
            }
        },
        'workunit': {
            'sha1': Placeholder('ceph_hash'),
        }
    },
    'repo': Placeholder('ceph_repo'),
    'suite': Placeholder('suite'),
    'suite_repo': Placeholder('suite_repo'),
    'suite_relpath': Placeholder('suite_relpath'),
    'suite_branch': Placeholder('suite_branch'),
    'suite_sha1': Placeholder('suite_hash'),
    'tasks': [],
}
| [
"ncutler@suse.com"
] | ncutler@suse.com |
44c17b4c002a53f8d2f4fddad9e6dde17fc7dbcb | dbebe89c24d43b54d5b0a83f04b5d5d753f08b0e | /webapi/sandbox/testapp_Https_Sim_Fianancial.py | 6511a410e12e5200af33ed8c6e86ea7603209cad | [] | no_license | jianhuayan/traffic-dispersive | 91d07bd82c86678922607624a47ff825abd64c72 | 8a5ee0272ed4a16c3c50c36b070e1100466a0a42 | refs/heads/master | 2021-07-10T09:46:19.702409 | 2017-10-13T15:24:31 | 2017-10-13T15:24:31 | 106,841,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,452 | py | from ixia.webapi import *
import ixchariotApi
import os
from subprocess import call
import dvn
# Build the IxChariot web API endpoint from the address configured in dvn.const.
IxiaIPaddr = dvn.const.IxiachariotIP
# webServerAddress = "https://'dvn.const.IxiachariotIP'"
webServerAddress = "https://" + IxiaIPaddr
print dvn.const.IxiachariotIP
print webServerAddress
apiVersion = "v1"
username = "N/A"
password = "N/A"
# NOTE(review): hard-coded credential — consider loading the API key from
# configuration instead of committing it to source control.
apiKey = "e31589d3-4cf1-4bd9-854d-18e9eac768a8" # Get the API Key from the web interface, Menu > My Account > Api Key
print "Connecting to " + webServerAddress
# api = webApi.connect(webServerAddress, apiVersion, None, username, password)
# It is also possible to connect with the API Key instead of username and password, using:
api = webApi.connect(webServerAddress, apiVersion, apiKey, None, None)
session = api.createSession("ixchariot")
print "Created session %s" % session.sessionId
print "Starting the session..."
session.startSession()
print "Configuring the test..."
# Configure few test options
testOptions = session.httpGet("config/ixchariot/testOptions")
testOptions.testDuration = 20
testOptions.consoleManagementQoS = ixchariotApi.getQoSTemplateFromResourcesLibrary(session, "Best Effort")
testOptions.endpointManagementQoS = ixchariotApi.getQoSTemplateFromResourcesLibrary(session, "Best Effort")
session.httpPut("config/ixchariot/testOptions", data = testOptions)
# Available endpoints used in test (list of 'testIP/mgmtIP' strings)
src_EndpointsList = [dvn.const.IxiaEpoint1 + "/" + dvn.const.IxiaMgmt1]
dst_EndpointsList = [dvn.const.IxiaEpoint2 + "/" + dvn.const.IxiaMgmt2]
# Create a new ApplicationMix
name = "AppMix 1"
objective = "USERS"
users = 1
direction = "SRC_TO_DEST"
topology = "FULL_MESH"
appmix = ixchariotApi.createApplicationMix(name, objective, users, direction, topology)
session.httpPost("config/ixchariot/appMixes", data = appmix)
# Configure endpoints for the AppMix
# This demonstrates how to manually assign endpoints to the test configuration using known IP addresses.
# If you want to assign an endpoint discovered by the Registration Server, use the ixchariotApi.getEndpointFromResourcesLibrary() function
# to get the data for httpPost
for src_Endpoint in src_EndpointsList:
    # Each entry is 'testIP/mgmtIP'; split into its two addresses.
    ips = src_Endpoint.split('/')
    session.httpPost("config/ixchariot/appMixes/1/network/sourceEndpoints", data = ixchariotApi.createEndpoint(ips[0], ips[1]))
for dst_Endpoint in dst_EndpointsList:
    ips = dst_Endpoint.split('/')
    session.httpPost("config/ixchariot/appMixes/1/network/destinationEndpoints", data = ixchariotApi.createEndpoint(ips[0], ips[1]))
# Add applications to the AppMix
# appName appRatio
appList = [
    ["HTTPS Simulated Financial", 100],
]
for i in range(0, len(appList)):
    appData = appList[i]
    appName = appData[0]
    appRatio = appData[1]
    # Look up the application script in the resources library and attach it.
    appScript = ixchariotApi.getApplicationScriptFromResourcesLibrary(session, appName)
    app = ixchariotApi.createApp(appScript, appRatio);
    session.httpPost("config/ixchariot/appMixes/1/settings/applications", data = app)
# Run the test, export zipped CSV stats, then print test- and group-level
# results; the session is stopped and deleted whether or not the run failed.
try:
    print "Starting the test..."
    result = session.runTest()
    print "The test ended"
    #Save all results to CSV files.
    print "Saving the test results into zipped CSV files...\n"
    filePath = "testResults.zip"
    with open(filePath, "wb+") as statsFile:
        api.getStatsCsvZipToFile(result.testId, statsFile)
    # Get results after test run.
    # The functions below can also be used while the test is running, by using session.startTest() to start the execution,
    # calling any of the results retrieval functions during the run, and using session.waitTestStopped() to wait for test end.
    # You can use time.sleep() to call the results retrieval functions from time to time.
    # These functions will return statistics for all the timestamps reported since the beginning of the test until the current moment.
    # Get test level results.
    # Note: the statistic names should be identical to those that appear in the results CSV
    results = ixchariotApi.getTestLevelResults(session, ["Throughput"])
    print "Test Level Results: \n"
    for res in results:
        # Each object in the list of results is of type Statistic (contains the statistic name and a list of StatisticValue objects).
        print res.name
        for val in res.values:
            # The list will contain StatisticValue objects for all the reported timestamps since the beginning of the test.
            # Each StatisticValue object contains the timestamp and the actual value.
            print str(val.timestamp) + " " + str(val.value)
        print ""
    # Get group level results.
    # Note: the statistic names should be identical to those that appear in the results CSV
    results = ixchariotApi.getGroupLevelResults(session, ["Throughput"], "AppMix 1")
    print "Group Level Results for AppMix 1:\n"
    for res in results:
        # Each object in the list of results has a printing function defined.
        # It will print the name of the statistic and the list of timestamp - value pairs.
        # For accessing each of these components separately see the example above.
        print res
        print ""
except Exception, e:
    print "Error", e
# Teardown: always release the session on the IxChariot server.
print "Stopping the session..."
session.stopSession()
print "Deleting the session..."
session.httpDelete()
a = int(os.system('ls | grep testResults.zip | wc -l'))
print a
os.system('echo $appData[0]')
if a == 0:
os.system('echo $appData[0]')
os.system('mv testResults.zip (echo $appData[0])_testResults.zip')
os.system('cp testResults.zip ./runningLog')
else:
print "the testing is not finishing...."
| [
"jianhuayan@users.noreply.github.com"
] | jianhuayan@users.noreply.github.com |
48ae0683541c724901af2003c42a3e01a2680bd3 | 0acbec663e7b2b77f799e8f1f298d62fceecbe1c | /admin.py | e038343044d3571276f66c2871bc23ed928cea93 | [] | no_license | rashedul-islam/managebook | d264ca75b031e974337a5bdb1ffa261c381675ad | 0bdd7f01f2cfe9b436b41ac92c3a98ef62a70129 | refs/heads/master | 2021-01-13T16:58:09.363257 | 2016-12-25T12:40:20 | 2016-12-25T12:40:20 | 77,324,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.contrib import admin
from .models import Book, Genre, Choices
admin.site.register(Book)
admin.site.register(Genre) | [
"rashedul.islam.kth@gmail.com"
] | rashedul.islam.kth@gmail.com |
3bb90c581abdb121a7c377a67bb816e7b76164f4 | 5bb5cc34e3d52f5cd1f88efde0c182735f682cf4 | /inference.py | fd7f0f5b1ec3a8af6da0607a48b699dd047f62c1 | [] | no_license | jtpils/pc_mr_net | 4f3816bed3b4cd6dd2fc4481cc3374a0ac5ffca8 | 3b5c3ce563473593ebd2b90d0d57a423852f822f | refs/heads/master | 2020-05-05T05:09:23.345742 | 2019-03-21T22:56:43 | 2019-03-21T22:56:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,329 | py | import torch
from argparse import ArgumentParser
import os
import numpy as np
from h5py import File
from layers.pc_mr_net import PointCloudMapRegressionNet
from data.hdf_dataset_loader import HdfDataset
class InferenceDataset(HdfDataset):
def __getitem__(self, item):
data_file = self.data_files[item]
_file = os.path.join(self.dataset_folder, data_file)
with File(_file) as f:
pcl_data = np.array(f["point_cloud"])
feature_vector = self.compute_feature_vector(pcl_data)
return feature_vector, data_file, pcl_data
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("model")
parser.add_argument("data_folder")
parser.add_argument("save_folder")
args = parser.parse_args()
net = PointCloudMapRegressionNet()
net.load_state_dict(torch.load(args.model))
files = os.listdir(args.data_folder)
data_loader = InferenceDataset(args.data_folder)
for i in range(len(data_loader)):
feature_vector, file_name, pcl = data_loader[i]
output = net(feature_vector)
output_file_name = os.path.join(args.save_folder, "out_" + file_name)
with File(output_file_name, "w") as f:
f.create_dataset("point_cloud", data=pcl)
f.create_dataset("object_vectors", data=output)
| [
"jae251@gmx.de"
] | jae251@gmx.de |
46a790f3eecc7144206651c29840f940b5efa53c | 3ffb6d8600d767cf2e430b603a21ad9d85d1e02e | /Article/views.py | 3ee85e39abc330219488785956799d44b509b560 | [] | no_license | wyangyang1230/boke | 231ec662b5427daa4267a20126611ac159ec11da | f6da39c33aae7758fd6b4bba55b654ba533eeb90 | refs/heads/master | 2020-08-07T08:18:05.651256 | 2019-10-07T11:46:15 | 2019-10-07T11:46:15 | 207,441,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect,JsonResponse
from django.core.paginator import Paginator
from Back.models import *
# Create your views here.
## 父模板
def base(request):
# get请求
data=request.GET
serach=data.get('serach')
print(serach)
# 通过form表单提交的数据,判断数据库中是否存在某个文章
# 通过模型查询
article=Article.objects.filter(title__contains=serach).all()
print(article)
return render(request,'article/base.html',locals())
# 网站首页
def index(request):
article=Article.objects.order_by('-date')[:6]
recommend_article=Article.objects.filter(recommend=1)[:7]
click_article=Article.objects.order_by('-click')[:12]
return render(request,'article/index.html',locals())
# 个人相册
def listpic(request):
return render(request,'article/listpic.html')
# 个人简介
def about(request):
return render(request,'article/about.html')
# 文章分页
def newslistpic(request,page=1):
page=int(page) #1为字符串类型,需要将类型转换
article=Article.objects.order_by('-date')
paginator=Paginator(article,6) #显示每页6条数据
page_obj=paginator.page(page)
# 获取当前页
current_page=page_obj.number
start=current_page-3
if start<1:
start=0
end=current_page+2
if end > paginator.num_pages:
end = paginator.num_pages
if start==0:
end=5
if end==paginator.num_pages:
start=paginator.num_pages-5
page_range=paginator.page_range[start:end]
return render(request,'article/newslistpic.html',locals())
# 文章详情
def articledetails(request,id):
# id为字符串类型
id=int(id)
article=Article.objects.get(id=id)
print(article)
return render(request,'article/articledetails.html',locals()) | [
"root@163.com"
] | root@163.com |
4669e42a6a2d00e57c11c14494944ba996bf543b | 9f55ac816c6a4bdb8ac35c4eea55ef5283c2a5cf | /homework/hw06/hw06.py | 17afe454cd9b9192798f9f042301cf27dc136aa9 | [
"MIT"
] | permissive | Nicoleyss/cs61a-self-study | ead2663df58bc5b080badd51a1d90b3e5148368f | e32d77f751af66008ff4c69ffe0b32688b275516 | refs/heads/master | 2022-01-08T17:39:00.442276 | 2018-09-08T18:22:58 | 2018-09-08T18:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,034 | py | passphrase = 'CC74EB'
def survey(p):
"""
You do not need to understand this code.
>>> survey(passphrase)
'3d2eea56786a3d9e503a4c07dd667867ef3d92bfccd68b2aa0900ead'
"""
import hashlib
return hashlib.sha224(p.encode('utf-8')).hexdigest()
class Fib():
"""A Fibonacci number.
>>> start = Fib()
>>> start
0
>>> start.next()
1
>>> start.next().next()
1
>>> start.next().next().next()
2
>>> start.next().next().next().next()
3
>>> start.next().next().next().next().next()
5
>>> start.next().next().next().next().next().next()
8
>>> start.next().next().next().next().next().next() # Ensure start isn't changed
8
"""
def __init__(self, value=0):
self.value = value
def next(self):
# assuming that the user is starting the sequence with zero (duh!)
fib = Fib(1) if self.value == 0 else Fib(self.value + self.previous)
fib.previous = self.value
return fib
def __repr__(self):
return str(self.value)
class VendingMachine:
"""A vending machine that vends some product for some price.
>>> v = VendingMachine('candy', 10)
>>> v.vend()
'Machine is out of stock.'
>>> v.deposit(15)
'Machine is out of stock. Here is your $15.'
>>> v.restock(2)
'Current candy stock: 2'
>>> v.vend()
'You must deposit $10 more.'
>>> v.deposit(7)
'Current balance: $7'
>>> v.vend()
'You must deposit $3 more.'
>>> v.deposit(5)
'Current balance: $12'
>>> v.vend()
'Here is your candy and $2 change.'
>>> v.deposit(10)
'Current balance: $10'
>>> v.vend()
'Here is your candy.'
>>> v.deposit(15)
'Machine is out of stock. Here is your $15.'
>>> w = VendingMachine('soda', 2)
>>> w.restock(3)
'Current soda stock: 3'
>>> w.restock(3)
'Current soda stock: 6'
>>> w.deposit(2)
'Current balance: $2'
>>> w.vend()
'Here is your soda.'
"""
def __init__(self, item, cost):
self.item = item
self.cost = cost
self.stock = 0
self.bank = 0
def vend(self):
if self.stock <= 0:
return 'Machine is out of stock.'
elif self.bank < self.cost:
return 'You must deposit ${0} more.'.format(self.cost - self.bank)
else:
self.bank -= self.cost
self.stock -= 1
if self.bank == 0:
return 'Here is your {0}.'.format(self.item)
else:
change, self.bank = self.bank, 0
return 'Here is your {0} and ${1} change.'.format(self.item, change)
def deposit(self, money):
if self.stock <= 0:
return 'Machine is out of stock. Here is your ${0}.'.format(money)
else:
self.bank += money
return 'Current balance: ${0}'.format(self.bank)
def restock(self, amt):
self.stock += amt
return 'Current {0} stock: {1}'.format(self.item, self.stock) | [
"tejashah88@gmail.com"
] | tejashah88@gmail.com |
f0fb90be4126c4d7c1b3bc08502dc6de8c3bdc26 | 59405bb9af890a081e33f9f56d20a7ecb7d03853 | /02. Logistic Regression/Social Network Ads Logistic Regression.py | 899c9dbea65173d856b4baa545afc7a31c73e83f | [] | no_license | aaryankaushik/ML-Algorithms | cf9a69d15fee6c4f725f16ce4d463411e39fe474 | 38f86b5bebe1e2f50778d79c4833c050b852960b | refs/heads/master | 2020-04-24T23:19:02.879274 | 2019-02-24T15:01:53 | 2019-02-24T15:01:53 | 172,340,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | import pandas as pd
sn=pd.read_csv('Social Network Ads.csv')
gen=pd.get_dummies(sn['Gender'],drop_first=True)
sn.drop(['Gender'],axis=1,inplace=True)
sn=pd.concat([sn,gen],axis=1)
x=sn.drop('Purchased',axis=1)
y=sn['Purchased']
from sklearn.model_selection import train_test_split
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=0)
from sklearn.linear_model import LogisticRegression
lr=LogisticRegression()
lr.fit(xtrain,ytrain)
ypre=lr.predict(xtest)
print('acc:',lr.score(xtest,ytest))
from sklearn.metrics import confusion_matrix,classification_report
print('cf:',confusion_matrix(ytest,ypre))
print('cr:',classification_report(ytest,ypre)) | [
"noreply@github.com"
] | aaryankaushik.noreply@github.com |
fe4b88457337dd6b7961c723050db6e1729548f3 | 6cb4f70534e4087ef11163a1c660374784a9bb6c | /skia/skia_library.gypi | 9fc63d05a542b784b4aeb847ab61b87833d9e715 | [
"BSD-3-Clause"
] | permissive | yodamaster/engine | 07a3e576b680f6c2d0db30c0b0be763d279f5884 | 33e5611409d261f8783e762e69e82b1dfa3ac480 | refs/heads/master | 2021-01-16T22:22:17.491692 | 2015-11-13T22:02:30 | 2015-11-13T22:02:30 | 46,162,097 | 1 | 0 | null | 2015-11-14T04:58:45 | 2015-11-14T04:58:45 | null | UTF-8 | Python | false | false | 13,850 | gypi | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gypi file contains the Skia library.
# In component mode (shared_lib) it is folded into a single shared library with
# the Chrome-specific enhancements but in all other cases it is a separate lib.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# variables and defines should go in skia_common.gypi so they can be seen
# by files listed here and in skia_library_opts.gypi.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
{
'dependencies': [
'skia_library_opts.gyp:skia_opts',
'../third_party/zlib/zlib.gyp:zlib',
],
'includes': [
'../third_party/skia/gyp/core.gypi',
'../third_party/skia/gyp/effects.gypi',
'../third_party/skia/gyp/pdf.gypi',
'../third_party/skia/gyp/utils.gypi',
],
'sources': [
'../third_party/skia/src/ports/SkImageDecoder_empty.cpp',
'../third_party/skia/src/images/SkScaledBitmapSampler.cpp',
'../third_party/skia/src/images/SkScaledBitmapSampler.h',
'../third_party/skia/src/ports/SkFontConfigInterface_direct.cpp',
'../third_party/skia/src/fonts/SkFontMgr_fontconfig.cpp',
'../third_party/skia/src/ports/SkFontHost_fontconfig.cpp',
'../third_party/skia/src/fonts/SkFontMgr_indirect.cpp',
'../third_party/skia/src/fonts/SkRemotableFontMgr.cpp',
'../third_party/skia/src/ports/SkRemotableFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkImageGenerator_none.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.h',
'../third_party/skia/src/ports/SkFontHost_mac.cpp',
'../third_party/skia/src/ports/SkFontHost_win.cpp',
'../third_party/skia/src/ports/SkFontMgr_android.cpp',
'../third_party/skia/src/ports/SkFontMgr_android_factory.cpp',
'../third_party/skia/src/ports/SkFontMgr_android_parser.cpp',
'../third_party/skia/src/ports/SkFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkGlobalInitialization_chromium.cpp',
'../third_party/skia/src/ports/SkOSFile_posix.cpp',
'../third_party/skia/src/ports/SkOSFile_stdio.cpp',
'../third_party/skia/src/ports/SkOSFile_win.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.h',
'../third_party/skia/src/ports/SkTime_Unix.cpp',
'../third_party/skia/src/ports/SkTLS_pthread.cpp',
'../third_party/skia/src/ports/SkTLS_win.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.h',
'../third_party/skia/src/sfnt/SkOTTable_name.cpp',
'../third_party/skia/src/sfnt/SkOTTable_name.h',
'../third_party/skia/src/sfnt/SkOTUtils.cpp',
'../third_party/skia/src/sfnt/SkOTUtils.h',
'../third_party/skia/include/core/SkFontStyle.h',
'../third_party/skia/include/images/SkMovie.h',
'../third_party/skia/include/images/SkPageFlipper.h',
'../third_party/skia/include/ports/SkFontConfigInterface.h',
'../third_party/skia/include/ports/SkFontMgr.h',
'../third_party/skia/include/ports/SkFontMgr_indirect.h',
'../third_party/skia/include/ports/SkRemotableFontMgr.h',
'../third_party/skia/include/ports/SkTypeface_win.h',
],
# Exclude all unused files in skia utils.gypi file
'sources!': [
'../third_party/skia/include/utils/SkBoundaryPatch.h',
'../third_party/skia/include/utils/SkFrontBufferedStream.h',
'../third_party/skia/include/utils/SkCamera.h',
'../third_party/skia/include/utils/SkCanvasStateUtils.h',
'../third_party/skia/include/utils/SkCubicInterval.h',
'../third_party/skia/include/utils/SkCullPoints.h',
'../third_party/skia/include/utils/SkDebugUtils.h',
'../third_party/skia/include/utils/SkDumpCanvas.h',
'../third_party/skia/include/utils/SkEventTracer.h',
'../third_party/skia/include/utils/SkInterpolator.h',
'../third_party/skia/include/utils/SkLayer.h',
'../third_party/skia/include/utils/SkMeshUtils.h',
'../third_party/skia/include/utils/SkNinePatch.h',
'../third_party/skia/include/utils/SkParsePaint.h',
'../third_party/skia/include/utils/SkParsePath.h',
'../third_party/skia/include/utils/SkRandom.h',
'../third_party/skia/src/utils/SkBitmapHasher.cpp',
'../third_party/skia/src/utils/SkBitmapHasher.h',
'../third_party/skia/src/utils/SkBoundaryPatch.cpp',
'../third_party/skia/src/utils/SkFrontBufferedStream.cpp',
'../third_party/skia/src/utils/SkCamera.cpp',
'../third_party/skia/src/utils/SkCanvasStack.h',
'../third_party/skia/src/utils/SkCubicInterval.cpp',
'../third_party/skia/src/utils/SkCullPoints.cpp',
'../third_party/skia/src/utils/SkDumpCanvas.cpp',
'../third_party/skia/src/utils/SkFloatUtils.h',
'../third_party/skia/src/utils/SkInterpolator.cpp',
'../third_party/skia/src/utils/SkLayer.cpp',
'../third_party/skia/src/utils/SkMD5.cpp',
'../third_party/skia/src/utils/SkMD5.h',
'../third_party/skia/src/utils/SkMeshUtils.cpp',
'../third_party/skia/src/utils/SkNinePatch.cpp',
'../third_party/skia/src/utils/SkOSFile.cpp',
'../third_party/skia/src/utils/SkParsePath.cpp',
'../third_party/skia/src/utils/SkPathUtils.cpp',
'../third_party/skia/src/utils/SkSHA1.cpp',
'../third_party/skia/src/utils/SkSHA1.h',
'../third_party/skia/src/utils/SkTFitsIn.h',
'../third_party/skia/src/utils/SkTLogic.h',
# We don't currently need to change thread affinity, so leave out this complexity for now.
"../third_party/skia/src/utils/SkThreadUtils_pthread_mach.cpp",
"../third_party/skia/src/utils/SkThreadUtils_pthread_linux.cpp",
#windows
'../third_party/skia/include/utils/win/SkAutoCoInitialize.h',
'../third_party/skia/include/utils/win/SkHRESULT.h',
'../third_party/skia/include/utils/win/SkIStream.h',
'../third_party/skia/include/utils/win/SkTScopedComPtr.h',
'../third_party/skia/src/utils/win/SkAutoCoInitialize.cpp',
'../third_party/skia/src/utils/win/SkIStream.cpp',
'../third_party/skia/src/utils/win/SkWGL_win.cpp',
#testing
'../third_party/skia/src/fonts/SkGScalerContext.cpp',
'../third_party/skia/src/fonts/SkGScalerContext.h',
],
'include_dirs': [
'../third_party/skia/include/c',
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/include/images',
'../third_party/skia/include/lazy',
'../third_party/skia/include/pathops',
'../third_party/skia/include/pdf',
'../third_party/skia/include/pipe',
'../third_party/skia/include/ports',
'../third_party/skia/include/record',
'../third_party/skia/include/utils',
'../third_party/skia/src/core',
'../third_party/skia/src/opts',
'../third_party/skia/src/image',
'../third_party/skia/src/pdf',
'../third_party/skia/src/ports',
'../third_party/skia/src/sfnt',
'../third_party/skia/src/utils',
'../third_party/skia/src/lazy',
],
'conditions': [
['skia_support_gpu != 0', {
'includes': [
'../third_party/skia/gyp/gpu.gypi',
],
'sources': [
'<@(skgpu_null_gl_sources)',
'<@(skgpu_sources)',
],
'include_dirs': [
'../third_party/skia/include/gpu',
'../third_party/skia/src/gpu',
],
}],
['skia_support_pdf == 0', {
'sources/': [
['exclude', '../third_party/skia/src/doc/SkDocument_PDF.cpp'],
['exclude', '../third_party/skia/src/pdf/'],
],
}],
['skia_support_pdf == 1', {
'dependencies': [
'../third_party/sfntly/sfntly.gyp:sfntly',
],
}],
[ 'OS == "win"', {
'sources!': [
# Keeping _win.cpp
"../third_party/skia/src/utils/SkThreadUtils_pthread.cpp",
"../third_party/skia/src/utils/SkThreadUtils_pthread_other.cpp",
],
},{
'sources!': [
# Keeping _pthread.cpp and _pthread_other.cpp
"../third_party/skia/src/utils/SkThreadUtils_win.cpp",
],
}],
[ 'OS != "mac"', {
'sources/': [
['exclude', '/mac/']
],
}],
[ 'OS == "android" and target_arch == "arm"', {
'sources': [
'../third_party/skia/src/core/SkUtilsArm.cpp',
],
'includes': [
'../build/android/cpufeatures.gypi',
],
}],
[ 'desktop_linux == 1 or chromeos == 1', {
'dependencies': [
'../build/linux/system.gyp:fontconfig',
'../build/linux/system.gyp:freetype2',
'../third_party/icu/icu.gyp:icuuc',
],
'cflags': [
'-Wno-unused',
'-Wno-unused-function',
],
}],
[ 'use_cairo == 1 and use_pango == 1', {
'dependencies': [
'../build/linux/system.gyp:pangocairo',
],
}],
[ 'OS=="win" or OS=="mac" or OS=="ios" or OS=="android"', {
'sources!': [
'../third_party/skia/src/ports/SkFontConfigInterface_direct.cpp',
'../third_party/skia/src/ports/SkFontHost_fontconfig.cpp',
'../third_party/skia/src/fonts/SkFontMgr_fontconfig.cpp',
],
}],
[ 'OS=="win" or OS=="mac" or OS=="ios"', {
'sources!': [
'../third_party/skia/src/ports/SkFontHost_FreeType.cpp',
'../third_party/skia/src/ports/SkFontHost_FreeType_common.cpp',
],
}],
[ 'OS == "android"', {
'dependencies': [
'../third_party/expat/expat.gyp:expat',
'../third_party/freetype/freetype.gyp:ft2',
],
# This exports a hard dependency because it needs to run its
# symlink action in order to expose the skia header files.
'hard_dependency': 1,
'include_dirs': [
'../third_party/expat/files/lib',
],
}, { # not 'OS == "android"'
'sources!': [
"../third_party/skia/src/ports/SkFontMgr_android_factory.cpp",
'../third_party/skia/src/ports/SkFontMgr_android_parser.cpp',
],
}],
[ 'OS == "ios"', {
'include_dirs': [
'../third_party/skia/include/utils/ios',
'../third_party/skia/include/utils/mac',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/ImageIO.framework',
],
},
'sources': [
# This file is used on both iOS and Mac, so it should be removed
# from the ios and mac conditions and moved into the main sources
# list.
'../third_party/skia/src/utils/mac/SkStream_mac.cpp',
],
# The main skia_opts target does not currently work on iOS because the
# target architecture on iOS is determined at compile time rather than
# gyp time (simulator builds are x86, device builds are arm). As a
# temporary measure, this is a separate opts target for iOS-only, using
# the _none.cpp files to avoid architecture-dependent implementations.
'dependencies': [
'skia_library_opts.gyp:skia_opts_none',
],
'dependencies!': [
'skia_library_opts.gyp:skia_opts',
],
}],
[ 'OS == "mac"', {
'direct_dependent_settings': {
'include_dirs': [
'../third_party/skia/include/utils/mac',
],
},
'include_dirs': [
'../third_party/skia/include/utils/mac',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
],
},
'sources': [
'../third_party/skia/src/utils/mac/SkStream_mac.cpp',
],
}],
[ 'OS == "win"', {
'sources!': [
'../third_party/skia/src/ports/SkOSFile_posix.cpp',
'../third_party/skia/src/ports/SkTime_Unix.cpp',
'../third_party/skia/src/ports/SkTLS_pthread.cpp',
],
'include_dirs': [
'../third_party/skia/include/utils/win',
'../third_party/skia/src/utils/win',
],
},{ # not 'OS == "win"'
'sources!': [
'../third_party/skia/src/ports/SkFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkRemotableFontMgr_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.cpp',
'../third_party/skia/src/ports/SkScalerContext_win_dw.h',
'../third_party/skia/src/ports/SkTypeface_win_dw.cpp',
'../third_party/skia/src/ports/SkTypeface_win_dw.h',
'../third_party/skia/src/utils/win/SkDWrite.h',
'../third_party/skia/src/utils/win/SkDWrite.cpp',
'../third_party/skia/src/utils/win/SkDWriteFontFileStream.cpp',
'../third_party/skia/src/utils/win/SkDWriteFontFileStream.h',
'../third_party/skia/src/utils/win/SkDWriteGeometrySink.cpp',
'../third_party/skia/src/utils/win/SkDWriteGeometrySink.h',
'../third_party/skia/src/utils/win/SkHRESULT.cpp',
],
}],
],
'target_conditions': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
[ 'OS == "ios"', {
'sources/': [
['include', 'SkFontHost_mac\\.cpp$',],
['include', 'SkStream_mac\\.cpp$',],
['include', 'SkCreateCGImageRef\\.cpp$',],
],
'xcode_settings' : {
'WARNING_CFLAGS': [
# SkFontHost_mac.cpp uses API deprecated in iOS 7.
# crbug.com/408571
'-Wno-deprecated-declarations',
],
},
}],
],
'direct_dependent_settings': {
'include_dirs': [
'../third_party/skia/include/core',
'../third_party/skia/include/effects',
'../third_party/skia/include/pdf',
'../third_party/skia/include/gpu',
'../third_party/skia/include/lazy',
'../third_party/skia/include/pathops',
'../third_party/skia/include/pipe',
'../third_party/skia/include/ports',
'../third_party/skia/include/utils',
],
},
}
| [
"jackson@google.com"
] | jackson@google.com |
c2154d3a5fe4c8670860e1c2b5ea7301a892ea20 | 780b6cca690a213ac908b1cd5faef5366a18dc4e | /314_print_names_to_columns/save1_nopass.py | 8cb6c53bb39aa700c4f9bc48b51e4735762b74ba | [] | no_license | katkaypettitt/pybites-all | 899180a588e460b343c00529c6a742527e4ea1bc | 391c07ecac0d92d5dc7c537bcf92eb6c1fdda896 | refs/heads/main | 2023-08-22T16:33:11.171732 | 2021-10-24T17:29:44 | 2021-10-24T17:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | from typing import List # not needed when we upgrade to 3.9
def print_names_to_columns(names: List[str], cols: int = 2) -> None:
name_list = [f'| {name:{9}}' for name in names]
output = ''
for i in range(0, len(name_list), cols):
output += ' '.join(name_list[i: i + cols]) + '\n'
print(output) | [
"70788275+katrinaalaimo@users.noreply.github.com"
] | 70788275+katrinaalaimo@users.noreply.github.com |
c2ba1834a55e267479f9cbd6ac9640b5e7397ba9 | 8fd4822d6d04fe0643b84139ddb1ee1d7d4d9f0a | /tests/test_signal/__init__.py | a5ff54368aa509e876bc36001ae564c1f61973dc | [
"MIT"
] | permissive | mcanatalay/SIMULOC | 48d4352599daa807009635953162d22fced273a0 | af32c522887dec08a0815052a5878b5a595b8d44 | refs/heads/master | 2021-08-22T17:12:30.332027 | 2017-11-18T23:33:14 | 2017-11-18T23:33:14 | 111,238,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | """Test class for Signal module."""
| [
"mcanatalay@hotmail.co.uk"
] | mcanatalay@hotmail.co.uk |
9a0d1a518ad80a316b052d7d50ffc3d5918fc703 | ddcef0ffeb4a024850252ff5c5da247c4433e207 | /src/mmgroup/tests/test_mm/test_prep_xy.py | 8ee8245cac11f4afb4009b578f4e7e43193d7ec6 | [
"MIT"
] | permissive | stratosthirios/mmgroup | 8423bc8c3d38a2478f76ba53ca99db8e8cf8dfa0 | a7a9a92a20580ecd697075f1c673989f0ad13bdc | refs/heads/master | 2023-08-29T00:01:42.038120 | 2021-10-17T20:27:46 | 2021-10-17T20:27:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,848 | py | from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import numpy as np
from random import randint
import pytest
from mmgroup.mm import mm_sub_test_prep_xy
from mmgroup import mat24 as m24
from mmgroup.tests.spaces.sparse_mm_space import SparseMmV
from mmgroup.tests.spaces.sparse_mm_space import SparseMmVector
from mmgroup.tests.groups.mgroup_n import MGroupNWord
from mmgroup.mm_space import characteristics
PRIMES = characteristics()
def _as_suboctad(v1, o):
d = m24.octad_to_gcode(o)
c = m24.ploop_cap(v1, d)
return m24.cocode_to_suboctad(c, d)
class prep_xy:
group = MGroupNWord
space = SparseMmVector
def __init__(self, eps, e, f):
self.f = f & 0x1fff
self.e = e & 0x1fff
self.eps = eps = eps & 0xfff
self.odd = (eps & 0x800) >> 11
lin = np.zeros(6, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 1, lin)
self.lin_i = lin[:3]
self.lin_d = lin[3:6]
self.sign_XYZ = np.zeros(2048, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 2, self.sign_XYZ)
self.s_T = np.zeros(759, dtype = np.uint32)
mm_sub_test_prep_xy(f, e, eps, 3, self.s_T)
def inv_op_unit(self, tag, d, j):
if tag == 'X':
tag1 = 'X'
d1 = d ^ self.lin_d[0]
j1 = j
sign = (self.sign_XYZ[d] & 1)
sign ^= (self.lin_i[0] >> j) & 1
if self.odd:
cc = m24.vect_to_cocode(1 << j)
sign ^= m24.scalar_prod(d, cc)
elif tag in 'ZY':
s = self.odd ^ (tag == 'Y')
tag1 = 'ZY'[s]
s += 1
d1 = d ^ self.lin_d[s]
j1 = j
sign = (self.sign_XYZ[d] >> s) & 1
sign ^= (self.lin_i[s] >> j) & 1
elif tag == 'T':
tag1 = 'T'
d1 = d
te = self.s_T[d]
so_exp = _as_suboctad(self.f, d)
assert te & 0x3f == so_exp , (hex(te), hex(so_exp))
j1 = j ^ (te & 0x3f)
sign = m24.suboctad_scalar_prod(j, (te >> 8) & 0x3f)
sign ^= (te >> 14) & 1
sign ^= m24.suboctad_weight(j) & self.odd & 1
assert ((te >> 15) ^ self.odd) & 1 == 0
else:
raise ValueError("Illegal tag " + str(tag))
return sign & 1, tag1, d1, j1
def inv_op(self, v):
w = self.space(v.p)
for value, tag, d, j in v.as_tuples():
sign, tag, d, j = self.inv_op_unit(tag, d, j)
if sign & 1:
value = -value % p
w += value * space(v.p, tag, d, j)
return w
def check_v(self, v, verbose = 0):
grp = self.group
delta_atom = grp('d', self.eps)
x_atom = grp('x', self.e)**(-1)
y_atom = grp('y', self.f)**(-1)
w_ref = v * delta_atom * x_atom * y_atom
w = self.inv_op(v)
error = w != w_ref
if error or verbose:
eps, e, f = self.eps, self.e, self.f
print("vector", v)
print("operation", "d_%xh * x_%xh * y_%xh" % (eps, e, f))
print("obtained:", w)
if error:
print(v * delta_atom , v, delta_atom)
print("expected:", w_ref)
raise ValueError("x-y operation failed")
print("Error: x-y operation failed!!!")
p = PRIMES[0]
space = SparseMmVector
def as_vector(x):
if isinstance(x, str):
data = [(tag, 'r') for tag in x]
return space(p, data)
if isinstance(x, tuple):
return space(p, *x)
if isinstance(x, list):
return space(p, x)
raise TypeError("Bad type for vector of rep")
p = PRIMES[0]
space = SparseMmVector
def prep_xy_testcases():
testcases = [
[ [("X", 3, 6)], 0, 0, 0x1171 ],
[ [("X", 3, 6)], 12, 0, 0 ],
[ [("X", 3, 6)], 12, 1111, 0 ],
[ [("X", 3, 6)], 12, 0, 1111],
[ [("Z", 0, 0)], 0, 0, 0],
[ [("Z", 0, 0)], 12, 0, 0],
[ [("Z", 0, 0)], 0, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 0],
[ [("Z", 0, 0)], 0x812, 0, 0],
[ [("Z", 0, 0)], 0x800, 34, 0],
[ [("Z", 0, 0)], 0x800, 0, 34],
]
for v, eps, e, f in testcases:
yield as_vector(v), prep_xy(eps, e, f)
v_tags = "TXZY"
for v in v_tags:
for i in range(1000):
v1 = as_vector(v)
eps = randint(0, 0xfff)
e = randint(0, 0x1fff)
f = randint(0, 0x1fff)
yield v1, prep_xy(eps, e, f)
@pytest.mark.mm
def test_prep_xy(verbose = 0):
print("Testing preparation of operation x-y...")
for v, op in prep_xy_testcases():
op.check_v(v, verbose = verbose)
if verbose: print("")
print("passed")
| [
"m.seysen@gmx.de"
] | m.seysen@gmx.de |
15ea8659d5ebb57864269738d0afdbd7d47851c3 | 7eaf758ed8954794ddcf0d56ca247b31ce68af55 | /dapl_ckeditor/app_ckeditor/urls.py | 137a4999f6819af53ba1b45926f5892668891cc9 | [] | no_license | b4isty/Django-Ckeditor | 72a9050c464860931e0b85d2984a68fdca2bfa0e | fe11c3ecdb9706c6aade959ad688ca2f39ce4fe8 | refs/heads/master | 2020-03-23T20:07:58.233621 | 2018-11-14T15:00:16 | 2018-11-14T15:00:16 | 142,023,260 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.urls import path
from . import views
urlpatterns = [
path('home/', views.home, name='home'),
path('blog/', views.blog, name='blog'),
path('blog_list/', views.blog_list, name='blog_list'),
path('blog_details/<int:pk>/', views.blog_detail_view, name='blog_details'),
path('blog_edit/<int:pk>/', views.blog_edit_view, name='blog_edit'),
path('blog_delete/<int:pk>/', views.blog_delete, name='blog_delete')
]
| [
"baishakhi@digitalaptech.com"
] | baishakhi@digitalaptech.com |
d3e323c429533162b102744f30b393fd5c2f8081 | 0951cb62572e75a8e8a7ef1f98092110bb73d20a | /pandas/tests/categorical/test_operators.py | 09a0607b67a88f0f3b238c65434191cfa6e3562f | [
"BSD-3-Clause"
] | permissive | ActiveState/pandas | 452de0fe049412f273caf6ebc86b8d0ffa0c68e6 | 106a04f14e0c090f95784c311f3d07c35e6ef276 | refs/heads/master | 2023-08-30T09:05:13.587536 | 2018-01-04T15:25:01 | 2018-01-04T15:25:01 | 112,227,117 | 1 | 4 | BSD-3-Clause | 2023-07-28T17:52:11 | 2017-11-27T17:32:22 | Python | UTF-8 | Python | false | false | 11,023 | py | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
import numpy as np
import pandas.util.testing as tm
from pandas import Categorical, Series, DataFrame, date_range
from pandas.tests.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
def test_categories_none_comparisons(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
tm.assert_categorical_equal(factor, self.factor)
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
tm.assert_categorical_equal(result, expected)
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
tm.assert_categorical_equal(result, expected)
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
tm.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
tm.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = Categorical(
["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
cat_rev_base = Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = Categorical(["a", "b", "c"], ordered=True)
cat_base = Categorical(
["b", "b", "b"], categories=cat.categories, ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
tm.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
tm.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
pytest.raises(TypeError, f)
cat_rev_base2 = Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
pytest.raises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
assert not (cat > cat).any()
def f():
cat > cat_unorderd
pytest.raises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > s)
pytest.raises(TypeError, lambda: cat_rev > s)
pytest.raises(TypeError, lambda: s < cat)
pytest.raises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
pytest.raises(TypeError, lambda: cat > a)
pytest.raises(TypeError, lambda: cat_rev > a)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps(object):
    """Comparison and numeric-operator behaviour of pandas ``Categorical``.

    Covers ordered comparisons against scalars/arrays/Series, equality of
    unordered categoricals with differently-ordered categories, and the
    numeric/reduction/ufunc operations that must raise ``TypeError``.
    """

    def test_datetime_categorical_comparison(self):
        """Ordered datetime categoricals support </> against their own values."""
        dt_cat = Categorical(date_range('2014-01-01', periods=3), ordered=True)
        tm.assert_numpy_array_equal(dt_cat > dt_cat[0],
                                    np.array([False, True, True]))
        tm.assert_numpy_array_equal(dt_cat[0] < dt_cat,
                                    np.array([False, True, True]))

    def test_reflected_comparison_with_scalars(self):
        """Reflected (scalar OP cat) comparisons mirror (cat OP scalar)."""
        # GH8658
        cat = Categorical([1, 2, 3], ordered=True)
        tm.assert_numpy_array_equal(cat > cat[0],
                                    np.array([False, True, True]))
        tm.assert_numpy_array_equal(cat[0] < cat,
                                    np.array([False, True, True]))

    def test_comparison_with_unknown_scalars(self):
        """Scalars outside the categories: ordering raises, ==/!= do not."""
        # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
        # and following comparisons with scalars not in categories should raise
        # for unequal comps, but not for equal/not equal
        cat = Categorical([1, 2, 3], ordered=True)
        pytest.raises(TypeError, lambda: cat < 4)
        pytest.raises(TypeError, lambda: cat > 4)
        pytest.raises(TypeError, lambda: 4 < cat)
        pytest.raises(TypeError, lambda: 4 > cat)
        tm.assert_numpy_array_equal(cat == 4,
                                    np.array([False, False, False]))
        tm.assert_numpy_array_equal(cat != 4,
                                    np.array([True, True, True]))

    @pytest.mark.parametrize('data,reverse,base', [
        (list("abc"), list("cba"), list("bbb")),
        ([1, 2, 3], [3, 2, 1], [2, 2, 2])]
    )
    def test_comparisons(self, data, reverse, base):
        """Series-backed categorical comparisons honour the category order."""
        cat_rev = Series(
            Categorical(data, categories=reverse, ordered=True))
        cat_rev_base = Series(
            Categorical(base, categories=reverse, ordered=True))
        cat = Series(Categorical(data, ordered=True))
        cat_base = Series(
            Categorical(base, categories=cat.cat.categories, ordered=True))
        s = Series(base)
        a = np.array(base)
        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)
        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)
        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)
        # scalar comparisons must agree with the underlying Categorical values
        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)

        # Only categories with same categories can be compared
        def f():
            cat > cat_rev
        pytest.raises(TypeError, f)
        # categorical cannot be compared to Series or numpy array, and also
        # not the other way around
        pytest.raises(TypeError, lambda: cat > s)
        pytest.raises(TypeError, lambda: cat_rev > s)
        pytest.raises(TypeError, lambda: cat > a)
        pytest.raises(TypeError, lambda: cat_rev > a)
        pytest.raises(TypeError, lambda: s < cat)
        pytest.raises(TypeError, lambda: s < cat_rev)
        pytest.raises(TypeError, lambda: a < cat)
        pytest.raises(TypeError, lambda: a < cat_rev)

    @pytest.mark.parametrize('ctor', [
        lambda *args, **kwargs: Categorical(*args, **kwargs),
        lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
    ])
    def test_unordered_different_order_equal(self, ctor):
        """Unordered categoricals compare by value, not by category position."""
        # https://github.com/pandas-dev/pandas/issues/16014
        c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
        assert (c1 == c2).all()
        c1 = ctor(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['b', 'a'], categories=['b', 'a'], ordered=False)
        assert (c1 != c2).all()
        c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['b', 'b'], categories=['b', 'a'], ordered=False)
        assert (c1 != c2).all()
        c1 = ctor(['a', 'a'], categories=['a', 'b'], ordered=False)
        c2 = ctor(['a', 'b'], categories=['b', 'a'], ordered=False)
        result = c1 == c2
        tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))

    def test_unordered_different_categories_raises(self):
        """Comparing categoricals with different category sets raises."""
        c1 = Categorical(['a', 'b'], categories=['a', 'b'], ordered=False)
        c2 = Categorical(['a', 'c'], categories=['c', 'a'], ordered=False)
        with tm.assert_raises_regex(TypeError,
                                    "Categoricals can only be compared"):
            c1 == c2

    def test_compare_different_lengths(self):
        """Category lists of different lengths raise on comparison."""
        c1 = Categorical([], categories=['a', 'b'])
        c2 = Categorical([], categories=['a'])
        msg = "Categories are different lengths"
        with tm.assert_raises_regex(TypeError, msg):
            c1 == c2

    def test_numeric_like_ops(self):
        """Arithmetic, reductions and ufuncs on categorical data raise TypeError."""
        df = DataFrame({'value': np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)
        df = df.sort_values(by=['value'], ascending=True)
        df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
                                   right=False, labels=cat_labels)
        # numeric ops should not succeed
        for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
            pytest.raises(TypeError,
                          lambda: getattr(df, op)(df))
        # reduction ops should not succeed (unless specifically defined, e.g.
        # min/max)
        s = df['value_group']
        for op in ['kurt', 'skew', 'var', 'std', 'mean', 'sum', 'median']:
            pytest.raises(TypeError,
                          lambda: getattr(s, op)(numeric_only=False))
        # mad technically works because it takes always the numeric data
        # numpy ops
        s = Series(Categorical([1, 2, 3, 4]))
        pytest.raises(TypeError, lambda: np.sum(s))
        # numeric ops on a Series
        for op in ['__add__', '__sub__', '__mul__', '__truediv__']:
            pytest.raises(TypeError, lambda: getattr(s, op)(2))
        # invalid ufunc
        pytest.raises(TypeError, lambda: np.log(s))
| [
"jeff@reback.net"
] | jeff@reback.net |
0a34c488dbde7d4762ed53a0db5049c78bd899cf | be760ae24f4b9eb148ec3d3efe7d9490fc1c56d2 | /LPTHW/positive.py | 8699df3d8f2d7489e77826a968f0bcdd292c7839 | [] | no_license | yogicat/python-crash-course | 4a7c302cdc82647b3e2ccd42a80a9fedc9bf9c25 | af64c43aeb9a74cc794d88db9cbf6390e2e258eb | refs/heads/master | 2020-05-16T09:41:41.193500 | 2019-04-27T11:09:52 | 2019-04-29T11:09:52 | 182,958,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | def main():
    i = get_positive_int("positive integer: ")
    print(i)
def get_positive_int(prompt):
    """Loop until the user enters an integer greater than zero; return it.

    NOTE(review): int(input(...)) raises ValueError on non-numeric input,
    which is not handled here, so a typo aborts the program.
    """
    while True:
        n = int(input(prompt))
        if n > 0:
            return n
main()
| [
"dahe.oh@gmail.com"
] | dahe.oh@gmail.com |
dd01c8f97e4ee6823d6132b76cc95d6b6bffdaac | 6eab9dfed4521d65df94da0cd2b6542793a7ba22 | /As1.py | 81349938f30450c5815e42540ddf773e1813c050 | [] | no_license | shreyansh-sinha/ML-Assignments | 430b04660773c6879885df04c4f72238724fd50e | 90eadde93e8c2e64b6c7cc6b15e7aee6c4fbb464 | refs/heads/master | 2020-12-18T12:52:08.480934 | 2020-05-23T07:15:38 | 2020-05-23T07:15:38 | 235,388,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,318 | py | import numpy as np
#import pandas as pd
import matplotlib.pyplot as plt
def hypothesis(theta, X, n):
    """Linear-model predictions h = X @ theta, returned as a 1-D vector.

    `theta` may arrive with shape (n+1,) or (1, n+1); it is flattened
    before the product so the result always has shape (X.shape[0],).
    """
    weights = np.asarray(theta).reshape(n + 1)
    return X.dot(weights)
# iterative parameter updating (batch gradient descent)
def gradient_descent(theta, learning_rate, iterations, h, X, Y, n):
    """Run batch gradient descent for linear regression.

    theta: (n+1,) initial weights (mutated in place -- the caller's array
    is updated); h: current predictions for X; X: (m, n+1) design matrix
    including the bias column; Y: (m,) targets.
    Returns (theta reshaped to (1, n+1), cost history of length `iterations`).
    """
    cost = np.ones(iterations)
    for i in range(0, iterations):
        # bias term: gradient is the plain sum of residuals
        theta[0] = theta[0] - (learning_rate/X.shape[0]) * sum(h - Y)
        for j in range(1, n+1):
            # feature j: residuals weighted by column j of X
            theta[j] = theta[j] - (learning_rate/X.shape[0]) * sum((h - Y) * X.transpose()[j])
        h = hypothesis(theta, X, n)
        # cost function = 1/(2*m) (sigma(h(x) - y) ** 2)
        cost[i] = (1/X.shape[0]) * 0.5 * sum(np.square(h - Y))
    theta = theta.reshape(1, n+1)
    return theta, cost
def linear_regression(X, y, alpha, num_iters):
    """Fit ordinary least squares via gradient descent.

    X: (m, n) raw feature matrix (a bias column of ones is prepended here);
    y: (m,) targets; alpha: learning rate; num_iters: iteration count.
    Returns (theta, cost history) as produced by gradient_descent.
    """
    n = X.shape[1] #size of X
    one_column = np.ones((X.shape[0],1))
    X = np.concatenate((one_column, X), axis = 1)
    # initializing the parameter vector...
    theta = np.zeros(n+1)
    #print(theta)
    # hypothesis calculation....
    h = hypothesis(theta, X, n)
    # returning the optimized parameters by Gradient Descent...
    theta, cost = gradient_descent(theta,alpha,num_iters,h,X,y,n)
    return theta, cost
# Load the airfoil self-noise dataset (tab-separated; last column = target).
data = np.loadtxt('airfoil_self_noise.dat', delimiter='\t')
X_train = data[:,:-1] #feature set...select all the input values
y_train = data[:,5] #label set...select the output values
mean = np.ones(X_train.shape[1]) # define mean array
std_dev = np.ones(X_train.shape[1]) # define standard deviation array
# Scaling Data (z-score standardisation, column by column)
# the shape attribute for numpy arrays returns the dimensions of the array:
# if X has n rows and m columns then X.shape[0] is n and X.shape[1]
# is m
for i in range(0, X_train.shape[1]):
    mean[i] = np.mean(X_train.transpose()[i])
    std_dev[i] = np.std(X_train.transpose()[i])
    # standardise column i in place
    for j in range(0, X_train.shape[0]):
        X_train[j][i] = (X_train[j][i] - mean[i])/std_dev[i]
iterations = 10000
learning_rate = 0.005
theta, cost = linear_regression(X_train, y_train, learning_rate, iterations)
print(theta)
print(cost[iterations-1])  # final cost after the last iteration
cost = list(cost)
n_iterations = [x for x in range(1, 10001)]
plt.plot(n_iterations, cost)
plt.xlabel('Number of Iterations')
plt.ylabel('Cost Value')
# NOTE(review): there is no plt.show()/savefig here -- nothing is displayed
# when this runs as a plain (non-interactive) script.
| [
"noreply@github.com"
] | shreyansh-sinha.noreply@github.com |
8222458758108ced814b77fd8613c63ed0a6df86 | 395ba33c6faecc49eb3cbf32d7cc09ed4ee9c5f0 | /Person.py | ffcc9c3c461dc21d300e7524cb02c28bc96be5c5 | [] | no_license | MackRoe/Herd_Immunity_Term2 | 823d6e9ab8502efb09150529d364450a120a98dc | 236601fd9128790c9ec4d0e48163a700141f9d86 | refs/heads/master | 2020-09-26T06:23:56.950292 | 2019-12-10T18:07:25 | 2019-12-10T18:07:25 | 226,187,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | import random
from Virus import Virus
class Person:
    """A member of the simulated population.

    A person starts out alive and may carry an infection (a Virus object,
    or None).  The simulation sets vaccination status and infection when it
    instantiates each Person.
    """

    def __init__(self, is_vaccinated, infection=None):
        self.is_alive = True                 # boolean; everyone starts alive
        self.is_vaccinated = is_vaccinated   # boolean
        self.infection = infection           # Virus object, or None

    def did_survive_infection(self):
        """Resolve this person's infection; return True if they survive.

        Draws a uniform random float in [0.0, 1.0) and compares it with the
        virus's mortality_num.  If the draw is smaller, the person dies
        (is_alive set to False, returns False).  Otherwise the person
        survives: they become vaccinated, the infection is cleared, and
        True is returned.
        """
        # Bug fix: the original used random.randint(0.0, 1.0), which raises
        # ValueError for float arguments (and, even with ints, would only
        # ever produce 0 or 1).  random.random() yields the intended
        # uniform float in [0.0, 1.0).
        compare = random.random()
        if compare < self.infection.mortality_num:
            self.is_alive = False
            return False
        else:
            # Bug fix: the original assigned to *local* names `vaccinated`
            # and `infection`, leaving the instance untouched; the docstring
            # requires the instance attributes to be updated.
            self.is_vaccinated = True
            self.infection = None
            return True
| [
"elaine.music@students.makeschool.com"
] | elaine.music@students.makeschool.com |
c6b05976675faa83c8508a32dc60ddb4607ba399 | 9975809b516d3e6ff4cf3082761fde8f2c4cdcdb | /blogengine/blog/utils.py | db7d922921be4c805f219e53a1682a3a7de65ebb | [] | no_license | ameagle/django1 | ecba2b4a93724d92c4446e2b1957163ead9cfdc3 | f7895970b4480324be332e4a16b2c807d8b88bab | refs/heads/master | 2023-08-05T02:14:10.624488 | 2021-09-26T19:07:19 | 2021-09-26T19:07:19 | 408,866,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | from django.shortcuts import render, redirect
from django.shortcuts import get_object_or_404
from .models import *
class ObjectDetailMixin:
    """CBV mixin: render a single model instance looked up by slug.

    Subclasses set `model` and `template`; GET renders the template with
    the object exposed under the lower-cased model name, e.g. {'post': obj}.
    """
    model = None
    template = None

    def get(self, request, slug):
        instance = get_object_or_404(self.model, slug__iexact=slug)
        context_key = self.model.__name__.lower()
        return render(request, self.template, context={context_key: instance})
class ObjectCreateMixin():
    """CBV mixin: show a model form on GET, validate and save it on POST.

    Subclasses set `model_form` and `template`.  A valid POST saves the new
    object and redirects to it (via its get_absolute_url); an invalid POST
    re-renders the template with the bound form so its errors are shown.
    """
    model_form = None
    template = None

    def get(self, request):
        return render(request, self.template, context={'form': self.model_form()})

    def post(self, request):
        bound_form = self.model_form(request.POST)
        if not bound_form.is_valid():
            return render(request, self.template, context={'form': bound_form})
        new_obj = bound_form.save()
        return redirect(new_obj)
"ao@ixi.ru"
] | ao@ixi.ru |
e9d40f1152b20f9719a6a72cf80ee1684fc24f55 | 3acd83134884afb4ee92f58346162f847328623a | /django_vuetify/settings/production.py | fefe519385f435d69a8f098d00b1f04d060c707d | [] | no_license | Navaneeth-Nagesh/django_vue | a9ff8810d21518c5cd8c210cc7b716cc11d0fc22 | 0e825373327d98ece325b6b282ea9ebf20a73586 | refs/heads/master | 2020-05-19T01:56:57.852744 | 2019-05-03T14:26:46 | 2019-05-03T14:26:46 | 184,769,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py |
import os
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# NOTE(review): ALLOWED_HOSTS holds a single empty string, which matches no
# host -- every request will be rejected with a 400.  Fill in the real
# domain(s) before deploying.
ALLOWED_HOSTS = ['']
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE(review): NAME/USER/PASSWORD are blank placeholders; they must be
# provided (ideally via environment variables) for this configuration to work.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'PORT': '',
    }
}
| [
"navaneeth.webtrigon@gmail.com"
] | navaneeth.webtrigon@gmail.com |
76e5e742f70e3956df15fe104b869bd14bb845b2 | a68cf0acc3127303bed87d982558aa458ff5ad62 | /VRD/__init__.py | 1a1456cb40908938312912d832749d3b2c1e6dff | [] | no_license | AbhiJay-K/VRD | cf45f1a3047c0906d996c851e576c0d746b5b013 | 673ca3818a558c9ca3e5ada8290bb1f2da0fda1b | refs/heads/main | 2023-08-11T16:35:48.943967 | 2021-09-23T15:27:45 | 2021-09-23T15:27:45 | 395,602,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | __version__ = '0.22' | [
""
] | |
aa4be24b9867a5c959b936e949960dfdea6302f6 | 6c43a8e9075d0f1a1a3b6d478731f7e303487efb | /app.py | d7efc9b87a16427ab97db0948ee2cb6aaad94e74 | [
"MIT"
] | permissive | ericavdp/flashcards | e94a88f9326387a0c061a5bbd89fa4bd0d9968a3 | f758e12efca935afb4e0af550c98654a95056963 | refs/heads/master | 2020-04-21T09:40:14.970210 | 2019-02-06T18:56:15 | 2019-02-06T18:56:15 | 169,457,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | from flask import Flask, render_template, request
import pymysql
import pymysql.cursors
from random import randint
app = Flask(__name__)
@app.route('/')
def main():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/showMakeCard')
def showMakeCard():
    """Render the form used to create a new flashcard."""
    return render_template('makeCard.html')
@app.route('/makeCard', methods=['POST', 'GET'])
def makeCard():
    """Insert a new flashcard from the submitted form, then show a success page.

    NOTE(review): the bare `return` statements in the try/except blocks
    return None (an invalid Flask response), but they never take effect:
    the `finally` block below ends with its own `return`, which always wins
    and also swallows any exception raised earlier.
    """
    # conn = mysql.connect()
    # NOTE(review): '*' credentials are placeholders and must be configured.
    conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
                           cursorclass=pymysql.cursors.DictCursor)
    cursor = conn.cursor()
    try:
        _question = request.form['inputQuestion']
        _answer = request.form['inputAnswer']
        _tags = request.form['inputTags']
        _confidence = request.form['inputConfidence']
        if _question and _answer and _tags and _confidence:
            # Parameterised INSERT -- safe against SQL injection.
            cursor.execute(""" INSERT INTO card (question, answer, tags, confidence)
            VALUES (%s, %s, %s, %s)""", (_question, _answer, _tags, _confidence))
            conn.commit()
            # cursor.close()
            # conn.close()
            print('boop')
            return
        else:
            print('enter the required fields')
            return
    except BaseException:
        print('it broke :(')
        return
    finally:
        # Pick a random card and render it on the success page.
        cursor.execute("SELECT COUNT(*) FROM card")
        boop = cursor.fetchone()
        print('count of rows is ', boop)
        print(boop.values())
        moo = boop['COUNT(*)']
        print(moo)
        # NOTE(review): randint(0, moo) can draw 0, which is never a valid
        # AUTO_INCREMENT id, so the fetchone() below may return None.
        shuffle = randint(0, moo)
        print(shuffle)
        cursor.execute("SELECT * FROM card WHERE id = %s", [shuffle])
        yup = cursor.fetchone()
        print(yup)
        cursor.close()
        conn.close()
        print('fin')
        return render_template("success.html")
@app.route('/showReviewCards')
def showReviewCards():
    """Render the card-review entry page."""
    return render_template('reviewcards.html')
@app.route('/displayCard', methods=['POST', 'GET'])
def displayCard():
    """Pick a random flashcard from the database and render its question side.

    Passes the card's question, answer, tags and confidence to the
    showCard.html template.
    """
    # NOTE(review): '*' credentials are placeholders and must be configured.
    conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
                           cursorclass=pymysql.cursors.DictCursor)
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT COUNT(*) FROM card")
        row_count = cursor.fetchone()['COUNT(*)']
        # Bug fix: the original drew randint(0, row_count).  AUTO_INCREMENT
        # ids start at 1, so a draw of 0 never matches a row, fetchone()
        # returns None, and the render below crashes.  Draw from
        # [1, row_count] instead.
        # NOTE(review): this still assumes ids are gap-free (no deleted rows)
        # and that the table is non-empty -- confirm against actual usage.
        shuffle = randint(1, row_count)
        cursor.execute("SELECT * FROM card WHERE id = %s", [shuffle])
        card = cursor.fetchone()
    finally:
        # Always release the cursor/connection, even if a query fails.
        cursor.close()
        conn.close()
    return render_template('showCard.html', question=card['question'],
                           answer=card['answer'], tags=card['tags'],
                           confidence=card['confidence'])
@app.route('/showAnswer', methods=['POST', 'GET'])
def showAnswer():
    """Reveal a card's answer, passing the confidence value through."""
    form_data = request.form
    confidence_value = form_data['confidence']
    answer_text = form_data['answer']
    return render_template('showAnswer.html', answer=answer_text,
                           confidence=confidence_value)
@app.route('/updateCard', methods=['POST', 'GET'])
def updateCard():
    """Placeholder route for updating a card's confidence after a review.

    NOTE(review): the actual update logic is entirely commented out, so this
    view currently opens a DB connection, reads the form, prints, and always
    renders reviewCards.html from the `finally` block -- whose `return` also
    swallows any exception (and the bare `return` None results above it).
    """
    conn = pymysql.connect(user='*', passwd='*', db='*', unix_socket='/tmp/mysql.sock',
                           cursorclass=pymysql.cursors.DictCursor)
    cursor = conn.cursor()
    try:
        #create new table for historical confidence and timestamp
        _confidence = request.form['inputConfidence']
        if _confidence:
            #update the confidence value to new value, archive old confidence value?
            # cursor.execute(""" INSERT INTO card (question, answer, tags, confidence)
            #               VALUES (%s,)""", (_question, _answer, _tags, _confidence))
            # conn.commit()
            #insert a timestamp
            print('boop')
            return
        else:
            print('enter the required fields')
            return
    except BaseException:
        print('it broke :(')
        return
    finally:
        cursor.close()
        conn.close()
        return render_template("reviewCards.html")
# def selectNextCard():
    # random selection based on index number (training case)
    # selection based on time and confidence level (assumption case)
    #display needs to update a time stamp
# def trainAlgorithm():
    # Bayesian confidence training around time vs confidence
if __name__ == '__main__':
    app.run()
| [
"noreply@github.com"
] | ericavdp.noreply@github.com |
0c3252c4dcf7604b633a0875ad23e72836719ee2 | 75ff3b2483447ae18bffe508fe66844bf5e57199 | /course_parsers/campus_course_parser.py | afc239a915693a790d2f4822245d63c4814f5a26 | [] | no_license | SantoshSrinivas79/StudyBoi | 84bdaa4227d05abcd46a3ba22e49ad96ebec5309 | 2c56b2ff35cbb3f85efb4de0168966d7d7d47791 | refs/heads/master | 2022-04-24T20:27:53.202585 | 2020-04-27T12:51:45 | 2020-04-27T12:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import requests
from bs4 import BeautifulSoup
from parameters import *
class Course:
    """Lightweight record describing a single online-course listing."""

    def __init__(self, title, duration, link, description):
        # Store every field verbatim; no validation is performed.
        self.title, self.duration = title, duration
        self.link, self.description = link, description
def parse_course(url):
    """Fetch a campus.gov.il course page and print its info fields to stdout.

    Each info field is printed as "<label><value>" with spaces stripped from
    the value; nothing is returned.
    NOTE(review): the bare `except:` clauses hide *all* failures -- a bad
    response, missing markup, even KeyboardInterrupt just prints 'a' (or is
    silently skipped per-field).  These should be narrowed to the expected
    exception types.  Also, .replace('', '') is a no-op and can be dropped.
    """
    try:
        response = requests.get(url, headers=headers)
        response = response.text
        data = BeautifulSoup(response, 'lxml')
        workbox = data.find('div',class_='wrap-info-single-course-inner')
        inner_workbox = workbox.find('div',class_='content-info-wrap')
        for field in inner_workbox:
            try:
                spans = field.find_all('span')
                print(f"{spans[0].text}{spans[1].text.replace(' ','').replace('', '')}")
                print('_____________________')
            except:
                # fields without two <span>s are skipped silently
                pass
    except:
        print('a')
parse_course('https://campus.gov.il/course/course-v1-cs-gov_cs_selfpy101/')
"urigami2010@gmail.com"
] | urigami2010@gmail.com |
250e40e1c9cc2fcc4091722bca3c92a70c3ac1bf | feab2811821b0d7bcb6dc4c7b29c703757a85747 | /and.py | 3bb581c8b3225921eca07c515454b7223ba2cebd | [] | no_license | smritipillai/256314_Daily_Commits | b2e6638efde4dfdd19c4b3b8fc375160c7350080 | 90dbbc896213c9d9239b4c10eafb0e7ccbc7c2e2 | refs/heads/main | 2023-04-03T14:49:26.089096 | 2021-04-23T06:04:22 | 2021-04-23T06:04:22 | 359,127,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | if (1==1) and (2+2 >3):
print("true")
else
print("false")
| [
"smritipillai.smriti@gmail.com"
] | smritipillai.smriti@gmail.com |
922c0ec014cbb7e3e0b8cf73bd810fb9a6f986a2 | 631a90a2af858b784f19b1242c91f1aaa807cd86 | /Merging-catalogs-V2/Benchmark_plotter.py | 48606cac840ba664f9ece3aefc47d89f97620209 | [] | no_license | atilapaes/PhD-PostDoc | 560430cc8aa0f845934216acf9ec42d2aed9046b | 8611e636e8d9974b3f4fdf24739f474131b9ea51 | refs/heads/master | 2022-12-19T21:06:53.738581 | 2020-09-26T03:54:42 | 2020-09-26T03:54:42 | 281,584,169 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 01:48:07 2020
@author: atilapaes
"""
import pandas
#%% Data importing and preparation
# Import catalog of single events (no duplicates present); the first CSV
# column ('Unnamed: 0') is used as the row index.
catalog = pandas.read_csv('result_catalog_merged_ES_MFA_FINAL.csv',index_col='Unnamed: 0')
catalog['datetime']=pandas.to_datetime(catalog['datetime'])
#%% Generate the data
# Survey days covered by the catalog (originally obtained via
# set(catalog['Date'].values))
list_days=['2016-10-26', '2016-10-27', '2016-10-28', '2016-10-29', '2016-10-30', '2016-10-31',
           '2016-11-01', '2016-11-02', '2016-11-03','2016-11-04', '2016-11-05', '2016-11-06',
           '2016-11-07', '2016-11-08', '2016-11-09', '2016-11-10', '2016-11-11', '2016-11-12', '2016-11-13',
           '2016-11-14', '2016-11-15', '2016-11-16', '2016-11-17', '2016-11-18', '2016-11-19',
           '2016-11-20', '2016-11-21', '2016-11-22', '2016-11-23', '2016-11-24', '2016-11-25',
           '2016-11-26', '2016-11-27', '2016-11-28', '2016-11-29', '2016-11-30']
#%% Creating the dataframe
benchmark=pandas.DataFrame(data=list_days,columns=['Date'])
# One count column per detection source: events seen only by ES, only by
# MFA, or by both pipelines.
benchmark['ES']=''
benchmark['MFA']=''
benchmark['Both']=''
#%% Count, for every survey day, how many catalog rows came from each source.
for index_day in range(len(list_days)):
    benchmark.at[index_day,'ES']=len(catalog.loc[(catalog['Date']==list_days[index_day]) & (catalog['source']=='ES')])
    benchmark.at[index_day,'MFA']=len(catalog.loc[(catalog['Date']==list_days[index_day]) & (catalog['source']=='MFA')])
    benchmark.at[index_day,'Both']=len(catalog.loc[(catalog['Date']==list_days[index_day]) & (catalog['source']=='Both')])
#%% Plot
benchmark.set_index('Date').plot.bar(title='Events detected',figsize=(15,10),fontsize=12)
"atila.paes@gmail.com"
] | atila.paes@gmail.com |
5167039388d0d43817b5eb5500459c05bf2b35e8 | c181023ce9db43e957df86420d3005b677d16fde | /Boxplot_calculations.py | d0d12c4969daa087e46251bc2f6687d243be6d79 | [] | no_license | dcmuelle/Master-Thesis | 5c9184145db2c4084a06e38f5795af9fa6bf5dcf | 70cffd1ba5d3df37dfc5a54b38070c541b279af0 | refs/heads/main | 2023-07-07T21:37:01.079184 | 2021-08-10T06:51:18 | 2021-08-10T06:51:18 | 394,547,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,338 | py | {
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "olympic-particle",
"metadata": {},
"outputs": [],
"source": [
"def Boxplot_calculations(meteo_data):\n",
"meteo_data = meteo_data[meteo_data.time.dt.year == year]\n",
"hourly_average=meteo_data.groupby([meteo_data[\"time\"].dt.month, meteo_data[\"time\"].dt.day, meteo_data[\"time\"].dt.hour]).mean()\n",
"hourly_average.index.names = [\"month\", \"day\", \"hour\"]\n",
"hourly_average['Prod/m2'] = hourly_average['G(i)']*0.17/1000\n",
"hourly_average['Prod'] = hourly_average['Prod/m2']*size\n",
"yearly_PV_prod = hourly_average['Prod'].sum()\n",
"PV_production = hourly_average['Prod']\n",
"power_balance = pd.DataFrame()\n",
"power_balance['consumption'] = total_elec_load\n",
"power_balance['from PV'] = PV_production\n",
"power_balance['exchange grid'] = PV_production - total_elec_load\n",
"power_balance['to Grid'] = (PV_production - total_elec_load).clip(lower=0)\n",
"power_balance['from Grid'] = (total_elec_load - PV_production).clip(lower=0)\n",
"power_balance = power_balance.fillna(0)\n",
"total_elec_load = load_SFH_modern_full_retrofit['Total Electricity without AC']\n",
"power_balance = pd.DataFrame()\n",
"power_balance['consumption'] = total_elec_load\n",
"power_balance['from PV'] = PV_production\n",
"power_balance['exchange grid'] = PV_production - total_elec_load\n",
"power_balance['to Grid'] = (PV_production - total_elec_load).clip(lower=0)\n",
"power_balance['from Grid'] = (total_elec_load - PV_production).clip(lower=0)\n",
"power_balance = power_balance.fillna(0)\n",
"power_balance = BatteryDispatch(power_balance, battery_size, eta_discharge, max_c_charge, max_c_discharge)\n",
"power_balance['exchange grid new'] = power_balance['to Grid New'] - power_balance['from Grid New']"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
| [
"PsiVmGij75"
] | PsiVmGij75 |
432430035beb53f8a57dcc46ac91de96ad290daa | d00a51990868a5e4eb4cc3100d47bd1f8930ffa3 | /rllab/envs/mujoco/ant_env.py | c0dc8b41edf98c55fa5588c3d3b046b6de4cf8ec | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | akashratheesh/rllab | 42f1d2a21701343c317ef70c7432439236dbafd7 | 5b0232d2a1b412dd4fd7eb5835142f25ff981afe | refs/heads/master | 2023-08-28T16:20:42.932852 | 2021-10-25T00:43:33 | 2021-10-25T00:43:33 | 417,612,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | from .mujoco_env import MujocoEnv
from rllab.core.serializable import Serializable
import numpy as np
from rllab.envs.base import Step
from rllab.misc.overrides import overrides
from rllab.misc import logger
class AntEnv(MujocoEnv, Serializable):
    """MuJoCo 'Ant' locomotion environment (quadruped forward-walking task)."""
    FILE = 'ant.xml'
    def __init__(self, *args, **kwargs):
        super(AntEnv, self).__init__(*args, **kwargs)
        Serializable.__init__(self, *args, **kwargs)
    def get_current_obs(self):
        """Observation: joint positions/velocities, torso rotation matrix,
        torso centre of mass and its velocity, flattened into one 1-D vector."""
        return np.concatenate([
            self.model.data.qpos.flat,
            self.model.data.qvel.flat,
            self.get_body_xmat("torso").flat,
            self.get_body_com("torso"),
            self.get_body_comvel("torso"),
        ]).reshape(-1)
    def step(self, action, collectingInitialData=False):
        """Advance the simulation one timestep with the given action.

        Reward = forward CoM velocity - scaled control cost + survive bonus
        (the contact cost is currently disabled, i.e. hard-coded to 0).  The
        episode terminates when the state becomes non-finite or the torso
        height leaves [0.3, 1.0].
        NOTE(review): `collectingInitialData` is accepted but unused here;
        xposbefore/xposafter are computed but also unused.
        """
        xposbefore = self.get_body_com("torso")[0]
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        xposafter = self.get_body_com("torso")[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        notdone = np.isfinite(state).all() \
            and self.get_body_com("torso")[2] >= 0.3 and self.get_body_com("torso")[2] <= 1.0 #used to be 0.2, state[2]
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)
    def get_my_sim_state(self):
        """Full simulator state as one flat vector: qpos, qvel, qacc, ctrl."""
        my_sim_state=np.squeeze(np.concatenate((self.model.data.qpos, self.model.data.qvel, self.model.data.qacc, self.model.data.ctrl)))
        return my_sim_state
    @overrides
    def log_diagnostics(self, paths):
        """Log forward-progress statistics (x-displacement of the torso CoM,
        observation index -3) across the sampled paths."""
        progs = [
            path["observations"][-1][-3] - path["observations"][0][-3]
            for path in paths
        ]
        logger.record_tabular('AverageForwardProgress', np.mean(progs))
        logger.record_tabular('MaxForwardProgress', np.max(progs))
        logger.record_tabular('MinForwardProgress', np.min(progs))
        logger.record_tabular('StdForwardProgress', np.std(progs))
| [
"anusha.nagabandi@gmail.com"
] | anusha.nagabandi@gmail.com |
8c988b95d39cf5d55d0879a8e1fb1ad9356e1543 | 843da58da462f0d82c847c12a4b67eeaee072e3d | /r2env/__init__.py | 92d4d42ea0d3c4f5fb6debe86e1689ae9b80ad18 | [
"MIT"
] | permissive | as0ler/r2env | 1864a2d716c688aaac4ed944a3ca65a75a373381 | 8796a6502741ccef82bdc7174c2ad5f04c87b3ef | refs/heads/master | 2023-05-30T08:51:02.346778 | 2021-05-24T22:41:03 | 2021-05-24T22:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from r2env.package import Package
import os

import r2env.ipdb
from r2env import ipdb  # binds the short name used by R2Env's methods
from r2env.repl import main
def load_packages(cfg):
    """Instantiate every known package backend with the given configuration."""
    from r2env.db import Radare2
    from r2env.db import R0
    return [Radare2(cfg), R0(cfg)]
# Default configuration shared by the package backends in load_packages().
cfg = {
    "srcdir": "",  # depends on the pkg
    "linkdir": "/usr",
    # NOTE(review): every other entry is a path string; an int here looks
    # wrong -- confirm the intended value.
    "envdir": 123,
    "prefix": "",
}
class R2Env:
    """High-level facade over the package database, the local working
    directory and the installed-package database (ipdb)."""

    def __init__(self):
        self.db = load_packages(cfg)

    def init(self):
        """Create the local .r2env working directory if it is missing."""
        if not os.path.isdir(".r2env"):
            os.mkdir(".r2env")

    def version(self):
        """Version of the r2env tool itself."""
        return "0.2.0"

    def available_packages(self):
        return self.db

    def installed_packages(self):
        # Bug fix: the module was imported as `import r2env.ipdb`, which does
        # not bind the bare name `ipdb`, so this call raised NameError.  The
        # import block now also does `from r2env import ipdb`.
        return ipdb.list()

    def clean_package(self, pkgname):
        return ipdb.clean(pkgname)
| [
"pancake@nopcode.org"
] | pancake@nopcode.org |
e7457ef3edccd7ffa5e7a792a53e60da70576705 | aa32c9526306990f599a495919508d16d4361492 | /lesson_002/03_favorite_movies.py | 481a1866b45c67e363391500e597a80794a498f5 | [] | no_license | nnngracheducation/pyHomeWorks | 31ff46a8376fdf28bab5de4637cedc220edd31ee | 507bab7a63ae9b72d20797ca6e815783da41eaae | refs/heads/master | 2023-01-08T18:29:35.743241 | 2020-11-07T15:35:24 | 2020-11-07T15:35:24 | 310,876,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# A string listing favourite movies
my_favorite_movies = 'Терминатор, Пятый элемент, Аватар, Чужие, Назад в будущее'
# Using string indexing, print to the console, in order:
# - the first movie
# - the last one
# - the second one
# - the second from the end
# Redefining my_favorite_movies or using .split() is not allowed.
# The comma must not be printed.
print(my_favorite_movies[:10])    # first title: 10 characters
print(my_favorite_movies[-15:])   # last title: final 15 characters
print(my_favorite_movies[12:25])  # second title
print(my_favorite_movies[-22: -17])  # second from the end
"nnngrach@gmail.com"
] | nnngrach@gmail.com |
010885dad083a7b1ec9ebb80c5c3d64b92989605 | 37930870719caede967fdf6905c032e22d086e8b | /scripts/imaging/chaining/slam/light_parametric__mass_light_dark__source_parametric.py | 80e4df39df68667dc5cd365fcf51cfac21c6f9f0 | [] | no_license | Cywtim/autolens_workspace | cbede944c0f85ee95cd7362fee957ef77e701280 | da40cafee8dc26e5d8b1041888fb280598e74a5e | refs/heads/master | 2023-04-05T14:22:06.091992 | 2021-04-15T20:29:28 | 2021-04-15T20:29:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | """
SLaM (Source, Light and Mass): Light Parametric + Mass Total + Source Parametric
================================================================================
SLaM pipelines break the analysis down into multiple pipelines which focus on modeling a specific aspect of the strong
lens, first the Source, then the (lens) Light and finally the Mass. Each of these pipelines has it own inputs which
which customize the model and analysis in that pipeline.
The models fitted in earlier pipelines determine the model used in later pipelines. For example, if the SOURCE PIPELINE
uses a parametric `EllSersic` profile for the bulge, this will be used in the subsequent MASS LIGHT DARK PIPELINE.
Using a SOURCE PARAMETRIC PIPELINE, LIGHT PIPELINE and a MASS LIGHT DARK PIPELINE this SLaM script fits `Imaging` of
a strong lens system, where in the final model:
- The lens galaxy's light is a bulge `EllSersic`.
- The lens galaxy's stellar mass distribution is a bulge tied to the light model above.
- The lens galaxy's dark matter mass distribution is modeled as a `EllNFWMCRLudlow`.
- The source galaxy's light is a parametric `EllSersic`.
This runner uses the SLaM pipelines:
`source_parametric/source_parametric__with_lens_light`
`light_parametric/with_lens_light`
`mass_total/mass_light_dark`
Check them out for a detailed description of the analysis!
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
import os
import sys
from os import path
import autofit as af
import autolens as al
import autolens.plot as aplt
sys.path.insert(0, os.getcwd())
import slam
"""
__Dataset__
Load the `Imaging` data, define the `Mask2D` and plot them.
"""
dataset_name = "light_sersic__mass_mlr_nfw__source_sersic"
dataset_path = path.join("dataset", "imaging", "with_lens_light", dataset_name)
imaging = al.Imaging.from_fits(
image_path=path.join(dataset_path, "image.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
pixel_scales=0.1,
)
mask = al.Mask2D.circular(
shape_native=imaging.shape_native, pixel_scales=imaging.pixel_scales, radius=3.0
)
imaging = imaging.apply_mask(mask=mask)
imaging_plotter = aplt.ImagingPlotter(imaging=imaging)
imaging_plotter.subplot_imaging()
"""
__Paths__
The path the results of all chained searches are output:
"""
path_prefix = path.join("imaging", "slam", dataset_name)
"""
__Redshifts__
The redshifts of the lens and source galaxies, which are used to perform unit converions of the model and data (e.g.
from arc-seconds to kiloparsecs, masses to solar masses, etc.).
"""
redshift_lens = 0.5
redshift_source = 1.0
"""
__HYPER SETUP__
The `SetupHyper` determines which hyper-mode features are used during the model-fit.
"""
setup_hyper = al.SetupHyper(
hyper_galaxies_lens=False,
hyper_galaxies_source=False,
hyper_image_sky=None,
hyper_background_noise=None,
)
"""
__SOURCE PARAMETRIC PIPELINE (with lens light)__
The SOURCE PARAMETRIC PIPELINE (with lens light) uses three searches to initialize a robust model for the
source galaxy's light, which in this example:
- Uses a parametric `EllSersic` bulge.
- Uses an `EllIsothermal` model for the lens's total mass distribution with an `ExternalShear`.
__Settings__:
- Mass Centre: Fix the mass profile centre to (0.0, 0.0) (this assumption will be relaxed in the MASS LIGHT DARK
PIPELINE).
"""
analysis = al.AnalysisImaging(dataset=imaging)
bulge = af.Model(al.lp.EllSersic)
bulge.centre = (0.0, 0.0)
source_parametric_results = slam.source_parametric.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
lens_bulge=bulge,
lens_disk=None,
mass=af.Model(al.mp.EllIsothermal),
shear=af.Model(al.mp.ExternalShear),
source_bulge=af.Model(al.lp.EllSersic),
mass_centre=(0.0, 0.0),
redshift_lens=redshift_lens,
redshift_source=redshift_source,
)
"""
__LIGHT PARAMETRIC PIPELINE__
The LIGHT PARAMETRIC PIPELINE uses one search to fit a complex lens light model to a high level of accuracy, using the
lens mass model and source light model fixed to the maximum log likelihood result of the SOURCE PARAMETRIC PIPELINE.
In this example it:
- Uses a parametric `EllSersic` bulge [Do not use the results of the SOURCE PARAMETRIC PIPELINE to initialize priors].
- Uses an `EllIsothermal` model for the lens's total mass distribution [fixed from SOURCE PARAMETRIC PIPELINE].
- Uses the `EllSersic` model representing a bulge for the source's light [fixed from SOURCE PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PIPELINE through to the MASS
PIPELINE [fixed values].
"""
bulge = af.Model(al.lp.EllSersic)
light_results = slam.light_parametric.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
source_results=source_parametric_results,
lens_bulge=bulge,
lens_disk=None,
)
"""
__MASS LIGHT DARK PIPELINE (with lens light)__
The MASS LIGHT DARK PIPELINE (with lens light) uses one search to fits a complex lens mass model to a high level of
accuracy, using the source model of the SOURCE PIPELINE and the lens light model of the LIGHT PARAMETRIC PIPELINE to
initialize the model priors . In this example it:
- Uses a parametric `EllSersic` bulge for the lens galaxy's light and its stellar mass [12 parameters: fixed from
LIGHT PARAMETRIC PIPELINE].
- The lens galaxy's dark matter mass distribution is a `EllNFWMCRLudlow` whose centre is aligned with bulge of
the light and stellar mass mdoel above [5 parameters].
- Uses the `EllSersic` model representing a bulge for the source's light [priors initialized from SOURCE
PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PARAMETRIC PIPELINE through to the MASS
LIGHT DARK PIPELINE.
"""
analysis = al.AnalysisImaging(dataset=imaging)
lens_bulge = af.Model(al.lmp.EllSersic)
dark = af.Model(al.mp.EllNFWMCRLudlow)
dark.centre = lens_bulge.centre
mass_results = slam.mass_light_dark.with_lens_light(
path_prefix=path_prefix,
analysis=analysis,
setup_hyper=setup_hyper,
source_results=source_parametric_results,
light_results=light_results,
lens_bulge=lens_bulge,
lens_disk=None,
lens_envelope=None,
dark=dark,
)
"""
Finish.
"""
| [
"james.w.nightingale@durham.ac.uk"
] | james.w.nightingale@durham.ac.uk |
a24fa36e3d4cbc2f2bd776e44a28aa7d7c325484 | 3d273d7102dba56a99ba8eb2a163b160d4e882bc | /gnn.py | a1b8df84b637bbe80321af57010d083a8cf94d49 | [] | no_license | silent567/nn_parts | 0b85b2d615f040cff0fd38c402b0fa83558b3f1b | 1f0dfe1b0a0b794066220f2c0bb200bfbef605a1 | refs/heads/master | 2020-04-28T23:25:58.929950 | 2019-03-14T16:08:21 | 2019-03-14T16:08:21 | 175,655,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,548 | py | #!/usr/bin/env python
# coding=utf-8
# 20180825 by tanghao
# This file contains graph-network-related layers
import tensorflow as tf
from .init_var import *
from .fc import *
from .norm import LayerNorm
class FastGCNN:
    '''
    The class for the FastGCNN layer, which is based on "Convolutional Neural Networks on Graphs with Fast Localized Spectral Filtering"
    (Defferrard et al., 2016): spectral graph convolution approximated by a degree-K Chebyshev polynomial of the graph Laplacian.
    '''
    def __init__(self,kernel_size,input_filter_size,output_filter_size,node_num=None,dynamical_graph_flag=True,name_scope='FastGCNNCell',F=None,F_init=None,L=None,L_init=None,summ_flag=True):
        '''
        kernel_size is positive int, which is K in the paper, the polynomial degree of filters
        input_filter_size is int, which is input channel number
        output_filter_size is int, which is output channel number
        node_num is int, which is the vertex number in the graph.
        dynamical_graph_flag is boolean, which denotes whether the Laplacian Matrix is updated by the optimizer
            NOTE(review): the flag is stored but never referenced in this class -- confirm it is honoured elsewhere
        name_scope should be of type string
        F is tf.Variable with shape equal to [self.input_filter_size,self.output_filter_size,self.kernel_size]
        F_init can be tf.Variable, tf.Tensor, list, numpy.ndarray of shape [self.input_filter_size,self.output_filter_size,self.kernel_size]
        L is tf.Variable with shape equal to [self.node_num,self.node_num]
        L_init can be tf.Variable, tf.Tensor, list, numpy.ndarray of shape [self.node_num,self.node_num]
        summ_flag is boolean, indicating whether tensors are summarized
        One of node_num, L, L_init should not be None for Laplacian Matrix initialization
        Sample use:
            gcnn_layer = FastGCNN(kernel_size,input_filter_size,output_filter_size,node_num)
            gcnn_layer = FastGCNN(kernel_size,input_filter_size,output_filter_size,dynamical_graph_flag=False,L=LaplacianMatrix)
        '''
        self.input_filter_size = input_filter_size
        self.output_filter_size = output_filter_size
        self.kernel_size = kernel_size
        self.dynamical_graph_flag = dynamical_graph_flag
        self.summ_flag = summ_flag
        with tf.name_scope(name_scope) as self.name_scope:
            # Symmetrize the (possibly learned) matrix so it is a valid Laplacian.
            L = init_identity_matrix_variable(L,L_init,node_num,'UnsymmetrixLaplacianMatrix')
            self.L = tf.divide(L+tf.transpose(L),2.,name='LaplacianMatrix')
            self.node_num = self.L.shape.as_list()[-1]
            # Largest eigenvalue rescales L into [-1, 1] for the Chebyshev basis.
            # NOTE(review): full eigendecomposition is part of the graph, so it is
            # recomputed on every run -- O(node_num^3) each step.
            self.L_maxeigenvalue = tf.self_adjoint_eig(self.L)[0][-1]
            self.normL = tf.subtract(2*self.L/self.L_maxeigenvalue,tf.eye(self.node_num),name='NormedLaplacianMatrix')
            # Chebyshev recurrence: T_0 = I, T_1 = L~, T_k = 2 L~ T_{k-1} - T_{k-2}.
            if self.kernel_size == 1:
                TnormL_list = [tf.eye(self.node_num)]
            else:
                TnormL_list = [tf.eye(self.node_num),self.normL]
            for tindex in range(2,self.kernel_size):
                TnormL_list.append(2*tf.matmul(self.normL,TnormL_list[-1])-TnormL_list[-2])
            self.TnormL = tf.stack(TnormL_list,axis=0)
            # Learnable filter weights: one scalar per (in-channel, out-channel, polynomial order).
            self.F = init_random_variable(F,F_init,[self.input_filter_size,self.output_filter_size,self.kernel_size],2./(self.input_filter_size*self.node_num),'filter')
            # coefficents[i, j, :, :] is the full node_num x node_num linear map from
            # input channel i to output channel j.
            self.coefficents = tf.einsum('aim,mjk->aijk',self.F,self.TnormL,name='coefficents')
            if self.summ_flag:
                self.F_summ = tf.summary.histogram('F_summ',self.F)
                self.L_summ = tf.summary.histogram('L_summ',self.L)
                self.normL_summ = tf.summary.histogram('normL_summ',self.normL)
    def get_l2_loss(self,):
        # L2 regularisation over the filter weights only; the Laplacian is not penalised.
        return tf.reduce_mean(tf.square(self.F))
    def __call__(self,input_tensor):
        return self.get_output(input_tensor)
    def get_output(self,input_tensor):
        '''
        input_tensor should be of shape [N,node_num,input_filter_size]
        output_tensor should be of the same type as input_tensor
        and of shape [N,node_num,output_filter_size]
        '''
        with tf.name_scope(self.name_scope):
            return tf.einsum('nai,ijab->nbj',input_tensor,self.coefficents)
class DenseUpdateLayer(object):
    '''
    Stack of fully connected layers used as a per-node update function.
    Each layer is Dense -> (LayerNorm) -> activation -> (dropout), with an
    optional residual connection when input and output widths match.
    '''
    def __init__(self,input_size,output_size,layer_num,norm_flag=True,dropout_flag=False,res_flag=True,activation_func=tf.nn.leaky_relu,summ_flag=False,name_scope='DenseUpdateLayer'):
        self.input_size = input_size
        self.output_size = output_size
        self.layer_num = layer_num
        self.norm_flag = norm_flag
        self.dropout_flag = dropout_flag
        self.res_flag = res_flag
        self.activation_func = activation_func
        self.summ_flag = summ_flag
        with tf.name_scope(name_scope) as self.name_scope:
            pass
        self.build_model()
    def build_model(self):
        # Builds `layer_num` Dense + LayerNorm pairs; after the first layer the
        # running input width becomes `output_size`.
        input_size = self.input_size
        output_size = self.output_size
        layer_num = self.layer_num
        summ_flag = self.summ_flag
        self.name_scope_layers = []
        self.dense_layers = []
        self.norm_layers = []
        with tf.name_scope(self.name_scope):
            for ln in range(layer_num):
                with tf.name_scope('Layer%d'%ln) as tmp_name_scope:
                    self.name_scope_layers.append(tmp_name_scope)
                    self.dense_layers.append(Dense(input_size,output_size,activation_func=linear_activation,summ_flag=summ_flag))
                    self.norm_layers.append(LayerNorm([output_size],summ_flag=summ_flag))
                    input_size = output_size
    def __call__(self,X,train_flag):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output: updated node attributes X' of type tf.Tensor and of shape [N,C']
        '''
        # NOTE(review): if layer_num == 0 the loop body never runs and `output_X`
        # is unbound (NameError) -- confirm layer_num >= 1 is always used.
        norm_flag = self.norm_flag
        dropout_flag = self.dropout_flag
        res_flag = self.res_flag
        activation_func = self.activation_func
        with tf.name_scope(self.name_scope):
            input_X = X
            for ns,dense,norm in zip(self.name_scope_layers,self.dense_layers,self.norm_layers):
                with tf.name_scope(ns):
                    output_X = dense(input_X)
                    if norm_flag:
                        output_X = norm(output_X)
                    output_X = activation_func(output_X)
                    if dropout_flag:
                        output_X = tf.layers.dropout(output_X,0.5,training=train_flag)
                    # Residual connection only when the widths line up.
                    if res_flag and dense.input_size == dense.output_size:
                        output_X = tf.add(output_X,input_X)
                    input_X = output_X
            return output_X
    def get_l2_loss(self):
        # Sum of the per-layer weight penalties.
        with tf.name_scope(self.name_scope):
            l2_loss = tf.add_n([dense.get_l2_loss() for dense in self.dense_layers])
            return l2_loss
class MPNNLayer:
    '''
    Message-passing layer: updates node attributes, aggregates them once per
    edge label, and sums the per-label results.
    '''
    def __init__(self,update_func,aggregate_func,edge_label_num,name_scope='MPNNLayer'):
        with tf.name_scope(name_scope) as self.name_scope:
            '''
            update_func is applied to X to update node attributes individually (similar to conv when kernel size=1)
            aggregate_func receives A and X and output aggregated node attributes X'
            '''
            self.update_func = update_func
            self.aggregate_func = aggregate_func
            self.edge_label_num = edge_label_num
    def __call__(self,A,X,train_flag):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N,M]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes, M is the number of edge classes, and C is the channel number of node attributes
            (shapes corrected from [N,N,C]: update_func and the aggregators both operate on [N,C] matrices)
            train_flag is the flag for dropout layer of type tf.Tensor, of shape [] and of type tf.Boolean
        output arguments:
            updated and aggregated new node attributes X' of type tf.Tensor and of shape [N,C']
        '''
        # NOTE(review): the SAME update_func instance (same weights) is applied
        # for every edge label; if per-edge-type transforms are intended, each
        # label needs its own update function -- confirm.
        update_func = self.update_func
        aggregate_func = self.aggregate_func
        with tf.name_scope(self.name_scope):
            output_X_list = []
            for en in range(self.edge_label_num):
                updated_X = update_func(X,train_flag)
                aggregated_X = aggregate_func(A[:,:,en],updated_X)
                output_X_list.append(aggregated_X)
            # Sum the per-edge-label aggregation results.
            output_X = tf.add_n(output_X_list,name='output_X')
            return output_X
class SumAggregator:
    '''
    Neighbourhood aggregation by summation: adds a self loop to the adjacency
    matrix, then sums the attribute vectors of every neighbour.
    '''
    def __init__(self,name_scope='SumAggregator'):
        # Open the scope once so all ops built later share the same prefix.
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,A,X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            self_loop_A = tf.add(A,tf.eye(tf.shape(A)[0]),name='self_loop_A')
            return tf.matmul(self_loop_A,X,name='output_X')
class MeanAggregator:
    '''
    Neighbourhood aggregation by averaging: adds a self loop, sums neighbour
    attributes and divides by the neighbourhood size (degree + 1).
    '''
    def __init__(self,name_scope='MeanAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,A,X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            self_loop_A = tf.add(A,tf.eye(tf.shape(A)[0]),name='self_loop_A')
            summed = tf.matmul(self_loop_A,X)
            neighbourhood_size = tf.reduce_sum(self_loop_A,axis=-1,keepdims=True)
            return tf.divide(summed,neighbourhood_size,name='output_X')
class MaxAggregator_old:
    '''
    Dense max-pooling over neighbourhoods via tiling; superseded by the
    while_loop-based MaxAggregator below (this version materialises an
    [N*N, C] tensor, i.e. O(N^2 * C) memory).
    '''
    def __init__(self,name_scope='MaxAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,A,X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            output_shape = X.get_shape()
            # NOTE: the op name 'output_shape' here is misleading; this is N.
            node_num = tf.shape(X,name='output_shape')[0]
            self_loop_A = tf.add(A,tf.eye(node_num),name='self_loop_A')
            flat_self_loop_A = tf.reshape(self_loop_A,[-1,1],name='flat_self_loop_A')
            tiled_X = tf.tile(X,[node_num,1],name='tiled_flat_X')
            # Non-neighbours are pushed to -1e4 so the row-max ignores them;
            # assumes attribute values stay well above -1e4 -- TODO confirm.
            flat_X_dot_A = tf.reshape(tiled_X*flat_self_loop_A - 1e4*(1-flat_self_loop_A),[node_num,node_num,-1],name='flat_X_dot_A')
            output_X = tf.reduce_max(flat_X_dot_A,axis=1,keepdims=False,name='output_X')
            output_X.set_shape(output_shape)
            return output_X
class MaxAggregator:
    '''
    Max-pooling neighbourhood aggregation computed one node at a time with a
    tf.while_loop, avoiding the O(N^2 * C) tiling of MaxAggregator_old.
    '''
    def __init__(self,name_scope='MaxAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def _maximum_neighborhood(self,index,A,X,out):
        # Appends the element-wise max over node `index`'s neighbourhood to `out`.
        # NOTE(review): A is float (0/1) here but tf.boolean_mask expects a
        # boolean mask -- confirm a cast happens upstream or that this errors.
        with tf.name_scope(self.name_scope):
            neigh = tf.boolean_mask(X,A[index])
            max_neigh = tf.reduce_max(neigh,keepdims=True,axis=0)
            out = tf.concat([out,max_neigh],axis=0)
            return out
    def __call__(self,A,X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
            , where N is the number of nodes and C is the channel number of node attributes
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            output_shape = X.get_shape()
            node_num = tf.shape(X,name='output_shape')[0]
            output_dim = int(output_shape[-1])
            self_loop_A = tf.add(A,tf.eye(node_num),name='self_loop_A')
            # Accumulator grows one row per loop iteration, hence the [None, C]
            # shape invariant below.
            output_X = tf.zeros([0,output_dim])
            _,_,_,output_X = tf.while_loop(lambda index,A,X,out: index<node_num,\
                    lambda index,A,X,out: [index+1,A,X,self._maximum_neighborhood(index,A,X,out)],\
                    loop_vars = [tf.zeros([],tf.int32),self_loop_A,X,output_X],\
                    shape_invariants = [tf.TensorShape([]),A.get_shape(),X.get_shape(),tf.TensorShape([None,output_dim])])
            output_X.set_shape(output_shape)
            return output_X
class GCNAggregator:
    '''
    Symmetrically normalised aggregation in the style of Kipf & Welling's GCN:
    computes D^{-1/2} (A + I) D^{-1/2} X, where D is the degree matrix of the
    self-loop-augmented adjacency.
    '''
    def __init__(self,name_scope='GCNAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,A,X):
        '''
        input arguments:
            A is the graph adjacency matrix of type tf.Tensor and of shape [N,N]
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [N,C]
        '''
        with tf.name_scope(self.name_scope):
            self_loop_A = tf.add(A,tf.eye(tf.shape(A)[0]),name='self_loop_A')
            inv_sqrt_degree = 1./tf.sqrt(tf.reduce_sum(self_loop_A,axis=1))
            self_loop_D_sqrt = tf.linalg.diag(inv_sqrt_degree,name='self_loop_D_sqrt')
            normalized_self_loop_A = tf.matmul(self_loop_D_sqrt,tf.matmul(self_loop_A,self_loop_D_sqrt),name='normalized_self_loop_A')
            return tf.matmul(normalized_self_loop_A,X,name='output_X')
class SumGraphAggregator:
    '''
    Graph-level readout: sums all node attribute vectors into a single row.
    '''
    def __init__(self,name_scope='SumGraphAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_sum(X,axis=0,keepdims=True,name='output_X')
class MeanGraphAggregator:
    '''
    Graph-level readout: averages all node attribute vectors into a single row.
    '''
    def __init__(self,name_scope='MeanGraphAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_mean(X,axis=0,keepdims=True,name='output_X')
class MaxGraphAggregator:
    '''
    Graph-level readout: element-wise max over all node attribute vectors.
    '''
    def __init__(self,name_scope='MaxGraphAggregator'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def __call__(self,X):
        '''
        input arguments:
            X is the node attributes matrix of type tf.Tensor and of shape [N,C]
        output arguments:
            aggregated new node attributes X' of type tf.Tensor and of shape [1,C]
        '''
        with tf.name_scope(self.name_scope):
            return tf.reduce_max(X,axis=0,keepdims=True,name='output_X')
class CreateSubgraph:
    '''
    Drops one uniformly random node from (X, A).
    NOTE(review): tf.random_shuffle permutes ALL indices before dropping the
    last one, so the remaining nodes come back in a random order -- confirm
    downstream code does not rely on node ordering.
    '''
    def __init__(self,name_scope='CreateSubgraph'):
        with tf.name_scope(name_scope) as self.name_scope:
            pass
    def _remove_one_node(self,X,A):
        # Keep N-1 shuffled indices and gather the matching rows/columns.
        with tf.name_scope(self.name_scope):
            indices = tf.range(tf.shape(A)[0])
            indices = tf.random_shuffle(indices)[:-1]
            X = tf.gather(X,indices)
            A = tf.gather(tf.gather(A,indices),indices,axis=1)
            return X,A
    def __call__(self,X,A):
        with tf.name_scope(self.name_scope):
            return self._remove_one_node(X,A)
| [
"silent56@sjtu.edu.cn"
] | silent56@sjtu.edu.cn |
b81eacbae1cc55af0ff7165de0962951628a87e6 | 6fa14cd7be2d22553496326bce954b74dd6779bd | /ejercicios 1er año/impares hasta 100.py | 94d9029aa6a93b794e87029da74ab48b1637eed9 | [] | no_license | nucleomis/Archivos_Python | 28e93dfac08eee604f25173ecdbfc3e86eb952ef | dd3e104bb38e9763d1e5efb22614e845f21d68f1 | refs/heads/main | 2023-06-22T13:19:27.952059 | 2021-07-20T14:05:13 | 2021-07-20T14:05:13 | 387,810,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | ##Hacer un pseudocódigo que imprima los números impares hasta el
##100 y que Imprima cuantos impares hay.
a=1
cont=0
while a<99:
a=a+2
cont=cont+1
print (a)
print ("la cantidad de veces que se repiten los impares son", cont) | [
"nucleo.mis@gmail.com"
] | nucleo.mis@gmail.com |
48259064e154e547151d473c41338ec1af6d2bd3 | c2f2c299b2dcc33229010ef77c96293059dfab61 | /classrooms/urls.py | 0041d6784e0553a9bbc50fd91b64bb597a0ac8d8 | [] | no_license | nbalrifai/Classrooms | f6895fa74e3ad84aad7fbde64e755439304f363d | 6100b9bd0e7c773cf3c4e90edef9993415cbb2b3 | refs/heads/master | 2022-01-18T09:00:07.401363 | 2019-07-21T17:39:06 | 2019-07-21T17:39:06 | 198,065,792 | 0 | 1 | null | 2019-07-21T14:21:54 | 2019-07-21T14:21:53 | null | UTF-8 | Python | false | false | 839 | py |
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from classes import views
# URL routing for the classrooms CRUD app: list/detail/create/update/delete.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('classrooms/', views.classroom_list, name='classroom-list'),
    path('classrooms/<int:classroom_id>/', views.classroom_detail, name='classroom-detail'),
    path('classrooms/create', views.classroom_create, name='classroom-create'),
    path('classrooms/<int:classroom_id>/update/', views.classroom_update, name='classroom-update'),
    path('classrooms/<int:classroom_id>/delete/', views.classroom_delete, name='classroom-delete'),
]
# Serve static and media files from Django itself in development only;
# production should serve these from the web server / CDN.
if settings.DEBUG:
    urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"lailaabdulraheem@gmail.com"
] | lailaabdulraheem@gmail.com |
584a3ac0d6ea04049204f7aaf58c6306f7ddde0d | aeca65f4c5396942d0b710d8915e1c6f53ff218a | /src/carbon_intelligence/meter/models.py | edfeee268242d98fba7385513a34f33dcaebf44c | [] | no_license | rbennell/carbon-intelligence | 9aaaf433faa68094b3f94c010c0021557dd223e1 | 73cb0786cb05e7ae13a9a9e48650e07de9fcb04c | refs/heads/master | 2023-08-11T06:02:04.359094 | 2020-04-19T14:34:28 | 2020-04-19T14:34:28 | 256,499,256 | 0 | 0 | null | 2021-09-22T18:52:51 | 2020-04-17T12:39:28 | Python | UTF-8 | Python | false | false | 1,381 | py | import datetime
from django.db import models
# Supported meter fuel types, as (stored value, human-readable label) pairs.
METER_TYPE_CHOICES = [
    ("electricity", "Electricity"),
    ("gas", "Natural Gas"),
    ("water", "Water"),
]
# Measurement units recorded against a meter.
UNIT_CHOICES = [
    ("kWh", "kWh"),
    ("m3", "m3"),
]
class Meter(models.Model):
    # A physical utility meter attached to a building.
    # Primary key is supplied explicitly (presumably taken from the source
    # CSV) rather than auto-generated -- TODO confirm against the importer.
    id = models.IntegerField(primary_key=True)
    building = models.ForeignKey(to="building.Building", on_delete=models.CASCADE)
    fuel = models.CharField(max_length=63, choices=METER_TYPE_CHOICES)
    unit = models.CharField(max_length=63, choices=UNIT_CHOICES)
    # these are the transformations that would need to be applied to each column of data, in order, from the csv file.
    csv_transformations = [int, int, str, str]
def parse_meter_reading_datetime(dt):
    """Parse a 'YYYY-MM-DD HH:MM' timestamp string into a naive datetime.

    Raises ValueError if the string does not match the expected format.
    """
    fmt = "%Y-%m-%d %H:%M"
    return datetime.datetime.strptime(dt, fmt)
class MeterReading(models.Model):
    # A single consumption reading taken from a meter at a point in time.
    meter = models.ForeignKey(to="meter.Meter", on_delete=models.CASCADE)
    consumption = models.FloatField()
    reading_date_time = models.DateTimeField()
    # these are the transformations that would need to be applied to each column of data, in order, from the csv file.
    csv_transformations = [float, int, parse_meter_reading_datetime]
    class Meta:
        # Makes MeterReading.objects.latest() return the most recent reading.
        get_latest_by = ["reading_date_time"]
    @property
    def graph_data(self):
        # Dict shape consumed by the charting front end.
        return {"datetime": self.reading_date_time, "consumption": self.consumption}
| [
"rbennell@hotmail.co.uk"
] | rbennell@hotmail.co.uk |
03e2a912883ed7271a2cc5d4993b027cbcef07ec | 7df7efb0872a24471d376ceda741b3752502ebc9 | /flaskAPI/models.py | e4cac78ed42cd486872a78f09ceae07b7e61a327 | [] | no_license | EliasAguirre/Flask-Python-Api | 4546541082f656f76cc4cf017725e8a2ebeacab7 | be813fd48ecdf4b7cedda98cd0e1c7c004d6115c | refs/heads/master | 2020-12-27T06:04:31.951371 | 2020-02-02T15:06:12 | 2020-02-02T15:06:12 | 237,787,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | from main import db
class User(db.Model):
    # Minimal user record for the demo API.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(10), index=True, unique=True)
    # NOTE(review): unique=True on `age` prevents two users sharing an age,
    # which looks unintended -- confirm before relying on it.
    age = db.Column(db.Integer, index=True, unique=True)
    def __repr__(self):
        # Debug-friendly representation, e.g. <User ad>.
        return '<User {}>'.format(self.name)
def init_db():
    """Create all database tables and seed a single test user.

    Intended for manual bootstrap use (``python models.py`` runs it via the
    __main__ guard below).
    """
    db.create_all()
    # Create a test user.
    # BUG FIX: SQLAlchemy's declarative default constructor accepts keyword
    # arguments only, so the original positional call `User('ad', 2)` raised
    # TypeError at runtime.
    new_user = User(name='ad', age=2)
    db.session.add(new_user)
    db.session.commit()
if __name__ == '__main__':
    init_db()
"eliasdavid.aguirre.a@gmail.com"
] | eliasdavid.aguirre.a@gmail.com |
fdc34fc0a555f3f41229467a66e4e653cb445b40 | a45f69e1daf40a933a5805eddda36d59658815de | /cloudipsp/async_api.py | c8f3ef18ebb6eafc068660e7b594636dcc96f4ff | [
"MIT"
] | permissive | xen/cloudipsp_async | c97f4dba286c8999162a3854aa82427ec35bb4bf | 6e1045de16ad535d858860cd202f8bd0f887aa83 | refs/heads/master | 2022-12-04T11:37:16.820299 | 2020-08-25T21:21:37 | 2020-08-25T21:21:37 | 289,747,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import logging
from types import MethodType
from cloudipsp import exceptions
try:
    import aiohttp
except ImportError:
    # aiohttp is an optional dependency: record its absence so AsyncAPI can
    # fail later with an install hint instead of an ImportError at import time.
    aiohttp = False
from cloudipsp.api import BaseAPI
log = logging.getLogger(__name__)
class AsyncAPI(BaseAPI):
    """Asynchronous API client; requires the optional aiohttp dependency."""
    # Marks this client as asynchronous for code that dispatches on the flag.
    is_async = True
    def __init__(self, **kwargs):
        # Fail fast with an install hint if aiohttp was not importable above;
        # all keyword arguments are forwarded to BaseAPI unchanged.
        if not aiohttp:
            raise ModuleNotFoundError(
                "Run 'pip install -U aiohttp' to work with AsyncAPI"
            )
        super().__init__(**kwargs)
| [
"mkashkin@gmail.com"
] | mkashkin@gmail.com |
a3143711129b88f014fda2d2ef6ac1b8d0d0f0c0 | 6a0a7269ee3cd16763510753a9b2b073accd017d | /5 Airflow/L3/dags/exercise4.py | b997cad138f3cbe2165205bae8ec154054d644fb | [] | no_license | villoro/DEND | e8a5010a916ecf70c47780f9a59b84ccc5dcbcb2 | 398d297232cc5139d9536019db2fd5d60a9ac04f | refs/heads/master | 2021-05-18T16:50:26.306945 | 2020-04-25T09:57:39 | 2020-04-25T09:57:39 | 251,325,163 | 0 | 0 | null | 2020-04-25T09:57:40 | 2020-03-30T14:10:16 | Jupyter Notebook | UTF-8 | Python | false | false | 1,782 | py | import datetime
from airflow import DAG
from airflow.operators import FactsCalculatorOperator, HasRowsOperator, S3ToRedshiftOperator
#
# The following DAG performs the following functions:
#
# 1. Loads Trip data from S3 to RedShift
# 2. Performs a data quality check on the Trips table in RedShift
# 3. Uses the FactsCalculatorOperator to create a Facts table in Redshift
# a. **NOTE**: to complete this step you must complete the FactsCalcuatorOperator
# skeleton defined in plugins/operators/facts_calculator.py
#
dag = DAG("lesson3.exercise4", start_date=datetime.datetime.utcnow())
#
# The following code will load trips data from S3 to RedShift. Use the s3_key
# "data-pipelines/divvy/unpartitioned/divvy_trips_2018.csv"
# and the s3_bucket "udacity-dend"
#
copy_trips_task = S3ToRedshiftOperator(
task_id="load_trips_from_s3_to_redshift",
dag=dag,
table="trips",
redshift_conn_id="redshift",
aws_credentials_id="aws_credentials",
s3_bucket="udacity-dend",
s3_key="data-pipelines/divvy/unpartitioned/divvy_trips_2018.csv",
)
#
# Data quality check on the Trips table
#
check_trips = HasRowsOperator(
task_id="check_trips_data", dag=dag, redshift_conn_id="redshift", table="trips"
)
#
# We use the FactsCalculatorOperator to create a Facts table in RedShift. The fact column is
# `tripduration` and the groupby_column is `bikeid`
#
calculate_facts = FactsCalculatorOperator(
task_id="calculate_facts_trips",
dag=dag,
redshift_conn_id="redshift",
origin_table="trips",
destination_table="trips_facts",
fact_column="tripduration",
groupby_column="bikeid",
)
#
# Task ordering for the DAG tasks
#
copy_trips_task >> check_trips
check_trips >> calculate_facts
| [
"villoro7@gmail.com"
] | villoro7@gmail.com |
178073e46d08f76dd3f07a0d95396e86a2d69c87 | 4887a1a84e5ae0a3f4e23c41576b53e11e56840c | /parkproject/manage.py | 99de1c8a11bea0e3ef907c1d17e2f720a6d87d4f | [] | no_license | liujwplayer/python | c39dfd9d76034e9f4f8dd053442d3cbf3b220020 | 5e270a06c6c0a13cbabb409cebd64fdc6b3150d2 | refs/heads/master | 2020-04-01T08:29:03.386841 | 2018-10-28T05:41:56 | 2018-10-28T05:41:56 | 153,032,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the CLI
    # (runserver, migrate, etc.) to Django's management framework.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "parkproject.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"liujwplayer@163.com"
] | liujwplayer@163.com |
a652e78af109d2bacbf8df44bfbe96159701df7b | d6375b3202143d7a0761dcb82a2ae8466ff4676a | /apps/users/migrations/0001_initial.py | 09323b8724c2cf41125463026fb876b9fb3956ef | [] | no_license | Allkoman/mxonline | 264c85d18100c3a458b103dcef16393a883d6607 | 431d524e32043aead36f4da71a8424a132a5c1bc | refs/heads/master | 2021-01-20T14:10:27.493578 | 2017-02-24T01:21:41 | 2017-02-24T01:21:41 | 82,746,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,620 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-20 06:19
from __future__ import unicode_literals
import django.contrib.auth.models
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the custom UserProfile auth user table.

    Auto-generated by Django 1.9 (`makemigrations`); do not edit field
    definitions by hand -- create a follow-up migration instead.
    """
    initial = True
    dependencies = [
        ('auth', '0007_alter_validators_add_error_messages'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('nick_name', models.CharField(default='', max_length=50, verbose_name='\u6635\u79f0')),
                ('birday', models.DateField(blank=True, null=True, verbose_name='\u751f\u65e5')),
                ('gender', models.CharField(choices=[('male', '\u7537'), ('female', '\u5973')], default='female', max_length=5)),
                ('address', models.CharField(default='', max_length=100)),
                ('mobile', models.CharField(blank=True, max_length=11, null=True)),
                ('image', models.ImageField(default='image/default.png', upload_to='image/%Y/%m')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': '\u7528\u6237\u4fe1\u606f',
                'verbose_name_plural': '\u7528\u6237\u4fe1\u606f',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| [
"18646085515@163.com"
] | 18646085515@163.com |
b094b2109fab7c668ff7b27eeb1147aa55d6aa9c | 8e13c309b04ab6e56de828ab6f2206ba84ed00d8 | /app/models.py | 72a1eae3d26f16bbd0dea9f9f8a6a06fbf0c381c | [] | no_license | mr-Sanchez/first_project | d809cfc93c8486d51ae82fae303f264fb0130d32 | 8a5defb863833fcb6905e55f34271aaabcd7485b | refs/heads/main | 2023-05-24T13:22:45.216893 | 2021-06-03T14:28:18 | 2021-06-03T14:28:18 | 363,196,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,319 | py | from app import db
import re
from sqlalchemy.orm import backref
class PaymentMethod(db.Model):
    # Lookup table of checkout payment options (machine name + display caption).
    id = db.Column(db.Integer, primary_key=True)
    payment_method_name = db.Column(db.String(100))
    payment_method_caption = db.Column(db.String(100))
class ClothesCategory(db.Model):
    # Catalogue category; `clothes_for` presumably holds the target audience
    # (e.g. men/women) -- TODO confirm against the views that filter on it.
    id = db.Column(db.Integer, primary_key=True)
    category_path = db.Column(db.String(255))
    category_name = db.Column(db.String(255))
    clothes_for = db.Column(db.String(100))
    clothes = db.relationship('ClothesItem', backref='category')
class ClothesItem(db.Model):
    # A catalogue product with its images and per-size stock rows.
    id = db.Column(db.Integer, primary_key=True)
    clothes_name = db.Column(db.String(255))
    clothes_price = db.Column(db.Integer)
    # Discount stored as an integer; presumably a percentage -- TODO confirm.
    clothes_discount = db.Column(db.Integer)
    clothes_description = db.Column(db.Text)
    clothes_category_id = db.Column(db.Integer, db.ForeignKey('clothes_category.id'))
    images = db.relationship('ClothesItemImage', backref='clothes')
    sizes = db.relationship('ClothesSizes', backref='clothes')
class ClothesItemImage(db.Model):
    # One stored image path per product photo.
    id = db.Column(db.Integer, primary_key=True)
    clothes_image_path = db.Column(db.String(255))
    clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
class ClothesSizes(db.Model):
    # Per-size stock level for a product (`count` = units in stock).
    id = db.Column(db.Integer, primary_key=True)
    size = db.Column(db.String(100))
    count = db.Column(db.Integer)
    clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
    # NOTE(review): SoldClothes.sold_clothes_size also maps this FK with a
    # 'sold' backref; two relationships over one FK will trigger SQLAlchemy
    # overlap warnings -- confirm which pair is intended.
    purchases = db.relationship('SoldClothes', backref='size')
class User(db.Model):
    """Shop customer account."""
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String(255))
    user_email = db.Column(db.String(255), unique=True)
    # NOTE(review): stored as a plain string column; confirm the password is
    # hashed before it is written here.
    user_password = db.Column(db.String(255))
    purchases = db.relationship('Purchase', backref='user')
class Coupon(db.Model):
    """Discount coupon; `coupon_is_added`/`coupon_is_active` gate its use."""
    id = db.Column(db.Integer, primary_key=True)
    coupon_code = db.Column(db.String(20))
    coupon_discount = db.Column(db.Integer)
    coupon_is_added = db.Column(db.Boolean)
    coupon_is_active = db.Column(db.Boolean)
class Purchase(db.Model):
    """An order placed by a user; line items live in SoldClothes."""
    id = db.Column(db.Integer, primary_key=True)
    purchase_date = db.Column(db.DateTime)
    purchase_cost = db.Column(db.Integer)
    purchase_discount = db.Column(db.Integer)
    purchase_address = db.Column(db.String(255))
    purchase_payment_method_id = db.Column(db.Integer, db.ForeignKey('payment_method.id'))
    purchase_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    sizes = db.relationship('SoldClothes', backref='purchase')
class SoldClothes(db.Model):
    """Order line item: quantity of one size within one purchase."""
    id = db.Column(db.Integer, primary_key=True)
    sold_clothes_quantity = db.Column(db.Integer)
    sold_clothes_size_id = db.Column(db.Integer, db.ForeignKey('clothes_sizes.id'))
    # NOTE(review): ClothesSizes.purchases and Purchase.sizes already define
    # relationships over these same FKs; these extra relationships overlap
    # them (SQLAlchemy may warn) -- confirm both sets are intended.
    sold_clothes_size = db.relationship('ClothesSizes', backref=backref('sold', passive_deletes='all'))
    sold_clothes_purchase_id = db.Column(db.Integer, db.ForeignKey('purchase.id'))
    sold_clothes_purchase = db.relationship('Purchase', backref=backref('sold', passive_deletes='all'))
class Comment(db.Model):
    """Free-text customer comment attached to a ClothesItem."""
    id = db.Column(db.Integer, primary_key=True)
    comment_author = db.Column(db.String(100))
    comment_text = db.Column(db.Text)
    comment_publish_date = db.Column(db.DateTime)
    comment_clothes_id = db.Column(db.Integer, db.ForeignKey('clothes_item.id'))
"aleksandr.ptrk@gmail.com"
] | aleksandr.ptrk@gmail.com |
fea85b2a070376ac73feafffcec765b84aadb0fe | f3cec139bc484a376753ac8089f000e25927d940 | /Xray_trainloop.py | 8d4678958367de1bf0ffec7d01a7353f64c729fb | [] | no_license | wisemin7/covid | e28e309c1f35eec11a886bf4f6cf0495506b64dd | f347664df8de97c1643e3a060183e8c01a3c925c | refs/heads/master | 2022-11-09T16:34:43.764813 | 2020-06-27T10:26:16 | 2020-06-27T10:26:16 | 275,346,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 11:25:33 2020
@author: hoon
"""
import torch
import torchvision
from torchvision import transforms
from torch.utils.data.dataset import Dataset
import os, sys, random
import numpy as np
import PIL
from PIL import Image
from gen_utils import *
from ds import *
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
# Per-image min-max normalization to [0, 1] after conversion to a tensor.
load_tfm = transforms.Compose([
    transforms.ToTensor(),
    lambda x : (x-x.min())/(x.max()-x.min())
])
# Train/test datasets; directory layout is whatever XrayDset (ds.py) expects.
train_set = XrayDset('./data_new2/train/', load_tfm)
train_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=10, shuffle=True)
test_set = XrayDset('./data4/test_Shenzen/', load_tfm)
test_loader = torch.utils.data.DataLoader(dataset=test_set, batch_size=10, shuffle=False)
class XrayResnet(torch.nn.Module):
    """ResNet-18 classifier adapted to single-channel X-ray images.

    A stride-2 1->3 conv maps grayscale input to the 3 channels ResNet
    expects; the stock avgpool/fc head is replaced by a smaller pool and
    a 2-way linear head.
    """
    def __init__(self):
        super(XrayResnet, self).__init__()
        # 1-channel -> 3-channel adapter (also halves spatial resolution).
        self.C1 = torch.nn.Conv2d(in_channels=1, out_channels=3, kernel_size=3, padding=1, stride=2)
        self.model_ft = torchvision.models.resnet18()
        self.model_ft.avgpool = torch.nn.AvgPool2d(kernel_size=4, padding=0, stride=2)
        self.model_ft.fc = torch.nn.Sequential(
            torch.nn.Linear(512,256),
            torch.nn.Linear(256,2)
        )
    def forward(self, x):
        y = x
        y = self.C1(y)
        # Run the ResNet trunk manually; children()[:9] relies on torchvision's
        # child ordering (conv1..avgpool) -- fragile across library versions.
        for lid, layer in enumerate(list(self.model_ft.children())[:9]):
            y = layer(y)
        # Drop trailing spatial dims; assumes the pooled map is 1x1 -- TODO
        # confirm for the actual input resolution.
        y = y.squeeze(-1).squeeze(-1)
        # Last child is the replaced fc head defined above.
        y = list(self.model_ft.children())[-1](y)
        return y
# ---- training configuration ----
n_epochs = 30
#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
M = XrayResnet()
M = M.to(device)
optimizer = torch.optim.Adam(M.parameters(), lr=6e-4, weight_decay=1e-2)
exp_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, n_epochs)
criterion = torch.nn.CrossEntropyLoss()
train_loss_track = []
test_loss_track = []
for eph in range(n_epochs):
    print('epoch : {} ...'.format(eph))
    n_correct = 0
    avg_loss = 0
    n_samples = 0
    M.train()
    # NOTE(review): since PyTorch 1.1 the scheduler should be stepped after
    # the epoch's optimizer.step() calls, not before -- confirm intended.
    exp_lr_scheduler.step()
    for idx, xy in enumerate(train_loader):
        x, y = xy
        x, y = x.to(device), y.to(device)
        outputs = M(x)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        n_correct += torch.sum(preds.data == y.data)
        avg_loss += loss.item()
        n_samples += x.size(0)
    # Per-sample average of the summed batch losses.
    avg_loss = avg_loss/n_samples
    train_loss_track.append(avg_loss)
    print('train avg loss : ', avg_loss)
    print('num of correct samples : {}/{}'.format(n_correct, n_samples))
    n_correct = 0
    avg_loss = 0
    n_samples = 0
    gt_labels = []
    pred_labels = []
    M.eval()
    # Evaluation pass; NOTE(review): gradients are still tracked here --
    # wrapping in torch.no_grad() would save memory.
    for idx, xy in enumerate(test_loader):
        x, y = xy
        # x, y = x.cuda(), y.cuda()
        x, y = x.to(device), y.to(device)
        outputs = M(x)
        _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, y)
        n_correct += torch.sum(preds.data == y.data)
        gt_labels += list(y.data.cpu().numpy())
        pred_labels += list(preds.data.cpu().numpy())
        avg_loss += loss.item()
        n_samples += x.size(0)
    avg_loss = avg_loss/n_samples
    test_loss_track.append(avg_loss)
    print('test avg loss : ', avg_loss)
    print('num of correct samples : {}/{}'.format(n_correct, n_samples))
# Loss curves: train in blue, test in red.
plt.plot(train_loss_track, 'b')
plt.plot(test_loss_track, 'r')
plt.xlabel('epochs')
plt.ylabel('avg loss')
plt.show()
# NOTE(review): 3 class names but the model head outputs only 2 classes;
# classification_report will raise/mislabel if the counts disagree -- confirm.
target_names = ['No TB', 'TB', 'COVID']
print(classification_report(gt_labels, pred_labels, target_names=target_names))
| [
"noreply@github.com"
] | wisemin7.noreply@github.com |
9dcb940cd9146536df36cac078e567c812b0cf16 | e6c1c1352df0ff0906e23b3cd14520155b9d0e0c | /mysite/settings.py | ef3b731603b572d49fec66ecf0a1b4eb6d8e28b4 | [] | no_license | elciorodrigo/apiCep | 63645190d8439bb3c49f786bd474c0597655c707 | abdb7bfb2787b93ad5b19a989747d99d70c67538 | refs/heads/master | 2021-08-11T20:30:44.460138 | 2017-11-14T03:57:26 | 2017-11-14T03:57:26 | 110,638,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,126 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any deployment.
SECRET_KEY = 'lng!)12@vj#m0f@zpzg%8=6(eo7ux!r64!hdcrz1_l^c+5gxvk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'polls.apps.PollsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"elciorodrigo@gmail.com"
] | elciorodrigo@gmail.com |
52e17291e5c10f8c1e415d3e6968fd57a2fa3c58 | b5321f6865f91ef8fb783a3e76e15e0d13e5a711 | /lesson_11/lesson11_ex1.py | 8aeb00e829a0e0bced0a4739ba3f56ff5a8b1983 | [] | no_license | DianaChumachenko/PythonIntro | 6689772391ed7f7e3c9380cf8470ae67fd3e9dd4 | 46e7c4c8b07ebdb076073910337b18f4d7f5ac1a | refs/heads/main | 2023-02-09T09:50:51.679456 | 2021-01-04T19:31:27 | 2021-01-04T19:31:27 | 311,464,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from pprint import pprint
# Map every printable/extended ASCII code point in [32, 128) to its character.
d = {code: chr(code) for code in range(32, 128)}
pprint(d)
"dchumachenko0508@gmail.com"
] | dchumachenko0508@gmail.com |
a2912b63ff16dc838e87900ce2db2d1f3a43c590 | 0d153f781d04c0fa925a864e03bf28d2bd61cb06 | /python/p7.py | 443704e9aca28ce794b8c809d7048a90e9545fa0 | [] | no_license | glovguy/project-euler-solutions | f9750cf1ca71a2aba9433f99d89838749aa9cf00 | 38f9c60d9d45f88d5d9a384404ab5d41cff491f0 | refs/heads/master | 2021-01-21T15:04:41.877811 | 2020-06-07T21:20:27 | 2020-06-07T21:20:27 | 57,855,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | '''By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.
What is the 10 001st prime number?'''
def is_prime(num):
upperLimit = int(num/2)
for n in range(2, upperLimit):
if num%n == 0:
return False
return True
def prime_numbers(until):
t=0
i=2
while t < until+1:
while not is_prime(i):
i+=1
t+=1
yield i
i+=1
primes = prime_numbers(10001)
allPrimes = [j for j in primes]
print(allPrimes[len(allPrimes)-1])
| [
"karlsmith@bouzou.com"
] | karlsmith@bouzou.com |
bff7b6d57c42b3b74cbfa6b65e9e3e4fd2c58bd0 | a766f6ee10be86bd33d2cfc06c19d94247b6ad08 | /aea/cli/registry/registration.py | e2da0dfcaf986b79c130ef7afa9834bdbe712d07 | [
"Apache-2.0"
] | permissive | ejfitzgerald/agents-aea | 3b07db6c1f9f5fc8fded4ce497a2283ae88f0b84 | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | refs/heads/master | 2022-12-07T05:53:55.379150 | 2020-08-14T15:22:19 | 2020-08-14T15:22:19 | 288,688,666 | 0 | 0 | Apache-2.0 | 2020-08-19T09:24:07 | 2020-08-19T09:24:06 | null | UTF-8 | Python | false | false | 2,053 | py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with methods for new user registration."""
from typing import List
from click import ClickException
from aea.cli.registry.utils import request_api
def register(
    username: str, email: str, password: str, password_confirmation: str
) -> str:
    """
    Register a new Registry account and automatically login if successful.

    :param username: str username.
    :param email: str email.
    :param password: str password.
    :param password_confirmation: str password confirmation.

    :return: str auth token.
    """
    payload = {
        "username": username,
        "email": email,
        "password1": password,
        "password2": password_confirmation,
    }
    resp_json, status_code = request_api(
        "POST",
        "/rest-auth/registration/",
        data=payload,
        handle_400=False,
        return_code=True,
    )
    if status_code != 400:
        return resp_json["key"]

    # Gather every field-level error message reported by the API, in the
    # same field order the form uses.
    errors = [
        message
        for field in ("username", "email", "password1", "password2")
        for message in (resp_json.get(field) or [])
    ]
    raise ClickException(
        "Errors occured during registration.\n" + "\n".join(errors)
    )
| [
"panasevychol@gmail.com"
] | panasevychol@gmail.com |
1e6e066f903701f4d59405a4f73cd24b8d2114a3 | f950882940764ace71e51a1512c16a5ac3bc47bc | /src/ThirdParty/freetype/src/tools/PaxHeaders.20567/chktrcmp.py | b08e1c1af6a391977a0aba1d7924afee49d7c048 | [
"FTL"
] | permissive | ViacheslavN/GIS | 3291a5685b171dc98f6e82595dccc9f235e67bdf | e81b964b866954de9db6ee6977bbdf6635e79200 | refs/heads/master | 2021-01-23T19:45:24.548502 | 2018-03-12T09:55:02 | 2018-03-12T09:55:02 | 22,220,159 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | 30 mtime=1398010730.538379929
30 atime=1417845746.439132282
30 ctime=1398010730.538379929
| [
"nk.viacheslav@gmail.com"
] | nk.viacheslav@gmail.com |
83acc1d478a46b104cd5f8f7702ae959347f562a | 2b757e74a9ec0a208a1591fd7597d2975bbc5f1d | /app.py | 3bd6635749da14fdcab2bd448f077641b760dff8 | [] | no_license | surfeatcoderepeat/multifinger | cb2110f639c9fe0aeb59b0d789e504569e1005df | 627ecebd3fa2d3d6cae58855051a230297d7b705 | refs/heads/main | 2023-07-14T18:17:41.545934 | 2021-08-27T13:17:48 | 2021-08-27T13:17:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,221 | py | import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State, MATCH, ALL
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import plotly.express as px
import lasio
import pandas as pd
import plotly.graph_objects as go
import numpy as np
from dash.exceptions import PreventUpdate
import re
import webbrowser
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.config['suppress_callback_exceptions'] = True
server = app.server
SIDEBAR_STYLE = {
"position": "fixed",
"top": 0,
"left": 0,
"bottom": 0,
"width": "16rem",
"padding": "2rem 1rem",
"background-color": "#f9f9fa",
"overflow": "scroll",
}
CONTENT_STYLE = {
"margin-left": "18rem",
"margin-right": "2rem",
"padding": "2rem 1rem",
}
sidebar = html.Div([
html.H3("Visualizador Multifinger", className="display-5", style={'textAlign':'center'}),
html.Hr(),
dcc.Upload(
id='upload-data',
children=html.Div([
'Drag and Drop your .las file'
]),
style={
'width': '100%',
'height': '60px',
'lineHeight': '60px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'textAlign': 'center',
},
),
html.Hr(),
html.Div(id='select_fingers'),
html.Div(id='select_curves'),
html.Hr(),
html.Button('Graficar', id='graficar'),
],
style=SIDEBAR_STYLE,
)
content = html.Div(id="page-content",
style=CONTENT_STYLE,
children=[
dcc.Store(id='stored-data'),
dcc.Store(id='radios_units'),
dbc.Row([
dbc.Col(
[html.H3('POLAR PLOT', style={'textAlign':'center'}),
dcc.Graph(id="plot2d", figure={
'layout': go.Layout(
xaxis = {
'visible': False
},
yaxis = {
'visible': False,
}
)
}),
html.Div(
dcc.Input(id='polar_center',
type='number',
placeholder='Input an MD to plot',
),
style=dict(display='flex', justifyContent='center'),
)
], width=4),
dbc.Col([
html.H3('3D SURFACE', style={'textAlign':'center'}),
dcc.Graph(id="plot3d", figure={
'layout': go.Layout(
xaxis = {
'visible': False
},
yaxis = {
'visible': False,
}
)
}),
dcc.RangeSlider(id='slider',
tooltip = { 'always_visible': True },
),
], width=6 ),
dbc.Col([
html.H5('Z aspect ratio'),
dcc.Input(id='z-aspectratio',
type='number',
value=1
),
html.Hr(),
html.H5('X-Y aspect ratio'),
dcc.Input(id='xy-aspectratio',
type='number',
value=1
),
html.Hr(),
], width=2),
],no_gutters=True, align="center")
]
)
app.layout = html.Div([sidebar, content])
@app.callback(
    Output('select_fingers', 'children'),
    Output('stored-data', 'data'),
    Output('radios_units', 'data'),
    Input('upload-data', 'contents'),
    State('upload-data', 'filename'),
)
def update_output(contents, filename):
    """Parse an uploaded .las log file and build the sidebar controls.

    Returns the sidebar children, the log data as records for dcc.Store,
    and a curve-name -> unit mapping (depth step added under key 'step').
    """
    if contents is None:
        raise PreventUpdate
    else:
        content_type, content_string = contents.split(',')
        decoded = base64.b64decode(content_string)
        # NOTE(review): if the filename is not .las, `children`/`data` are
        # never bound and the return below raises NameError -- confirm.
        if '.las' in filename or '.LAS' in filename:
            las = lasio.read(io.StringIO(decoded.decode('utf-8')))
            # Unit per curve; abs() of STEP so logging direction is irrelevant.
            curvesdict = {k:las.curvesdict[k].unit for k in las.curvesdict}
            curvesdict['step'] = abs(las.well.STEP.value)
            df = las.df().reset_index()
            # Finger-count choices 0..99 for the fingers_n dropdown.
            options = [{'label':n, 'value':n} for n in range(100)]
            data = df.to_dict('records')
            children = html.Div([
                html.H5(filename),
                html.Hr(),
                html.H5('Nominal Inner Diameter (mm)'),
                dcc.Input(id='nominal_id',
                    type='number',
                    placeholder='Input an MD to plot',
                    value=104.8,
                ),
                html.Hr(),
                dcc.Dropdown(id='depth_index',
                             options = [{'label':c, 'value':c} for c in df.columns],
                             placeholder='pick depht'),
                html.Hr(),
                dcc.Dropdown(id='tool_rotation',
                             options = [{'label':c, 'value':c} for c in df.columns],
                             placeholder='pick tool rotation'),
                html.Hr(),
                dcc.Dropdown(id='tool_offset',
                             options = [{'label':c, 'value':c} for c in df.columns],
                             placeholder='pick tool offset'),
                dcc.Dropdown(id='tool_theta',
                             options = [{'label':c, 'value':c} for c in df.columns],
                             placeholder='pick tool angle'),
                html.Hr(),
                dcc.Dropdown(id='fingers_n',
                             options = options,
                             placeholder='pick number of fingers'),
            ])
            return children, data, curvesdict
@app.callback(
    Output("select_curves", "children"),
    Input("fingers_n", "value"),
    State('stored-data','data'),
)
def curves_selection(n_fingers, data):
    """Build one curve-selection dropdown per finger.

    Returns None (no update) until a finger count has been chosen.
    """
    if n_fingers is not None:
        df = pd.DataFrame(data)
        options = [{'label':c, 'value':c} for c in df.columns]
        return [
            html.Hr(),
            html.Div(id='curvas', children=[
                dcc.Dropdown(id={
                    'type': 'filter-dropdown',
                    'index': i
                },
                    options=options,
                    placeholder='finger_{}'.format(i+1),
                    # value='FING{:02d}'.format(i+1),
                )
                for i in range(n_fingers)],
            ),
        ]
@app.callback(
    Output({'type': 'filter-dropdown', 'index': ALL}, 'value'),
    Input({'type': 'filter-dropdown', 'index': ALL}, 'value'),
    State({'type': 'filter-dropdown', 'index': ALL}, 'id'),
    State("fingers_n", "value"),
)
def find_regex(allvalues, allindex, n_fingers):
    """Auto-fill all finger dropdowns from the one curve the user picked.

    The chosen curve name is split into a non-digit template ('#$#'
    placeholder) and its digit part; the template is instantiated for each
    finger index (0-based, 1-based zero-padded, or 0-based zero-padded).
    """
    try:
        # First (assumed only) dropdown with a selected value.
        index, value = [(i,v) for i,v in enumerate(allvalues) if v is not None][0]
        notnumber = re.sub(r"\d+", '#$#', value)
        number = re.sub(r'\D', '', value)
        if index<9 and number==str(index):
            final_values = [notnumber.replace('#$#', str(i)) for i in range(n_fingers)]
        elif index<9 and number=='0'+str(index+1):
            final_values = [notnumber.replace('#$#', '{:02d}'.format(i+1)) for i in range(n_fingers)]
        elif index<9 and number=='0'+str(index):
            final_values = [notnumber.replace('#$#', '{:02d}'.format(i)) for i in range(n_fingers)]
        return final_values
    except:
        # NOTE(review): bare except serves as the "nothing selected yet"
        # path (IndexError above) but also hides real errors, e.g.
        # `final_values` unbound when no branch matches -- confirm intent.
        # raise PreventUpdate
        return [None for i in range(n_fingers)]
@app.callback(
    Output('plot3d', 'figure'),
    Output('plot2d', 'figure'),
    Output('slider', 'min'),
    Output('slider', 'max'),
    Output('slider', 'value'),
    Output('polar_center', 'value'),
    Output('polar_center', 'step'),
    Input('graficar', 'n_clicks'),
    Input('slider', 'value'),
    Input('polar_center', 'value'),
    Input('z-aspectratio', 'value'),
    Input('xy-aspectratio', 'value'),
    State({'type': 'filter-dropdown', 'index': ALL}, 'value'),
    State('stored-data','data'),
    State('depth_index', 'value'),
    State('tool_rotation', 'value'),
    State('tool_offset', 'value'),
    State('tool_theta', 'value'),
    State('nominal_id', 'value'),
    State('radios_units', 'data'),
)
def plot_graf(n_clicks, range_values, polar_center, zratio, xyratio, fingers, data, depth, rot, offset, angle, nomid, curvesdict):
    """Render the 3D caliper surface and the polar cross-section plot.

    Behavior depends on which input triggered the callback: 'graficar'
    uses the full depth range, the slider restricts it, and
    'polar_center' only moves the polar cross-section depth.
    """
    unit = curvesdict[fingers[0]]
    step = curvesdict['step']
    # Convert finger radii to millimetres if logged in inches.
    if unit=='IN':
        factor = 25.4
    else:
        factor = 1
    ctx = dash.callback_context
    trigger_id = ctx.triggered[0]['prop_id'].split('.')[0]
    df = pd.DataFrame(data).sort_values(depth).set_index(depth).dropna()
    if trigger_id=='graficar':
        radios = df[fingers]
        if rot is not None:
            rot3d = df[rot]
    else:
        # Slider-triggered update: restrict to the selected depth window.
        i_min = np.searchsorted(df.index, range_values[0], side="left")
        i_max = np.searchsorted(df.index, range_values[1], side="left")
        radios = df.iloc[i_min:i_max][fingers]
        if rot is not None:
            rot3d = df.iloc[i_min:i_max][rot]
    radios = radios*factor
    # NOTE(review): shadows the builtins min/max inside this function.
    min, max = radios.index.min(), radios.index.max()
    nmediciones, npatines = radios.shape
    # Deviation from the nominal casing radius drives the color scale.
    radios_casing = np.full(radios.shape, nomid/2)
    diff = radios - radios_casing
    Z = np.vstack([radios.index]*npatines)
    # One angular position per finger, evenly spread over the circle.
    p = np.linspace(0, 2*np.pi, npatines)
    P = np.column_stack([p]*nmediciones)
    if rot is not None:
        P = P + np.radians(rot3d.values)
    X, Y = radios.values.transpose()*np.cos(P), radios.values.transpose()*np.sin(P)
    fig3d = go.Figure(data=[go.Surface(x=X, y=Y, z=Z,
                                       surfacecolor=diff.transpose(),
                                       colorscale='Jet',
                                       cmin=-5,
                                       cmax=5,
                                       # customdata=,
                                       hovertemplate='z: %{z:.2f}<extra></extra>'+
                                       '<br><b>z*2</b>: %{z:.2f}<br>',
                                       # text=['ovalizacion: {}'.format(i) for i in Z[:,0]],
                                       )])
    fig3d.update_scenes(xaxis_visible=False,
                        yaxis_visible=False,
                        zaxis_visible=False,
                        xaxis_showgrid=False,
                        yaxis_showgrid=False,
                        zaxis_showgrid=False,
                        aspectmode='manual',
                        aspectratio=dict(x=xyratio, y=xyratio, z=zratio),
                        )
    # Blue marker: position of finger 1 at the top of the plotted interval.
    fig3d.add_trace(go.Scatter3d(x=[X[0,1]], y=[Y[0,1]], z=[Z[0,1]],
                                 mode='markers',
                                 marker = dict(size=10,
                                               color='blue',
                                               opacity=.8,)))
    # Grey marker: nominal casing radius reference at angle 0.
    xtop, ytop = nomid/2, 0
    fig3d.add_trace(go.Scatter3d(x=[xtop], y=[ytop], z=[Z[0,1]],
                                 mode='markers',
                                 marker = dict(size=10,
                                               color='grey',
                                               opacity=.8,)))
    # Cross-section depth: top of interval unless 'polar_center' triggered.
    if polar_center is None or trigger_id!='polar_center':
        radios_polar_plot = radios.iloc[0].values
        polar_depth = radios.index[0]
        if rot is not None:
            rot2d = df[rot].loc[polar_depth]
    else:
        i = np.searchsorted(df.index, polar_center, side="right")
        radios_polar_plot = df[fingers].iloc[i].values*factor
        polar_depth = df.index[i]
        if rot is not None:
            rot2d = df[rot].loc[polar_depth]
    radios_polar_casing = np.full(radios_polar_plot.shape, nomid/2)
    diff_polar = radios_polar_plot - radios_polar_casing
    if rot is not None:
        p = p + np.radians(rot2d)
    polar_data = pd.DataFrame({
        'theta':[np.degrees(i) for i in p],
        'radios':radios_polar_plot,
        # 'text':['finger_{}'.format(i) for i in range(1, len(fingers)+1)],
    })
    fig2d = px.scatter_polar(polar_data,
                             r="radios",
                             theta="theta",
                             # text='text',
                             color=diff_polar,
                             color_continuous_scale='jet',
                             range_color=[-5,5],
                             )
    fig2d.update_traces(marker=dict(size=10),)
    # Radial line marking finger 1's direction.
    fig2d.add_trace(go.Scatterpolar(
        r = [0, polar_data.radios.iloc[0]],
        theta = [0, polar_data.theta.iloc[0]],
        name = "finger_1",
        mode = "lines",
    ))
    # Nominal casing inner radius drawn as a reference circle.
    fig2d.add_trace(go.Scatterpolar(
        r = [nomid/2]*len(fingers),
        theta = [np.degrees(i) for i in p],
        name = "casing ID",
        mode = "lines",
        line_color = 'black',
        line_width = 4,
        opacity = .2,
    ))
    if offset is not None and angle is not None:
        # Tool eccentricity marker at this depth.
        fig2d.add_trace(go.Scatterpolar(
            r = [df[offset].loc[polar_depth]],
            theta = [np.radians(df[angle].loc[polar_depth])],
            text = 'tool_center',
            marker=dict(size=15, color = "magenta", symbol='x'),
            name = "tool_center",
        ))
    fig2d.update(layout_coloraxis_showscale=False)
    fig2d.update_polars(
        radialaxis_range=[0, (nomid//2)+10],
        radialaxis_showticklabels=False,
        bgcolor='white',
        angularaxis_gridcolor='grey',
        radialaxis_gridcolor='white',
    )
    return fig3d, fig2d, df.index.min(), df.index.max(), [min, max], polar_depth, step
if __name__ == '__main__':
    url = 'http://127.0.0.1:8050/'
    # Open the app in the default browser, then start the Dash server.
    webbrowser.open(url, new=1, autoraise=True)
    app.run_server(debug=False)# ,dev_tools_ui=False,dev_tools_props_check=False)
| [
"RY15618@grupo.ypf.com"
] | RY15618@grupo.ypf.com |
f8527e61ab34f1911b17fa049c376e9b2b0500f1 | 2bb2d5f01b1f9c77e8092f1bdbf15eb10b263b2b | /livecareer/items.py | 0e150df1eb46e48395e3d8a284e86ddb68281e46 | [] | no_license | vasarmilan/livecareer-scraper | 5ab96500ed167e319eb6814953cc8f7a885bdffd | f1b545a5de506fb223d94699cfb42c66897a9959 | refs/heads/master | 2022-04-22T09:49:54.860732 | 2020-03-17T11:58:08 | 2020-03-17T11:58:08 | 247,695,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class LivecareerItem(scrapy.Item):
    """Placeholder item for the livecareer spider; no fields defined yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| [
"vasarmilan@gmail.com"
] | vasarmilan@gmail.com |
fcf73361e13334179a65507f2fd77fdb971b2c40 | 8ca47670ed87ff22b1086032e14175bb1b6760c7 | /scrapyCrawler/scrapycrawl/scrapycrawl/scrapy_redis/dupefilter.py | e17cd6c15b889ecc4131e8c88f9910d2803b4684 | [] | no_license | public-spider/spider | 789aad3b0f781464267f03234cd26ea4e1147978 | f1e57f3ac6548b1bb8f6a5dc1f03c39c481f9311 | refs/heads/master | 2020-12-24T14:53:38.040968 | 2014-10-20T03:15:27 | 2014-10-20T03:15:27 | 22,784,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | '''
Created on Aug 13, 2014
@author: whisky
'''
import redis
import time
from scrapy.dupefilter import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
class RFPDupeFilter(BaseDupeFilter):
    """Request de-duplication filter backed by a Redis set.

    Request fingerprints are stored via SADD under a timestamped key, so
    separate crawl runs do not share duplicate state.
    """

    def __init__(self, server, key):
        """
        :param server: redis.Redis connection.
        :param key: name of the Redis set that holds the fingerprints.
        """
        self.server = server
        self.key = key

    @classmethod
    def from_settings(cls, settings):
        """Build the filter from Scrapy settings (REDIS_HOST / REDIS_PORT)."""
        host = settings.get('REDIS_HOST', 'localhost')
        # Fixed: the settings key was misspelled 'REDIS-PORT', so any
        # configured port was silently ignored and 6379 always used.
        port = settings.get('REDIS_PORT', 6379)
        server = redis.Redis(host, port)
        key = "dupefilter:%s" % int(time.time())
        return cls(server, key)

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy entry point.

        Fixed: this was misspelled ``from_clawler`` with a broken
        ``(self, cls, crawler)`` signature and no @classmethod, so Scrapy
        could never instantiate the filter through it.
        """
        return cls.from_settings(crawler.settings)

    def request_seen(self, request):
        """Return True if the request's fingerprint was already recorded."""
        fp = request_fingerprint(request)
        if self.server.sismember(self.key, fp):
            return True
        self.server.sadd(self.key, fp)
        return False

    def close(self, reason):
        """Delete stored fingerprints on close; called by Scrapy's scheduler."""
        self.clear()

    def clear(self):
        """Remove the fingerprint set from Redis."""
        self.server.delete(self.key)
| [
"260643431@qq.com"
] | 260643431@qq.com |
d1564abb5583ba7d937b0d846491cf7aa40a1cb2 | 00ef8e1eb57b73427508b20aadf0266da6b1f900 | /rlf/exp_mgr/viz_utils.py | f323dee2afc60a42bb37336d3b28e50fe18fb7b4 | [] | no_license | amy12xx/rl-toolkit | f4643935cc8afd960356bfeae74c233d2596dea9 | 8254df8346752ea0226ae2064cc1eabc839567b0 | refs/heads/master | 2023-08-14T00:56:52.270642 | 2021-09-28T15:59:32 | 2021-09-28T15:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,503 | py | """
Utilities for manipulating images, rendering images, and rendering videos.
"""
import os
import os.path as osp
from argparse import Namespace
from typing import List, Optional, Union
import cv2
import matplotlib.pyplot as plt
import numpy as np
import rlf.rl.utils as rutils
try:
import wandb
except:
pass
def append_text_to_image(
    image: np.ndarray, lines: List[str], from_bottom: bool = False
) -> np.ndarray:
    """
    Args:
        image: The NxMx3 frame to add the text to.
        lines: The list of strings (new line separated) to add to the image.
        from_bottom: If True, stack the lines upward from the bottom edge
            instead of downward from the top.
    Returns:
        image: (np.array): The modified image with the text appended.
    """
    h, w, c = image.shape
    font_size = 0.5
    font_thickness = 1
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Text is rendered on a black canvas and then added onto the frame.
    blank_image = np.zeros(image.shape, dtype=np.uint8)
    if from_bottom:
        y = image.shape[0]
    else:
        y = 0
    for line in lines:
        textsize = cv2.getTextSize(line, font, font_size, font_thickness)[0]
        if from_bottom:
            y -= textsize[1] + 10
        else:
            y += textsize[1] + 10
        x = 10
        cv2.putText(
            blank_image,
            line,
            (x, y),
            font,
            font_size,
            (255, 255, 255),
            font_thickness,
            lineType=cv2.LINE_AA,
        )
    # NOTE(review): plain uint8 addition can wrap around on bright pixels
    # under the text; confirm saturating cv2.add is not needed here.
    final = image + blank_image
    return final
def save_agent_obs(frames, imdim, vid_dir, name):
    """Dump stacked grayscale observation frames as individual JPEGs.

    :param frames: tensor indexed as frames[i, j] giving one 2D image per
        (sequence step, stack slot) -- assumed [N, stack, H, W]; confirm.
    :param imdim: channel count; only 1 (grayscale) is supported.
    :param vid_dir: base output directory; files go to `<vid_dir>/<name>_frames`.
    :param name: prefix for the output folder.
    """
    use_dir = osp.join(vid_dir, name + "_frames")
    if not osp.exists(use_dir):
        os.makedirs(use_dir)
    if imdim != 1:
        raise ValueError("Only gray scale is supported right now")
    for i in range(frames.shape[0]):
        for frame_j in range(frames.shape[1]):
            # File name encodes (sequence index)_(stack index).
            fname = f"{i}_{frame_j}.jpg"
            frame = frames[i, frame_j].cpu().numpy()
            cv2.imwrite(osp.join(use_dir, fname), frame)
    print(f"Wrote observation sequence to {use_dir}")
def save_mp4(frames, vid_dir, name, fps=60.0, no_frame_drop=False, should_print=True):
    """Write a sequence of image frames to `<vid_dir>/<name>.mp4`.

    If the first element is 4-D (a batch of frames), all batches are
    concatenated into one flat sequence first. Only the first 3 channels
    are written and they are reversed (RGB -> BGR) for OpenCV.

    NOTE(review): `no_frame_drop` is accepted but never used here.
    """
    frames = np.array(frames)
    if len(frames[0].shape) == 4:
        # Flatten a list of frame batches into a single frame sequence.
        new_frames = frames[0]
        for i in range(len(frames) - 1):
            new_frames = np.concatenate([new_frames, frames[i + 1]])
        frames = new_frames
    if not osp.exists(vid_dir):
        os.makedirs(vid_dir)
    vid_file = osp.join(vid_dir, name + ".mp4")
    if osp.exists(vid_file):
        os.remove(vid_file)
    # OpenCV expects (width, height); frames are (rows, cols, channels).
    w, h = frames[0].shape[:-1]
    videodims = (h, w)
    fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
    video = cv2.VideoWriter(vid_file, fourcc, fps, videodims)
    for frame in frames:
        frame = frame[..., 0:3][..., ::-1]
        video.write(frame)
    video.release()
    if should_print:
        print(f"Rendered to {vid_file}")
def plot_traj_data(
    pred: np.ndarray,
    real: np.ndarray,
    save_name: str,
    log_name: str,
    save_path_info: Union[Namespace, str],
    step: int,
    y_axis_name: str = "State %i",
    no_wb: Optional[bool] = None,
    title: str = "",
    ylim=None,
):
    """
    Plots each state dimension of a trajectory comparing a predicted and real trajectory.
    :param pred: Shape [H, D] for a trajectory of length H and state dimension D.
        D plots will be created.
    :param real: Shape [H, D].
    :param save_name: Appended to log_name. This should likely be unique so
        files on the disk are not overriden. Include file extension.
    :param log_name: Has %i in the name to dynamically insert the state dimension.
        Should NOT be unique so the log key is updated.
    :param save_path_info: The save path will either be extracted from the args or the
        path passed as a string.
    :param y_axis_name: string with %i to dynamically insert state dimension.
    :returns: mean of the per-dimension MSE between pred and real.
    """
    save_name = log_name + "_" + save_name
    if isinstance(save_path_info, str):
        save_path = osp.join(save_path_info, save_name)
    else:
        save_path = osp.join(rutils.get_save_dir(save_path_info), save_name)
    if no_wb is None:
        # Fall back to the W&B flag from the args namespace when not given.
        if not isinstance(save_path_info, Namespace) and "no_wb" not in vars(
            save_path_info
        ):
            raise ValueError(
                f"Could not find property `no_wb` in the passed `save_path_info`"
            )
        no_wb = save_path_info.no_wb

    # Per-dimension error over the whole horizon, used in the plot titles.
    per_state_mse = np.mean((pred - real) ** 2, axis=0)
    per_state_sqrt_mse = np.sqrt(per_state_mse)

    H, state_dim = real.shape
    for state_i in range(state_dim):
        # One figure per state dimension; %i is substituted into the path.
        use_save_path = save_path % state_i
        plt.plot(np.arange(H), real[:, state_i], label="Real")
        plt.plot(np.arange(H), pred[:, state_i], label="Pred")
        plt.grid(b=True, which="major", color="lightgray", linestyle="--")
        plt.xlabel("t")
        plt.ylabel(y_axis_name % state_i)
        if ylim is not None:
            plt.ylim(ylim)
        if isinstance(title, list):
            use_title = title[state_i]
        else:
            use_title = title
        if len(use_title) != 0:
            use_title += "\n"
        use_title += "MSE %.4f, SQRT MSE %.4f" % (
            per_state_mse[state_i],
            per_state_sqrt_mse[state_i],
        )

        plt.title(use_title)
        plt.legend()
        rutils.plt_save(use_save_path)
        if not no_wb:
            use_full_log_name = log_name % state_i
            wandb.log(
                {use_full_log_name: [wandb.Image(use_save_path)]},
                step=step,
            )
    return np.mean(per_state_mse)
| [
"me@andrewszot.com"
] | me@andrewszot.com |
d3ae884063fc0c7dd51548c9a177d6e35488fb1e | 0687f997984b71293ba896862758f46103901b36 | /compute_prediction/cnn_test.py | 2999faa86459e04be7a87ac6426d9d8c0203540b | [] | no_license | XinYao1994/Clara | 28b6ad41428301a49401d60d36d15741857dbbdc | eea38c52beb17600dd325f465a3740f267bab2e5 | refs/heads/master | 2023-08-29T22:01:17.451714 | 2021-11-01T04:08:25 | 2021-11-01T04:08:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,830 | py | import warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
#import matplotlib.pyplot as plt
import tqdm
import csv
import re
import pickle
with open("training.pickle",'rb') as ftrain:
dataset_train = pickle.load(ftrain)
X, Y = dataset_train
with open("testing.pickle",'rb') as ftest:
dataset_test = pickle.load(ftest)
X_test, Y_test = dataset_test
with open("nf.pickle",'rb') as factual:
dataset_actual = pickle.load(factual)
X_actual, Y_actual = dataset_actual
with open("source.pickle",'rb') as fsource:
source_text_to_int = pickle.load(fsource)
with open("target.pickle",'rb') as ftarget:
target_text_to_int = pickle.load(ftarget)
# parameters
tf.reset_default_graph()
HIDDEN_SIZE = 512
SENTENCE_LIMIT_SIZE = 70
EMBEDDING_SIZE = 100
source_vocab_size = 125
encoder_embedding_size = 100
filters_size = [3, 5]
num_filters = 50
BATCH_SIZE = 256
EPOCHES = 50
LEARNING_RATE = 0.001
L2_LAMBDA = 10
KEEP_PROB = 0.8
# Build the TF1 graph: embedding -> parallel conv branches (one per n-gram
# width in filters_size) -> global max-pool -> single sigmoid output unit.
with tf.name_scope("cnn"):
    with tf.name_scope("placeholders"):
        # Token-id sequences, padded/truncated to SENTENCE_LIMIT_SIZE.
        inputs = tf.placeholder(dtype=tf.int32, shape=(None, SENTENCE_LIMIT_SIZE), name="inputs")
        # One scalar target per example; presumably pre-scaled by 1/64
        # (see the *64 rescaling in the evaluation section below).
        targets = tf.placeholder(dtype=tf.float32, shape=(None, 1), name="targets")
    # embeddings
    with tf.name_scope("embeddings"):
        #embedding_matrix = tf.Variable(initial_value=static_embeddings, trainable=False, name="embedding_matrix")
        #embed = tf.nn.embedding_lookup(embedding_matrix, inputs, name="embed")
        encoder_embed = tf.contrib.layers.embed_sequence(inputs, source_vocab_size, encoder_embedding_size)
        # Add a trailing channel axis so conv2d sees (batch, seq, embed, 1).
        embed_expanded = tf.expand_dims(encoder_embed, -1, name="embed_expand")
    # max-pooling results
    pooled_outputs = []
    # iterate multiple filter
    for i, filter_size in enumerate(filters_size):
        with tf.name_scope("conv_maxpool_%s" % filter_size):
            # Filter spans the full embedding width, so EMBEDDING_SIZE must
            # equal encoder_embedding_size for the shapes to line up.
            filter_shape = [filter_size, EMBEDDING_SIZE, 1, num_filters]
            W = tf.Variable(tf.truncated_normal(filter_shape, mean=0.0, stddev=0.1), name="W")
            b = tf.Variable(tf.zeros(num_filters), name="b")
            conv = tf.nn.conv2d(input=embed_expanded,
                                filter=W,
                                strides=[1, 1, 1, 1],
                                padding="VALID",
                                name="conv")
            # activation
            a = tf.nn.relu(tf.nn.bias_add(conv, b), name="activations")
            # pooling
            # Pool over every valid position -> one value per feature map.
            max_pooling = tf.nn.max_pool(value=a,
                                         ksize=[1, SENTENCE_LIMIT_SIZE - filter_size + 1, 1, 1],
                                         strides=[1, 1, 1, 1],
                                         padding="VALID",
                                         name="max_pooling")
            pooled_outputs.append(max_pooling)
    # filter information
    total_filters = num_filters * len(filters_size)
    total_pool = tf.concat(pooled_outputs, 3)
    flattend_pool = tf.reshape(total_pool, (-1, total_filters))
    # dropout
    #with tf.name_scope("dropout"):
        #dropout = tf.nn.dropout(flattend_pool, KEEP_PROB)
    # output
    with tf.name_scope("output"):
        W = tf.get_variable("W", shape=(total_filters, 1), initializer=tf.contrib.layers.xavier_initializer())
        b = tf.Variable(tf.zeros(1), name="b")
        logits = tf.add(tf.matmul(flattend_pool, W), b)
        predictions = tf.nn.sigmoid(logits, name="predictions")
    # loss
    with tf.name_scope("loss"):
        # NOTE(review): sigmoid cross-entropy with real-valued targets while
        # the evaluation below treats this as regression -- confirm a
        # squared/absolute loss was not intended.
        loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
        # The L2 penalty applies only to the output-layer W (conv filters excluded).
        loss = loss + L2_LAMBDA * tf.nn.l2_loss(W)
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)
    # evaluation
    with tf.name_scope("evaluation"):
        # NOTE(review): thresholding at 0.5 only matches targets that are
        # exactly 0.0 or 1.0; this metric is not used by the code below.
        correct_preds = tf.equal(tf.cast(tf.greater(predictions, 0.5), tf.float32), targets)
        accuracy = tf.reduce_sum(tf.reduce_sum(tf.cast(correct_preds, tf.float32), axis=1))
def get_batch(x, y, batch_size=BATCH_SIZE, shuffle=True):
    """Yield successive aligned (x_batch, y_batch) pairs of ``batch_size`` rows.

    :param x: 2-D array of inputs; rows correspond one-to-one with ``y``.
    :param y: array of targets with the same number of rows as ``x``.
    :param batch_size: rows per batch (defaults to the module BATCH_SIZE).
    :param shuffle: shuffle rows (with one shared permutation) before batching.

    NOTE: intentionally yields only ``n_batches - 1`` batches -- the callers
    normalise their error sums by ``(n_batches - 1)``, so keep them in sync.
    """
    # BUGFIX: the assert message used to be print("error shape!"), which
    # printed eagerly on failure and passed None as the message.
    assert x.shape[0] == y.shape[0], "x and y must have the same number of rows"
    # shuffle
    if shuffle:
        # One shared permutation keeps x/y rows aligned.
        shuffled_index = np.random.permutation(range(x.shape[0]))
        x = x[shuffled_index]
        y = y[shuffled_index]
    n_batches = int(x.shape[0] / batch_size)
    for i in range(n_batches - 1):
        x_batch = x[i*batch_size: (i+1)*batch_size]
        y_batch = y[i*batch_size: (i+1)*batch_size]
        yield x_batch, y_batch
saver = tf.train.Saver()
import time
with tf.Session() as sess:
    #sess.run(tf.global_variables_initializer())
    # Restore the trained weights instead of initialising from scratch.
    saver.restore(sess, "./models/cnn_final")
    writer = tf.summary.FileWriter("./graphs/cnn_final", tf.get_default_graph())
    n_batches = int(X.shape[0] / BATCH_SIZE)
    print("n_batches: ", n_batches)
    total_ind = 0
    end_flag = 0
    # --- Mean relative error on the held-out test split --------------------
    # Targets/predictions were scaled by 1/64 upstream, hence the *64 below.
    test_sum = 0
    t_batches = int(X_test.shape[0] / BATCH_SIZE)
    for x_batch, y_batch in get_batch(X_test, Y_test):
        answer = sess.run(predictions, feed_dict={inputs: x_batch, targets: y_batch})
        for index in range(len(answer)):
            test_sum += (abs(answer[index]*64-y_batch[index]*64))/(y_batch[index]*64)
    # get_batch yields t_batches-1 batches of 256 rows, hence the denominator.
    print("Test loss: {}".format(test_sum/(256*(t_batches-1))))
    answer = sess.run(predictions, feed_dict={inputs: X_test[-1:], targets: Y_test[-1:]})
    #print(answer, Y_test[-1])
    #lstm_test_accuracy.append(test_sum/(256*(t_batches-1)))
    # --- Same relative error on the training split -------------------------
    real_sum = 0
    r_batches = int(X.shape[0] / BATCH_SIZE)
    for x_batch, y_batch in get_batch(X, Y):
        answer = sess.run(predictions, feed_dict={inputs: x_batch, targets: y_batch})
        for index in range(len(answer)):
            real_sum += (abs(answer[index]*64-y_batch[index]*64))/(y_batch[index]*64)
    print("Train loss: {}".format(real_sum/(256*(r_batches-1))))
    #lstm_real_accuracy.append(real_sum/(256*(r_batches-1)))
    # --- Per-element WMAPE on the real Click network-function samples ------
    answer = sess.run(predictions, feed_dict={inputs: X_actual, targets: Y_actual})
    summation = 0
    jndex = 0
    pos = 0
    nfs = ["aggcounter", "anonipaddr", "forcetcp", "tcp_gen", "tcpack", "tcpresp", "timefilter" ,"udpipencap"]
    # Sample count per element above; the counts sum to 89, matching range(89).
    len_nfs = [15, 5, 17, 15, 2, 19, 12, 4]
    nn = a = b = c = 0
    temp_list = []
    for index in range(89):
        a += answer[index]
        b += Y_actual[index]
        c += abs(answer[index]-Y_actual[index])
        summation += abs(answer[index]-Y_actual[index])/Y_actual[index]
        nn += abs(answer[index]-Y_actual[index])/Y_actual[index]
        # Count down through the current element's samples; on its last
        # sample record WMAPE = sum|err| / sum(pred) and reset accumulators.
        if len_nfs[pos] > 1:
            len_nfs[pos] -= 1
        else:
            temp_var = c/a
            temp_list.append(temp_var[0])
            pos += 1
            a = b = c = nn = 0
    print("Performance on real Click elements: ")
    for index, item in enumerate(temp_list):
        print("WMAPE of:", nfs[index], item)
    time_start = time.time()
    writer.close()
| [
"qiuyimingrichard@gmail.com"
] | qiuyimingrichard@gmail.com |
47b47f8164ca12deea39a8616361bde823c92e50 | 09c976bf8d942bb30e284fff9f76db1845c2aa6a | /UTD_CS_6375/HW6/ScikitKmeansAndKmeans++.py | b8d692a229dbc4680cbc8e6554d8e7a633fb2639 | [] | no_license | mikexie360/UTD_CS | 232d62ca992b43c8f4917f5525fc006fdc7132df | 23f7a6266841f6c25dd649d56060b961343869f7 | refs/heads/master | 2023-04-30T06:40:55.272767 | 2021-05-25T00:48:02 | 2021-05-25T00:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 10 17:29:41 2020
@author: ROHITH PEDDI
"""
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
# Leaf dataset: column 0 is the species label (used as the reference
# clustering to compare against), the remaining columns are numeric features.
data_train = pd.read_csv('leaf.data', header = None).values
M, N_c = data_train.shape
cluster_centers_actual = data_train[:, 0]
X = data_train[:, 1:N_c]
#k_list = [12, 18, 24, 36, 42]
k_list = [36]
tol = 1e-17
# NOTE(review): max_iter is a float here while scikit-learn documents an int;
# confirm it passes parameter validation on the sklearn version in use.
max_iter = 1e+4
# Number of feature columns (label column excluded).
N = N_c-1
###################################################################################
####################### KMEANS ########################
###################################################################################
def get_vanilla_cluster_centers(n_clusters, n_features=None):
    """Draw random integer cluster centres uniformly from {-3, ..., 3}.

    :param n_clusters: number of centres to generate.
    :param n_features: dimensionality of each centre; defaults to the
        module-level N (the data dimensionality) so existing callers are
        unaffected.
    :return: float array of shape (n_clusters, n_features).
    """
    if n_features is None:
        n_features = N
    # One vectorised draw replaces the original per-row loop over
    # np.random.choice; astype(float) preserves the original float64 dtype
    # (the old code filled an np.empty float matrix).
    return np.random.choice(np.arange(-3, 4), size=(n_clusters, n_features)).astype(float)
def Vanilla_Kmeans():
    """Run k-means once per k in k_list with random integer initial centres.

    Returns a (20, 5) matrix of inertia values laid out as [iteration, k-index].
    NOTE(review): the matrix comes from np.empty and, with the current
    ``range(1)`` / ``k_list = [36]`` settings, only cell [0][0] is ever
    written -- every other entry is uninitialised garbage, so the mean and
    variance computed from the result below are only meaningful if the loop
    bounds are restored (20 iterations x 5 k values).
    """
    inertia_matrix = np.empty((20, 5))
    for i in range(1):
        for j in range(len(k_list)):
            print('#############################################################################')
            n_clusters = k_list[j]
            print (n_clusters, ' CLUSTERS, ','ITERATION ', i)
            # Fresh random initial centres; n_init=1 below makes scikit-learn
            # use exactly this initialisation instead of restarting.
            cluster_centers = get_vanilla_cluster_centers(n_clusters)
            kmeans = KMeans(n_clusters=n_clusters, init=cluster_centers, tol=tol, max_iter=max_iter, verbose=1, n_init=1).fit(X)
            predicted_cluster_labels = kmeans.labels_
            # Print the reference labels (first CSV column) next to the
            # predicted ones (+1 so cluster ids start at 1 like the labels).
            print(cluster_centers_actual)
            print(predicted_cluster_labels+1)
            inertia_matrix[i][j] = kmeans.inertia_
    return inertia_matrix
# Run the vanilla experiment and summarise inertia across iterations.
# NOTE(review): with the current loop bounds inside Vanilla_Kmeans most of
# the returned matrix is uninitialised, so these statistics are unreliable.
vanilla_inertia_matrix = Vanilla_Kmeans()
vanilla_mean = np.mean(vanilla_inertia_matrix, axis = 0)
vanilla_var = np.var(vanilla_inertia_matrix, axis = 0)
###################################################################################
####################### KMEANS++ ########################
###################################################################################
def get_kmeans_plus_plus_cluster_centers(n_clusters, data=None):
    """k-means++ seeding (Arthur & Vassilvitskii, 2007).

    Picks a first centre uniformly at random, then repeatedly samples the
    next centre with probability proportional to each point's squared
    distance to its nearest already-chosen centre.

    :param n_clusters: number of centres to pick.
    :param data: (n_samples, n_features) array to seed from; defaults to the
        module-level X so existing callers are unaffected.
    :return: array of shape (n_clusters, n_features).

    Performance fix: the original triple loop recomputed the distance from
    every point to EVERY chosen centre on each round (O(M*k^2*N)).  Because
    min over a growing set satisfies min(old_min, dist_to_new_centre), the
    distances are now updated incrementally (O(M*k*N)) with identical
    results and an unchanged random-draw sequence.
    """
    if data is None:
        data = X
    n_samples, n_features = data.shape
    # Pick a random point as the first centre (same RNG call as before).
    cluster_centers = [data[np.random.randint(0, n_samples), :].reshape(1, -1)]
    # Squared distance of every point to its nearest chosen centre so far.
    min_sq_dist = np.sum((data - cluster_centers[0]) ** 2, axis=1)
    # Run the loop for k-1 times
    for _ in range(n_clusters - 1):
        # Normalise distances into a probability distribution; points that
        # already are centres have distance 0 and can never be re-picked.
        probabilities = (min_sq_dist / np.sum(min_sq_dist)).tolist()
        sampled_choice = np.random.choice(list(range(0, n_samples)), 1, p=probabilities)
        new_cluster_center = data[sampled_choice]
        cluster_centers.append(new_cluster_center)
        # Incremental update: only the newest centre can lower a minimum.
        new_sq_dist = np.sum((data - new_cluster_center) ** 2, axis=1)
        min_sq_dist = np.minimum(min_sq_dist, new_sq_dist)
    return np.array(cluster_centers).reshape(n_clusters, n_features)
def Kmeans_plus_plus():
    """Run k-means with k-means++ style seeding, 20 restarts per k in k_list.

    Returns a (20, 5) inertia matrix indexed [iteration, k-index].
    NOTE(review): with ``k_list = [36]`` only column 0 is written; the other
    columns keep np.empty garbage.
    """
    kmeans_plus_plus_inertia_matrix = np.empty((20, 5))
    for i in range(20):
        for j in range(len(k_list)):
            print('#############################################################################')
            n_clusters = k_list[j]
            print (n_clusters, ' CLUSTERS, ','ITERATION ', i)
            cluster_centers = get_kmeans_plus_plus_cluster_centers(n_clusters)
            # n_init=1: use exactly the k-means++ centres computed above.
            kmeans = KMeans(n_clusters=n_clusters, init=cluster_centers, tol=tol, max_iter=max_iter, verbose=1, n_init=1).fit(X)
            predicted_cluster_labels = kmeans.labels_
            kmeans_plus_plus_inertia_matrix[i][j] = kmeans.inertia_
    return kmeans_plus_plus_inertia_matrix
#kmeans_plus_plus_inertia_matrix = Kmeans_plus_plus()
#kmeans_plus_plus_mean = np.mean(kmeans_plus_plus_inertia_matrix, axis = 0)
#kmeans_plus_plus_var = np.var(kmeans_plus_plus_inertia_matrix, axis = 0) | [
"rohith.peddi7@gmail.com"
] | rohith.peddi7@gmail.com |
70243d88dd542f055a340a1e310647eb9f0dc9be | 18810b28505bf2625b40ebd9a673548203616261 | /Job Scraper/apply.py | 71784c63fa34df09b85c8e58f8fd794768b8ecd3 | [] | no_license | hzbrz/Scrapers | b7af43172218da32f0fcf605249803f082a8ec56 | 419341badcf6b0bc1fd01c403837c65ccb0492b5 | refs/heads/master | 2021-07-13T09:26:00.313902 | 2020-06-04T06:29:20 | 2020-06-04T06:29:20 | 136,759,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | def easy_app(easy_app_element):
print(easy_app_element) | [
"hasanthedev@gmail.com"
] | hasanthedev@gmail.com |
fdb31dc080683eafda61d023918635e0d3993089 | 19a5407847be78fcc48dfedbfa677c78e26d39e6 | /PythonLearn/函数式编程/高阶函数/filter.py | 87444e5290e4104e6f75fc8d9fa03eae05144359 | [] | no_license | gong782008371/yuquan | f9ac943ef6f1f8a0f855eb7be289ba5f830fccfe | 93ef594ec671f3ac3a945609065bd481238cead6 | refs/heads/master | 2020-06-05T02:25:47.070041 | 2017-01-11T11:31:18 | 2017-01-11T11:31:18 | 31,074,477 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | # -*- coding:utf-8 -*-
#Python's built-in filter() function is used to filter a sequence.
#
#Like map(), filter() takes a function and a sequence.
#Unlike map(), filter() applies the given function to each element in turn, then
#keeps or discards the element depending on whether the return value is True or False.
def is_odd(n):
    """Return True when n is odd (remainder 1 modulo 2, matching Python's %)."""
    remainder = n % 2
    return remainder == 1
print filter(is_odd, [1, 2, 3, 4, 5, 6, 9]) #[1, 3, 5, 9]
def not_empty(s):
    """Return a falsy value for None/'' and whitespace-only strings.

    For a falsy input the input itself comes back unchanged; otherwise the
    stripped string is returned (falsy exactly when s is all whitespace).
    """
    if not s:
        return s
    return s.strip()
print filter(not_empty, ['A', '', 'B', None, 'C', ' ']) #['A', 'B', 'C']
#Exercise
#
#Try using filter() to remove the prime numbers from 1~100.
import math
def not_prime(x):
    """Return True when x is NOT prime (composite, 1, zero, or negative).

    Trial division up to sqrt(x); the +0.5 guards against sqrt returning a
    value fractionally below an exact integer root.
    """
    if x <= 1:
        return True
    limit = int(math.sqrt(x + 0.5))
    return any(x % divisor == 0 for divisor in range(2, limit + 1))
print filter(not_prime, [i for i in range(1, 101)]) | [
"782008371@qq.com"
] | 782008371@qq.com |
394b9512d2800f6c8f99772dfb85f1a9e29e87b2 | d653c7f6667403ca587b6c68fc4af28b74adf083 | /blog/models/post.py | e399a54e1ded0a08e3de3667660873892049d8d9 | [] | no_license | JoshParinussa/django-blog | 5e1aafd6849f5c070ed5f4901d7b8d16df71d7cf | dde1d708617742b84e56d38678acad198eb51b4f | refs/heads/master | 2022-12-12T16:40:56.662777 | 2020-09-12T11:25:12 | 2020-09-12T11:25:12 | 294,923,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | """Banner models."""
from django.db import models
from django.utils.translation import gettext as _
from sorl.thumbnail import ImageField
from blog.helpers import file as file_helper
from .base import BaseModel
class Post(BaseModel):
    """A blog post: a titled, categorised text entry with an image."""
    # Headline; db_index speeds up lookups/ordering by title.
    title = models.CharField(_("title"), max_length=255, db_index=True)
    # Deleting a Category cascades to its posts; reverse accessor is
    # ``category.blog`` (related_name -- the parentheses are redundant,
    # the value is just the string 'blog').
    category = models.ForeignKey('Category', on_delete=models.CASCADE, related_name=('blog'), verbose_name=_('category'))
    # Body text; optional.
    content = models.TextField(_("content"), blank=True, null=True)
    # Set once when the row is first saved.
    created_at = models.DateTimeField(auto_now_add=True)
    # URL identifier; unique across all posts.
    slug = models.SlugField(max_length=200, unique=True)
    # sorl-thumbnail ImageField; uploads stored under a date-based path.
    image = ImageField(_("image"), upload_to=file_helper.DateUploadPath('blog/blog'))
    class Meta:
        # Newest posts first by default.
        ordering = ['-created_at']
    def __str__(self):
        return self.title
| [
"yehezkieljosh@gmail.com"
] | yehezkieljosh@gmail.com |
e6b153bf96ce71b451479b1de85e5d447c280b49 | d848bfea4045d3a4844298aec39e046d1345318a | /catkin_ws/devel/lib/python2.7/dist-packages/unitree_legged_msgs/msg/_IMU.py | 4b189cbbb6583edf67f84b6c1eb127f18b32b696 | [] | no_license | KyleM73/pronto | 37730ec478b9642ff616d61a6c50b1086f547c0f | 819f87fbc39004293413fb9fc137cdbce238f0db | refs/heads/main | 2023-06-02T08:51:44.126947 | 2021-06-18T20:50:09 | 2021-06-18T20:50:09 | 378,260,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | /home/ooboontoo/catkin_ws/devel/.private/unitree_legged_msgs/lib/python2.7/dist-packages/unitree_legged_msgs/msg/_IMU.py | [
"kyle.morgenstein@gmail.com"
] | kyle.morgenstein@gmail.com |
bffe1c48eedf03607efaea59b6631311acfecb45 | 684cc3be4bbc6b7edfc9467e21d80b6bcfa692bc | /db/__init__.py | fc3b5e809327dc514f4fc4dd894ba88dbed3ba87 | [] | no_license | sap-ibso-t4/DemoJam2020-ML | 99ed38735ece9a95a583579c995cf7e9cde6cea5 | cb4cbe77cfb76e25b4792231b9b0444e5a236c81 | refs/heads/master | 2023-01-10T21:44:18.989212 | 2020-11-03T09:35:16 | 2020-11-03T09:35:16 | 297,948,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from .sqliteAPI import SqliteAPI
from .dict_to_itab import data_frame_to_internal_table
| [
"zzfancitizen@gmail.com"
] | zzfancitizen@gmail.com |
eca7173612171328c19b0b90c0a3639ff207df10 | 1860918ce852099127e1109a470e131b49bb1992 | /bdrmapit/bin/cython | 4af1479af91c21da144f2558d3e29a161232d525 | [] | no_license | CAIDA/mapkit-traceroute-bdrmapit-pipeline | 55b002a0df56d2c41d458b9ad05126957f8be519 | 9b709b0f27bcf95c77b65d0b7df511b639503098 | refs/heads/master | 2023-08-28T17:14:51.332621 | 2021-10-10T20:34:09 | 2021-10-10T20:34:09 | 229,122,638 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | #!/project/mapkit/agamerog/country_asn_analysis/elverton/internet-flattening/aslinks_pipeline/bdrmapit/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from Cython.Compiler.Main import setuptools_main
if __name__ == '__main__':
    # Console-script shim: strip the "-script.pyw"/".exe" suffix that Windows
    # entry-point launchers append to argv[0] so Cython sees a clean program
    # name, then delegate to Cython's compiler and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(setuptools_main())
| [
"agamerog.mit@gmail.com"
] | agamerog.mit@gmail.com | |
2aeb217b02dbe82cdc5445f4bec4aafb01b07802 | 68049b03dbbd9a3d778571794472e07c05fb00ad | /python/courses/jose_portilla/flask/sandbox/10_databases/10_1_flask_and_databases_practice/setupdatabase.py | e2f38694c1a0eb91547cf484e4e8aa594a19934b | [] | no_license | tjkhara/notes | c9e96ecea6efed860c521eb7df562c5715091aea | 5602a25ba23104e4154700108f1b8a3a0144f712 | refs/heads/master | 2023-01-20T07:42:47.129359 | 2020-11-24T06:43:24 | 2020-11-24T06:43:24 | 285,811,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from basic import db, Puppy
# Create all tables declared on the SQLAlchemy metadata:
# each model class (e.g. Puppy) becomes a table.
db.create_all()
# Three sample rows: Puppy(name, age).
sam = Puppy('Sammy', 3)
frank = Puppy('Frankie', 4)
miles = Puppy('Miles', 10)
# These print None because the objects are not in the database yet --
# primary keys are only assigned when the inserts are committed.
print(sam.id)
print(frank.id)
print(miles.id)
# Stage the three objects in the session (not yet written to disk).
db.session.add_all([sam, frank, miles])
# Commit: flushes the inserts and assigns auto-increment ids.
db.session.commit()
print(sam.id)
print(frank.id)
print(miles.id)
"tkhara@gmail.com"
] | tkhara@gmail.com |
485c81b87d1eebfd760738dacd40b66dfdd2c946 | 74c8c890de1522cff4b8a30b54d801bbc440265f | /Desafios/desafio029.py | a1602a393ace223e763099e9b3764f43c6bcbb62 | [] | no_license | fndalemao/Python | b302d90bdc190c61304868b5c8906e723830220c | d402dc2896ece28e2f46771b279c85ff0296067f | refs/heads/master | 2020-04-03T15:48:18.435243 | 2019-04-11T04:01:06 | 2019-04-11T04:01:06 | 155,301,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | vel = int(input('Digite sua velocidade: '))
# Speeding check: the fine is R$7.00 for every km/h above the 80 km/h limit.
if vel > 80:
    print('Você foi multado em {} reais!'.format((vel-80)*7))
else:
    print('Você está dentro do limite de velocidade!')
"fnd.alemao02@gmail.com"
] | fnd.alemao02@gmail.com |
10576346c4146798661b702e5f8b36a7e905b5f2 | 9f68ebfdfdc6d53f83552dcb221ae76d181790d3 | /sales/migrations/0007_invoice_becomes_receipt_too.py | bd1cf54803a022aa54c55d8ea506af4f5b7a18c3 | [] | no_license | marcor/silversly | 90695502cb4eeb5c66274275d22c6aaeec39dc04 | 68cdf30cdca5e475caae6dd1358ae75e0414c2b0 | refs/heads/master | 2023-04-30T22:55:05.203805 | 2021-05-21T17:59:18 | 2021-05-21T17:59:18 | 369,352,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,492 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the schema change: turn Invoice into a multi-table-inheritance
        child of Receipt (drop its own pk/cart link, add receipt_ptr)."""
        # Changing field 'Receipt.cart'
        # The cart link becomes nullable so a Receipt can exist without a Cart.
        db.alter_column('sales_receipt', 'cart_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sales.Cart'], unique=True, null=True))
        # Deleting field 'Invoice.cart'
        db.delete_column('sales_invoice', 'cart_id')
        # Deleting field 'Invoice.id'
        db.delete_column('sales_invoice', 'id')
        # Adding field 'Invoice.receipt_ptr'
        # receipt_ptr is the implicit parent link for multi-table inheritance.
        db.add_column('sales_invoice', 'receipt_ptr', self.gf('django.db.models.fields.related.OneToOneField')(default=0, to=orm['sales.Receipt'], unique=True, primary_key=True), keep_default=False)
    def backwards(self, orm):
        """Irreversible: dropped columns cannot be restored with their data.

        NOTE: everything after the first ``raise`` is unreachable; South
        generated these statements but the migration always aborts here.
        """
        # User chose to not deal with backwards NULL issues for 'Receipt.cart'
        raise RuntimeError("Cannot reverse this migration. 'Receipt.cart' and its values cannot be restored.")
        # Adding field 'Invoice.cart'
        db.add_column('sales_invoice', 'cart', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['sales.Cart'], unique=True, null=True), keep_default=False)
        # User chose to not deal with backwards NULL issues for 'Invoice.id'
        raise RuntimeError("Cannot reverse this migration. 'Invoice.id' and its values cannot be restored.")
        # Deleting field 'Invoice.receipt_ptr'
        db.delete_column('sales_invoice', 'receipt_ptr_id')
models = {
'inventory.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Category']", 'null': 'True', 'blank': 'True'})
},
'inventory.price': {
'Meta': {'unique_together': "(('pricelist', 'product'),)", 'object_name': 'Price'},
'gross': ('common.models.FixedDecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'net': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Pricelist']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"})
},
'inventory.pricelist': {
'Meta': {'object_name': 'Pricelist'},
'default_markup': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'default_method': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'primary_key': 'True'})
},
'inventory.product': {
'Meta': {'ordering': "['name', 'code']", 'object_name': 'Product'},
'base_price': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'catalogue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Category']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_quantity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '3'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'prices': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['inventory.Pricelist']", 'null': 'True', 'through': "orm['inventory.Price']", 'symmetrical': 'False'}),
'quantity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '3'}),
'suppliers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['people.Supplier']", 'null': 'True', 'through': "orm['inventory.Supply']", 'symmetrical': 'False'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'inventory.supply': {
'Meta': {'unique_together': "(('product', 'supplier'),)", 'object_name': 'Supply'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('common.models.FixedDecimalField', [], {'max_digits': '8', 'decimal_places': '3'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"}),
'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Supplier']"}),
'updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'people.bank': {
'Meta': {'object_name': 'Bank'},
'abi': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'cab': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'people.customer': {
'Meta': {'object_name': 'Customer'},
'cf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'due': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '8', 'decimal_places': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'default': "'Pubblico'", 'to': "orm['inventory.Pricelist']"})
},
'people.supplier': {
'Meta': {'ordering': "['name']", 'object_name': 'Supplier'},
'email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '15', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'phone': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '15', 'blank': 'True'})
},
'sales.cart': {
'Meta': {'object_name': 'Cart'},
'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Customer']", 'null': 'True'}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'final_discount': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_total': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pricelist': ('django.db.models.fields.related.ForeignKey', [], {'default': "'Pubblico'", 'to': "orm['inventory.Pricelist']"}),
'rounded': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sales.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Cart']"}),
'discount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'final_discount': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_price': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'final_value': ('common.models.FixedDecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.Product']"}),
'quantity': ('django.db.models.fields.DecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'update': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'sales.ddt': {
'Meta': {'ordering': "['date', 'number']", 'object_name': 'Ddt', '_ormbases': ['sales.Receipt']},
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'main_address': ('django.db.models.fields.TextField', [], {}),
'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'}),
'shipping_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'shipping_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 5, 21, 13, 41, 58, 142622)'}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'sales.invoice': {
'Meta': {'object_name': 'Invoice', '_ormbases': ['sales.Receipt']},
'bank': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['people.Bank']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'costs': ('common.models.FixedDecimalField', [], {'default': '0', 'max_digits': '7', 'decimal_places': '2'}),
'immediate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'number': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'payment_method': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'}),
'receipts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'proxy_receipt'", 'null': 'True', 'to': "orm['sales.Receipt']"}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'sales.receipt': {
'Meta': {'object_name': 'Receipt'},
'cart': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Cart']", 'unique': 'True', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'sales.scontrino': {
'Meta': {'object_name': 'Scontrino', '_ormbases': ['sales.Receipt']},
'cf': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'unique': 'True', 'blank': 'True'}),
'due': ('common.models.FixedDecimalField', [], {'max_digits': '7', 'decimal_places': '2'}),
'receipt_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['sales.Receipt']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['sales']
| [
"marcor@users.noreply.github.com"
] | marcor@users.noreply.github.com |
cf19035358b62ecd6f88a9ffd8a23464e7149a69 | 2863645627f099cad919c12dd1949bb3704923d2 | /tests/test_datasets/test_bottom_up_dataset.py | b218c4942ad7ae8b65bab8f7ea505b0d2bd7baf1 | [
"Apache-2.0"
] | permissive | zhangyu92/mmpose | d43c873111ac5a4614d3675495de3af771626eec | 17557522ce3e41f830973079c5b4321935c41439 | refs/heads/master | 2023-02-18T20:44:54.814555 | 2021-01-12T03:21:11 | 2021-01-12T03:21:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,707 | py | import tempfile
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from mmpose.datasets import DATASETS
def convert_coco_to_output(coco):
    """Turn a COCO annotation object into bottom-up evaluation outputs.

    For every image id, each annotation with at least one non-zero
    visibility flag contributes a (num_keypoints, 4) array of
    [x, y, 1.0, ann_id] rows.  The per-image result is the tuple
    (stacked predictions, score list, file name as a char list, None).
    """
    results = []
    for image_id in coco.getImgIds():
        image_info = coco.imgs[image_id]
        keypoint_sets = []
        confidences = []
        for annotation_id in coco.getAnnIds(image_id):
            kpts = np.array(coco.anns[annotation_id]['keypoints']).reshape((-1, 3))
            # Skip annotations whose visibility column is all zeros.
            if kpts[:, 2].sum() == 0:
                continue
            num_kpts = kpts.shape[0]
            ones_column = np.ones([num_kpts, 1])
            keypoint_sets.append(
                np.hstack((kpts[:, :2], ones_column, ones_column * annotation_id)))
            confidences.append(1)
        # The file name is exploded into a list of characters.
        char_path = list(image_info['file_name'])
        results.append((np.stack(keypoint_sets), confidences, char_path, None))
    return results
def test_bottom_up_COCO_dataset():
    """Smoke-test BottomUpCocoDataset: build, index, and evaluate mAP.

    Feeding the ground-truth annotations back as predictions must score a
    perfect AP of 1.0, and an unsupported metric must raise KeyError.
    """
    dataset = 'BottomUpCocoDataset'
    # test COCO datasets
    dataset_class = DATASETS.get(dataset)
    # NOTE(review): dataset_joints is 17 (standard COCO) but both channel
    # lists contain 18 indices (0..17) -- confirm the trailing 17 is intended.
    channel_cfg = dict(
        dataset_joints=17,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
        ],
        inference_channel=[
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17
        ])
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False)
    # Construction in train mode must not raise.
    _ = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/coco/test_coco.json',
        img_prefix='tests/data/coco/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    assert custom_dataset.num_images == 4
    # Indexing must not raise.
    _ = custom_dataset[0]
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_bottom_up_CrowdPose_dataset():
    """Smoke-test BottomUpCrowdPoseDataset (14 joints): build, index, evaluate.

    Ground truth fed back as predictions must score AP == 1.0; an
    unsupported metric must raise KeyError.
    """
    dataset = 'BottomUpCrowdPoseDataset'
    # test CrowdPose datasets
    dataset_class = DATASETS.get(dataset)
    channel_cfg = dict(
        num_output_channels=14,
        dataset_joints=14,
        dataset_channel=[
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
        ],
        inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
    data_cfg = dict(
        image_size=512,
        base_size=256,
        base_sigma=2,
        heatmap_size=[128, 256],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        num_scales=2,
        scale_aware_sigma=False)
    # Construction in train mode must not raise.
    _ = dataset_class(
        ann_file='tests/data/crowdpose/test_crowdpose.json',
        img_prefix='tests/data/crowdpose/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)
    custom_dataset = dataset_class(
        ann_file='tests/data/crowdpose/test_crowdpose.json',
        img_prefix='tests/data/crowdpose/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)
    # A known image id from the fixture annotation file.
    image_id = 103319
    assert image_id in custom_dataset.img_ids
    assert len(custom_dataset.img_ids) == 2
    _ = custom_dataset[0]
    outputs = convert_coco_to_output(custom_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'PCK')
def test_bottom_up_MHP_dataset():
    """Smoke-test BottomUpMhpDataset: construction, indexing, mAP eval."""
    dataset_class = DATASETS.get('BottomUpMhpDataset')

    channel_cfg = {
        'dataset_joints': 16,
        'dataset_channel': [list(range(16))],
        'inference_channel': list(range(16)),
    }
    data_cfg = {
        'image_size': 512,
        'base_size': 256,
        'base_sigma': 2,
        'heatmap_size': [128],
        'num_joints': channel_cfg['dataset_joints'],
        'dataset_channel': channel_cfg['dataset_channel'],
        'inference_channel': channel_cfg['inference_channel'],
        'num_scales': 1,
        'scale_aware_sigma': False,
    }

    # Train-mode construction must not raise.
    _ = dataset_class(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=False)

    mhp_dataset = dataset_class(
        ann_file='tests/data/mhp/test_mhp.json',
        img_prefix='tests/data/mhp/',
        data_cfg=data_cfg,
        pipeline=[],
        test_mode=True)

    # The test annotation file ships exactly two images, one of which is 2889.
    assert 2889 in mhp_dataset.img_ids
    assert len(mhp_dataset.img_ids) == 2
    _ = mhp_dataset[0]

    outputs = convert_coco_to_output(mhp_dataset.coco)
    with tempfile.TemporaryDirectory() as tmpdir:
        # Perfect (identity) predictions must evaluate to AP == 1.0,
        # and an unsupported metric must raise KeyError.
        infos = mhp_dataset.evaluate(outputs, tmpdir, 'mAP')
        assert_almost_equal(infos['AP'], 1.0)
        with pytest.raises(KeyError):
            _ = mhp_dataset.evaluate(outputs, tmpdir, 'PCK')
| [
"noreply@github.com"
] | zhangyu92.noreply@github.com |
46037bb5c0b7e53e047bcfb8753331ef30f63ef7 | 2eb413eafcf7b7ea6aefa41420e6adfdebdb21cb | /scripts/xln/trimDerivedTables.py | f42828ed7427c6a64864bb3d5ed7ebfe4871760d | [] | no_license | SerhiiSkrypchenko/Scripts | 92493f3c1e66fab6f496160380094f2c63bf98f9 | 0a5b8f339a3c45d63955d75ec4752d0ac7323678 | refs/heads/master | 2023-02-06T08:47:54.385193 | 2023-01-30T12:50:30 | 2023-01-30T12:50:30 | 217,525,702 | 0 | 0 | null | 2023-01-30T10:29:15 | 2019-10-25T12:09:25 | Python | UTF-8 | Python | false | false | 553 | py | import requests
import config_Luna_Wallet
url = config_Luna_Wallet.xln_mn_2
ADMIN_PASSWORD = config_Luna_Wallet.ADMIN_PASSWORD_T1
def trimDerivedTables(url):
    """POST an admin ``trimDerivedTables`` RPC to the node at *url* and
    echo the raw response body between start/end banner lines."""
    print("---------- START trimDerivedTables on --->>> " + url + " <<< ----")
    payload = {"requestType": "trimDerivedTables", "adminPassword": ADMIN_PASSWORD}
    # requests.post(...) is the documented shortcut for requests.request("POST", ...).
    response = requests.post(url + "/api/rpc", params=payload)
    print(response.text)
    print("--------END of trimDerivedTables proccess on peer --->>> " + url + " <<< --------")
trimDerivedTables(url)
| [
"43746242+SerhiiSkrypchenko@users.noreply.github.com"
] | 43746242+SerhiiSkrypchenko@users.noreply.github.com |
96f6f306ef276af2e33a2bfaa7d6b1e25cb524ae | 516ddb388a2f32fa5faf2877d0842e8ad6f02550 | /task/bin/pip3 | 30d052acbb62a0461430fcc21f80a3c689d18f10 | [] | no_license | md131376st/task6 | cf49b770c29d51a5abaaa737b089fbce02210d38 | 7ddc384bcb2f700b10cbf2e2d19ecee899b3689e | refs/heads/master | 2020-03-28T12:21:30.112278 | 2018-09-18T16:17:47 | 2018-09-18T16:17:47 | 148,290,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | #!/Users/md/summer/task6/task/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # setuptools-generated console-script stub for pip 10.0.1.
    # Strip any "-script.py(w)" / ".exe" suffix setuptools may have added to
    # argv[0] so pip reports a clean program name in its usage/error output.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve pip's registered 'pip3' entry point, run it, and propagate
    # its return code as the process exit status.
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"mona131376st@gmail.com"
] | mona131376st@gmail.com | |
3a9884fb534bd51716b75014723d49e7b5590761 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /analytic_billing_plan/wizard/analytic_billing_plan_line_make_sale.py | 921890470694b17358282339a41cfc55af455bcf | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 11,703 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <contact@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class analytic_billing_plan_line_make_sale(orm.TransientModel):
    """Wizard that converts the selected analytic billing plan lines into a
    single draft sale order (one sale order line per plan line).

    All selected plan lines must share the same customer, company and
    analytic account; otherwise the wizard raises. Legacy OpenERP 7 API
    (cr/uid signatures, _columns/_defaults).
    """
    _name = "analytic.billing.plan.line.make.sale"
    _description = "Analytic billing plan line make sale"
    def _get_order_lines(self, cr, uid, context=None):
        """
        Returns the order lines associated to the analytic accounts selected.

        Reads the plan-line ids from ``context['active_ids']`` and collects
        the ids of their linked sale order lines. Returns False when nothing
        is selected or no order lines are linked.
        """
        if context is None:
            context = {}
        record_ids = context and context.get('active_ids', False)
        if record_ids:
            order_line_ids = []
            line_plan_obj = self.pool.get('analytic.billing.plan.line')
            for line in line_plan_obj.browse(cr, uid, record_ids,
                                             context=context):
                for order_line in line.order_line_ids:
                    order_line_id = order_line and order_line.id
                    order_line_ids.extend([order_line_id])
            if order_line_ids:
                return order_line_ids
        return False
    def _get_default_shop(self, cr, uid, context=None):
        """Return the id of the first shop of the current user's company.

        Raises an osv.except_osv error if the company has no shop.
        """
        company_id = self.pool.get('res.users').browse(
            cr, uid, uid, context=context).company_id.id
        shop_ids = self.pool.get('sale.shop').search(
            cr, uid, [('company_id', '=', company_id)], context=context)
        if not shop_ids:
            raise osv.except_osv(_('Error!'),
                                 _('There is no default shop '
                                   'for the current user\'s company!'))
        return shop_ids[0]
    # Wizard fields (legacy _columns declaration).
    _columns = {
        'order_line_ids': fields.many2many('sale.order.line',
                                           'make_sale_order_line_rel',
                                           'order_line_id',
                                           'make_sale_order_id'),
        'shop_id': fields.many2one('sale.shop', 'Shop', required=True),
        'invoice_quantity': fields.selection([('order',
                                               'Ordered Quantities')],
                                             'Invoice on',
                                             help="The sales order will "
                                                  "automatically create the "
                                                  "invoice proposition "
                                                  "(draft invoice).",
                                             required=True),
        'order_policy': fields.selection([('manual', 'On Demand')],
                                         'Create Invoice',
                                         help="""This field controls how
                                             invoice and delivery
                                             operations are synchronized.""",
                                         required=True),
    }
    _defaults = {
        'order_line_ids': _get_order_lines,
        'shop_id': _get_default_shop,
        'order_policy': 'manual',
        'invoice_quantity': 'order',
    }
    def make_sales_orders(self, cr, uid, ids, context=None):
        """
        To make sales.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: the ID or list of IDs
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if context is None:
            context = {}
        record_ids = context and context.get('active_ids', False)
        make_order = self.browse(cr, uid, ids[0], context=context)
        res = []
        if record_ids:
            billing_plan_obj = self.pool.get('analytic.billing.plan.line')
            order_obj = self.pool.get('sale.order')
            order_line_obj = self.pool.get('sale.order.line')
            partner_obj = self.pool.get('res.partner')
            acc_pos_obj = self.pool.get('account.fiscal.position')
            list_line = []
            # These act as "first line wins" accumulators: every subsequent
            # plan line must match the customer / company / analytic account
            # established by the first one.
            customer_data = False
            company_id = False
            sale_id = False
            account_id = False
            for line in billing_plan_obj.browse(cr, uid, record_ids,
                                                context=context):
                uom_id = line.product_uom_id
                if not line.customer_id:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to enter a customer.'))
                if customer_data is not False \
                        and line.customer_id != customer_data:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to select lines '
                          'from the same customer.'))
                else:
                    customer_data = line.customer_id
                partner_addr = partner_obj.address_get(
                    cr, uid, [customer_data.id], ['default',
                                                  'invoice',
                                                  'delivery',
                                                  'contact'])
                newdate = datetime.today()
                partner = customer_data
                pricelist_id = partner.property_product_pricelist \
                    and partner.property_product_pricelist.id \
                    or False
                price_unit = line.price_unit
                line_company_id = line.company_id \
                    and line.company_id.id \
                    or False
                if company_id is not False \
                        and line_company_id != company_id:
                    raise osv.except_osv(
                        _('Could not create sale order !'),
                        _('You have to select lines '
                          'from the same company.'))
                else:
                    company_id = line_company_id
                shop_id = make_order.shop_id \
                    and make_order.shop_id.id \
                    or False
                line_account_id = line.account_id \
                    and line.account_id.id \
                    or False
                if account_id is not False \
                        and line_account_id != account_id:
                    raise osv.except_osv(
                        _('Could not create billing request!'),
                        _('You have to select lines from the '
                          'same analytic account.'))
                else:
                    account_id = line_account_id
                sale_order_line = {
                    'name': line.name,
                    'product_uom_qty': line.unit_amount,
                    'product_id': line.product_id.id,
                    'product_uom': uom_id.id,
                    'price_unit': price_unit,
                    'notes': line.notes,
                }
                taxes = False
                if line.product_id:
                    # Map the product's default taxes through the customer's
                    # fiscal position.
                    taxes_ids = line.product_id.product_tmpl_id.taxes_id
                    taxes = acc_pos_obj.map_tax(
                        cr, uid, partner.property_account_position,
                        taxes_ids)
                if taxes:
                    sale_order_line.update({
                        'tax_id': [(6, 0, taxes)]
                    })
                list_line.append(sale_order_line)
                # The sale order itself is created lazily, on the first
                # plan line only; later lines attach to the same order.
                if sale_id is False:
                    sale_id = order_obj.create(cr, uid, {
                        'origin': '',
                        'shop_id': shop_id,
                        'partner_id': customer_data.id,
                        'pricelist_id': pricelist_id,
                        'partner_invoice_id': partner_addr['invoice'],
                        'partner_order_id': partner_addr['contact'],
                        'partner_shipping_id': partner_addr['delivery'],
                        'date_order':
                        newdate.strftime('%Y-%m-%d %H:%M:%S'),
                        'fiscal_position':
                        partner.property_account_position and
                        partner.property_account_position.id or False,
                        'company_id': company_id,
                        'payment_term':
                        partner.property_payment_term and
                        partner.property_payment_term.id or False,
                        'project_id': account_id,
                        'invoice_quantity': make_order.invoice_quantity,
                        'order_policy': make_order.order_policy,
                    }, context=context)
                    # Subscribe the analytic account's manager to the order's
                    # message thread, if one is set.
                    if line.account_id.user_id:
                        order_obj.message_subscribe_users(
                            cr, uid, [sale_id],
                            user_ids=[line.account_id.user_id.id])
                sale_order_line.update({
                    'order_id': sale_id
                })
                order_line_id = order_line_obj.create(cr, uid,
                                                      sale_order_line,
                                                      context=context)
                # Link the created sale order line back to the plan line.
                values = {
                    'order_line_ids': [(4, order_line_id)]
                }
                billing_plan_obj.write(cr, uid, [line.id], values,
                                       context=context)
                res.append(order_line_id)
        # Open the created sale order lines in a list/form view.
        return {
            'domain': "[('id','in', ["+','.join(map(str, res))+"])]",
            'name': _('Billing request lines'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'sale.order.line',
            'view_id': False,
            'context': False,
            'type': 'ir.actions.act_window'
        }
analytic_billing_plan_line_make_sale() | [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
e4fd0b88f086e8155bee37b5546c0096f7760d3e | e78154abbb8bacf5afccda9da371684cbeabad36 | /envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/Release.py | c22250cf389d6cc8e86540e756de11ec217a66b1 | [
"BSD-3-Clause"
] | permissive | enterstudio/popego | 1a196fabc374c0f45764e5c74bd7752236424040 | 2d09e793d9d2f297139edb325b8a70ddda9b2705 | refs/heads/master | 2021-04-09T16:39:40.781634 | 2016-10-14T16:53:47 | 2016-10-14T16:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | # -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2855 2007-11-06 06:53:49Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
# <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
revision = '2876M'
# Development builds derived the version from the svn revision above;
# release builds pin it explicitly.
#version = '0.8.2.svn.r' + revision.rstrip('M')
version = '0.8.2'
# One-line summary used by distutils/PyPI.
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
# NOTE: 'license' intentionally shadows the builtin of the same name at
# module scope; it is plain distutils metadata.
license = 'BSD'
# Mapping of contributor nickname -> (full name, email) used by setup.py.
authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
           'Janko' : ('Janko Hauser','jhauser@zscout.de'),
           'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
           'Ville' : ('Ville Vainio','vivainio@gmail.com')
           }
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
| [
"santisiri@gmail.com"
] | santisiri@gmail.com |
914db65235c0f013520dd3bda449daee33be2fff | 0e1c3903ef3b11c2c6fe52a8970c7ec3aed82288 | /posthog/management/commands/create_bulk_events.py | c0a1378511c78b107346ce5d4bd3ff93c72d1e7a | [
"MIT"
] | permissive | mindhash/posthog | 55021cce5b1cc30839296890e5e19b7e5ae6f4b1 | 904c4c5d17a2d50019717b169e107ce441cadac7 | refs/heads/master | 2022-11-21T18:20:37.006995 | 2020-07-13T08:36:38 | 2020-07-13T08:36:38 | 279,254,284 | 1 | 0 | MIT | 2020-07-13T09:15:47 | 2020-07-13T09:15:47 | null | UTF-8 | Python | false | false | 7,878 | py | import random
import json
import uuid
import psycopg2
from urllib.parse import urlparse
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from django.core import serializers
from dateutil.relativedelta import relativedelta
from pathlib import Path
from typing import List
import time
from typing import Iterator, Optional
import io
from posthog.models import (
Event,
Element,
Team,
Person,
PersonDistinctId,
Funnel,
Action,
ActionStep,
FunnelStep,
)
def clean_csv_value(value: Optional[object]) -> str:
    """Render *value* for a PostgreSQL COPY stream.

    ``None`` becomes the COPY null marker ``\\N``; anything else is
    stringified with embedded newlines escaped so a value cannot break
    the one-record-per-line framing.

    Fix: the annotation previously referenced the *builtin* ``any``
    (a function), which is not a type; ``Optional[object]`` expresses
    the intended "anything or None".
    """
    if value is None:
        return r"\N"
    # COPY treats a raw newline as end-of-record, so escape it.
    return str(value).replace("\n", "\\n")
class StringIteratorIO(io.TextIOBase):
    """Read-only text stream backed by an iterator of string chunks.

    Lets APIs that expect a file-like object (e.g. ``cursor.copy_from``)
    consume a lazily generated sequence of strings without first joining
    them into one big string.
    """

    def __init__(self, iter: Iterator[str]):
        self._source = iter  # iterator supplying successive chunks
        self._pending = ""   # unread remainder of the current chunk

    def readable(self) -> bool:
        """This stream supports read()."""
        return True

    def _next_piece(self, n: Optional[int] = None) -> str:
        """Return up to *n* characters (the whole next chunk if n is None)."""
        # Skip empty chunks until we have data or the source is exhausted.
        while not self._pending:
            nxt = next(self._source, None)
            if nxt is None:
                break
            self._pending = nxt
        piece = self._pending[:n]
        self._pending = self._pending[len(piece):]
        return piece

    def read(self, n: Optional[int] = None) -> str:
        """Read and return up to *n* characters (all remaining if n is
        None or negative)."""
        pieces = []
        if n is None or n < 0:
            while True:
                piece = self._next_piece()
                if not piece:
                    break
                pieces.append(piece)
        else:
            remaining = n
            while remaining > 0:
                piece = self._next_piece(remaining)
                if not piece:
                    break
                remaining -= len(piece)
                pieces.append(piece)
        return "".join(pieces)
class Command(BaseCommand):
    """Create (or delete) a large batch of demo events for a team.

    Event rows are streamed into the ``posthog_event`` table through
    PostgreSQL's COPY protocol (``cursor.copy_from``) instead of the ORM,
    which is what makes the bulk insert fast.
    """
    help = "Create bulk events for testing"
    def add_arguments(self, parser):
        """Register ``--team_id`` (required) and ``--mode`` (create|delete)."""
        parser.add_argument("--team_id", nargs="+", type=int, help="specify the team id eg. --team_id 1")
        parser.add_argument(
            "--mode",
            nargs="+",
            default=["create"],
            help="""
            'delete' for deleting bulk demo data
            or 'create' for creating bulk demo data;
            default 'create'
            eg. --mode delete
            """,
        )
    def handle(self, *args, **options):
        """Command entry point: validate args, then create or delete data."""
        team_id = options["team_id"]
        mode = options["mode"][0]
        if not team_id:
            print("Please specify the --team id")
            return
        team = Team.objects.get(pk=team_id[0])
        with open(Path("posthog/demo_data.json").resolve(), "r") as demo_data_file:
            demo_data = json.load(demo_data_file)
        base_url = "127.0.0.1/bulk_demo/"
        if mode.lower() == "delete":
            start_time = time.time()
            self._delete_demo_data(team)
            print("--- %s seconds ---" % (time.time() - start_time))
        else:
            # "create" clears any previous demo data first so reruns are
            # idempotent.
            self._delete_demo_data(team)
            self._create_funnel(base_url, team)
            start_time = time.time()
            self._create_events(demo_data, team, base_url)
            print("--- %s seconds ---" % (time.time() - start_time))
    def _connect(self):
        """Open an autocommit psycopg2 connection to the configured database.

        Fix: the previous code caught the connect error, printed, and fell
        through with ``conn`` unbound, crashing a few lines later with a
        confusing NameError; now the original error is re-raised.
        """
        result = urlparse(settings.DATABASE_URL)
        database = result.path[1:]
        hostname = result.hostname
        try:
            conn = psycopg2.connect(dbname=database, host=hostname)
        except psycopg2.Error:
            print("Unable to connect to the database")
            raise
        conn.autocommit = True
        return conn
    def _create_events(self, demo_data, team, base_url):
        """Create 100 demo persons and 10k random events for *team*.

        Every third person is enriched with properties from *demo_data*;
        events are written with COPY via a lazy pipe-separated row stream.
        """
        conn = self._connect()
        cur = conn.cursor()
        Person.objects.bulk_create([Person(team=team, properties={"is_demo": True}) for _ in range(0, 100)])
        distinct_ids: List[PersonDistinctId] = []
        demo_data_index = 0
        for index, person in enumerate(Person.objects.filter(team=team)):
            distinct_id = str(uuid.uuid4())
            distinct_ids.append(PersonDistinctId(team=team, person=person, distinct_id=distinct_id))
            if index % 3 == 0:
                person.properties.update(demo_data[demo_data_index])
                person.save()
                demo_data_index += 1
        # NOTE(review): every generated event reuses the *last* distinct_id
        # assigned in the loop above (generator closes over the variable) —
        # confirm whether events should instead be spread across persons.
        events_string_iterator = StringIteratorIO(
            (
                "|".join(
                    map(
                        clean_csv_value,
                        (
                            random.choice(["autocapture", "$pageview", "$hello"]),
                            json.dumps(
                                {
                                    "$current_url": base_url + random.choice(["", "1/", "2/"]),
                                    "$browser": random.choice(["Chrome", "Safari", "Firefox"]),
                                    "$lib": "web",
                                }
                            ),
                            json.dumps(
                                {
                                    "tag_name": random.choice(["a", "href"]),
                                    "attr_class": ["btn", "btn-success"],
                                    "attr_id": random.choice(["sign-up", "click"]),
                                    "text": random.choice(["Sign up", "Pay $10"]),
                                }
                            ),
                            now() - relativedelta(days=random.choice(range(7))) + relativedelta(seconds=15),
                            team.id,
                            distinct_id,
                        ),
                    )
                )
                + "\n"
                for _ in range(10000)
            )
        )
        cur.copy_from(
            events_string_iterator,
            "posthog_event",
            sep="|",
            columns=["event", "properties", "elements", "timestamp", "team_id", "distinct_id",],
        )
        PersonDistinctId.objects.bulk_create(distinct_ids)
        cur.close()
        conn.close()
    def _delete_demo_data(self, team):
        """Remove demo events, persons, funnels and actions for *team*."""
        conn = self._connect()
        cur = conn.cursor()
        people = PersonDistinctId.objects.filter(team=team, person__properties__is_demo=True)
        distinct_ids = tuple([item["distinct_id"] for item in list(people.values("distinct_id"))])
        if distinct_ids:
            # Parameterized query: psycopg2 adapts a Python tuple to a SQL
            # value list. This also fixes the old str(tuple) interpolation,
            # which emitted invalid SQL -- IN ('x',) -- when exactly one
            # distinct_id existed.
            cur.execute(
                "DELETE FROM posthog_event WHERE distinct_id IN %s",
                (distinct_ids,),
            )
        cur.close()
        conn.close()
        Person.objects.filter(team=team, properties__is_demo=True).delete()
        Funnel.objects.filter(team=team, name__contains="HogFlix").delete()
        Action.objects.filter(team=team, name__contains="HogFlix").delete()
    def _create_funnel(self, base_url, team):
        """Create the three HogFlix actions and a 3-step signup funnel."""
        homepage = Action.objects.create(team=team, name="HogFlix homepage view")
        ActionStep.objects.create(action=homepage, event="$pageview", url=base_url, url_matching="exact")
        user_signed_up = Action.objects.create(team=team, name="HogFlix signed up")
        # Fix: these two steps were previously attached to `homepage`,
        # leaving the signup/paid actions without any matching step.
        ActionStep.objects.create(
            action=user_signed_up, event="$autocapture", url="%s1/" % base_url, url_matching="exact",
        )
        user_paid = Action.objects.create(team=team, name="HogFlix paid")
        ActionStep.objects.create(
            action=user_paid, event="$autocapture", url="%s2/" % base_url, url_matching="exact",
        )
        funnel = Funnel.objects.create(team=team, name="HogFlix signup -> watching movie")
        FunnelStep.objects.create(funnel=funnel, action=homepage, order=0)
        FunnelStep.objects.create(funnel=funnel, action=user_signed_up, order=1)
        FunnelStep.objects.create(funnel=funnel, action=user_paid, order=2)
| [
"noreply@github.com"
] | mindhash.noreply@github.com |
a4c42472fa3d15a356100869d5c0683205652cd8 | 8fed701e8e04c4bf84fe9e43bbeaf5f39d416664 | /sagemaker/keras/container/games/tictactoe/keras/NNet.py | 54e973aa59905d8f9f60213abb7a068442b3ced3 | [] | no_license | ggiallo28/neural-network-genetic-algorithm | c992f9f53c0802f4168c1d5419f560fdec051db1 | 0316bb91f5dbeee8f3c352694d27ab662824d13e | refs/heads/master | 2023-04-03T23:45:31.500461 | 2019-06-30T13:17:51 | 2019-06-30T13:17:51 | 172,666,707 | 0 | 0 | null | 2023-03-24T22:55:48 | 2019-02-26T08:12:06 | Python | UTF-8 | Python | false | false | 3,058 | py | import argparse
import os
import shutil
import time
import random
import numpy as np
import math
import sys
sys.path.append('..')
from utils import *
from NeuralNet import NeuralNet
import argparse
from .TicTacToeNNet import TicTacToeNNet as onnet
"""
NeuralNet wrapper class for the TicTacToeNNet.
Author: Evgeny Tyurin, github.com/evg-tyurin
Date: Jan 5, 2018.
Based on (copy-pasted from) the NNet by SourKream and Surag Nair.
"""
#args = dotdict({
# 'lr': 0.001,
# 'dropout': 0.3,
# 'epochs': 3,
# 'batch_size': 64,
# 'cuda': True,
# 'num_channels': 512,
#})
class NNetWrapper(NeuralNet):
    """Wrapper exposing the TicTacToeNNet Keras model through the generic
    NeuralNet interface: training on (board, pi, v) examples, single-board
    prediction, checkpoint save/load and raw weight access.
    """
    def __init__(self, game, args):
        self.nnet = onnet(game, args)
        self.game = game
        # Unique-ish identifier for this wrapper (object address as hex).
        self.name = str(hex(id(self)))
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()
        # Sentinel "infinite" loss until the first call to train().
        self.loss = 99999999999
        self.args = args
    def train(self, examples):
        """
        examples: list of examples, each example is of form (board, pi, v)
        """
        end = time.time()  # wall-clock start, used for the elapsed report below
        input_boards, target_pis, target_vs = list(zip(*examples))
        input_boards = np.asarray(input_boards)
        target_pis = np.asarray(target_pis)
        target_vs = np.asarray(target_vs)
        train_history = self.nnet.model.fit(x = input_boards, y = [target_pis, target_vs], batch_size = self.args.batch_size, epochs = self.args.epochs, verbose=0)
        # NOTE(review): this stores the per-epoch loss *list*, while
        # __init__ seeds a numeric sentinel — confirm which form callers
        # of get_loss() expect.
        self.loss = train_history.history['loss']
        v0 = len(examples)
        v1 = round(time.time()-end,2)
        v2 = round(train_history.history['loss'][0],5)
        v3 = round(train_history.history['pi_loss'][0],5)
        v4 = round(train_history.history['v_loss'][0],5)
        print('Examples {} | Time Total: {}s | loss {} | pi_loss {} | v_loss {}'.format(v0,v1,v2,v3,v4))
    def predict(self, board):
        """
        board: np array with board
        """
        # Add a leading batch dimension before running the model.
        board = board[np.newaxis, :, :]
        # Run inside this wrapper's own TF session/graph so predictions work
        # even when several models coexist in one process.
        with self.nnet.session.as_default():
            with self.nnet.graph.as_default():
                pi, v = self.nnet.model.predict(board)
        return pi[0], v[0]
    def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Write the model weights to folder/filename, creating the folder."""
        filepath = os.path.join(folder, filename)
        if not os.path.exists(folder):
            os.mkdir(folder)
        self.nnet.model.save_weights(filepath)
    def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Load model weights from folder/filename; returns self for chaining."""
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py#L98
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            # Fix: the original did `raise("...")`, which raises a plain
            # string and itself fails with TypeError in Python 3.
            raise IOError("No model in path '{}'".format(filepath))
        self.nnet.model.load_weights(filepath)
        return self
    def get_weights(self):
        """Return the model weights as a numpy object array."""
        return np.array(self.nnet.model.get_weights())
    def set_weights(self, weights):
        """Install *weights* into the model; returns self for chaining."""
        self.nnet.model.set_weights(weights)
        return self
    def get_loss(self):
        """Return the loss recorded by the most recent train() call."""
        return self.loss
| [
"gianluigi.mucciolo@xpeppers.com"
] | gianluigi.mucciolo@xpeppers.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.