Dataset schema (⌀ = nullable; for string and list columns min/max are length ranges, for numeric columns they are value ranges):

| column | dtype | min | max |
|---|---|---|---|
| hexsha | string | 40 | 40 |
| size | int64 | 4 | 1.02M |
| ext | string (8 classes) | – | – |
| lang | string (1 class) | – | – |
| max_stars_repo_path | string | 4 | 209 |
| max_stars_repo_name | string | 5 | 121 |
| max_stars_repo_head_hexsha | string | 40 | 40 |
| max_stars_repo_licenses | list | 1 | 10 |
| max_stars_count | int64 ⌀ | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_path | string | 4 | 209 |
| max_issues_repo_name | string | 5 | 121 |
| max_issues_repo_head_hexsha | string | 40 | 40 |
| max_issues_repo_licenses | list | 1 | 10 |
| max_issues_count | int64 ⌀ | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_path | string | 4 | 209 |
| max_forks_repo_name | string | 5 | 121 |
| max_forks_repo_head_hexsha | string | 40 | 40 |
| max_forks_repo_licenses | list | 1 | 10 |
| max_forks_count | int64 ⌀ | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | 24 | 24 |
| content | string | 4 | 1.02M |
| avg_line_length | float64 | 1.07 | 66.1k |
| max_line_length | int64 | 4 | 266k |
| alphanum_fraction | float64 | 0.01 | 1 |
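The records below follow this schema, with each row's `content` field holding a full source file. As a minimal sketch of how such a dump can be consumed programmatically, assuming it is published as a Hugging Face-style dataset with exactly these columns (the dataset id below is a placeholder, not a confirmed source):

```python
# Hypothetical loader: "some-org/some-code-corpus" is a placeholder id; only the
# column names are taken from the schema above.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-corpus", split="train", streaming=True)
for row in ds.take(2):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # first 200 characters of the stored source file
```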
hexsha 0d26c0d88b76cd2a5fc195797cedcbaa0026ed13 | size 16,171 | ext py | lang Python
max_stars: reports/models.py | nprapps/carebot @ fc9b87869137144c11ef021e9889b3d55c837ae3 | licenses ["FSFAP"] | stars 16 | 2015-06-26T13:07:04.000Z to 2016-06-09T08:01:25.000Z
max_issues: reports/models.py | nprapps/carebot @ fc9b87869137144c11ef021e9889b3d55c837ae3 | licenses ["FSFAP"] | issues 56 | 2015-01-02T06:05:09.000Z to 2015-07-08T19:16:15.000Z
max_forks: reports/models.py | nprapps/carebot @ fc9b87869137144c11ef021e9889b3d55c837ae3 | licenses ["FSFAP"] | forks 4 | 2016-02-11T18:46:12.000Z to 2021-02-23T11:06:50.000Z
content:
#!/usr/bin/env python
from collections import OrderedDict
from copy import copy
from datetime import date, datetime, timedelta
from itertools import izip
import json
import subprocess
from clan import utils as clan_utils
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from django.utils import timezone
import requests
import yaml
import app_config
import utils
FIELD_DEFINITIONS = clan_utils.load_field_definitions()
class Query(models.Model):
"""
A clan query.
"""
slug = models.SlugField(max_length=128, primary_key=True)
name = models.CharField(max_length=128)
description = models.CharField(max_length=256, default='', blank=True)
is_comparable = models.BooleanField(default=True,
help_text='Should this query be available for cross-project comparison.')
clan_yaml = models.TextField()
class Meta:
ordering = ('name',)
verbose_name_plural = 'queries'
def __unicode__(self):
return self.name
@property
def config(self):
data = yaml.load(self.clan_yaml)
data['name'] = self.name
data['description'] = self.description
return data
class Tag(models.Model):
"""
A tag describing a project.
"""
slug = models.CharField(max_length=32, primary_key=True)
class Meta:
ordering = ('slug',)
def __unicode__(self):
return self.slug
class Project(models.Model):
"""
A project (app/site).
"""
slug = models.SlugField(max_length=128, primary_key=True)
title = models.CharField(max_length=128)
property_id = models.CharField(max_length=10, default='53470309')
domain = models.CharField(max_length=128, default='apps.npr.org', blank=True)
prefix = models.CharField(max_length=128, blank=True)
start_date = models.DateField()
queries = models.ManyToManyField(Query, through='ProjectQuery')
tags = models.ManyToManyField(Tag)
class Meta:
ordering = ('-start_date',)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('reports.views.project', args=[self.slug])
def tag_list(self):
return ','.join([tag.slug for tag in self.tags.all()])
@property
def all_time_report(self):
return self.reports.get(ndays__isnull=True)
def run_reports(self, overwrite=False):
"""
Runs all reports, optionally overwriting existing results.
"""
print 'Running reports for %s' % self.title
updated_reports = []
for report in self.reports.all():
if overwrite or not report.last_run or not report.ndays:
updated = report.run()
if updated and report.ndays:
updated_reports.append(report)
else:
print 'Skipping %s report for %s (already run).' % (report.timespan, self.title)
return updated_reports
def get_clan_config(self):
return {
'title': self.title,
'property-id': self.property_id,
'domain': self.domain or None,
'prefix': self.prefix or None,
'start-date': datetime.strftime(self.start_date, '%Y-%m-%d')
}
@receiver(models.signals.post_save, sender=Project)
def on_project_post_save(sender, instance, created, *args, **kwargs):
"""
Create default reports for a new project.
"""
if created:
default_queries = copy(app_config.DEFAULT_QUERIES)
if instance.start_date > date(2014, 6, 1):
default_queries.extend(app_config.DEFAULT_EVENT_QUERIES)
for i, query_slug in enumerate(default_queries):
ProjectQuery.objects.create(
project=instance,
query=Query.objects.get(slug=query_slug),
order=i
)
Report.objects.create(
project=instance,
ndays=None
)
for ndays in app_config.DEFAULT_REPORT_NDAYS:
Report.objects.create(
project=instance,
ndays=ndays
)
Social.objects.create(project=instance)
class ProjectQuery(models.Model):
"""
M2M relationship between Projects and Queries.
"""
project = models.ForeignKey(Project, related_name='project_queries')
query = models.ForeignKey(Query, related_name='project_queries')
order = models.PositiveIntegerField()
class Meta:
ordering = ('order',)
class Report(models.Model):
"""
A report for a given project over some number of days.
"""
project = models.ForeignKey(Project, related_name='reports')
ndays = models.PositiveIntegerField(null=True)
results_json = models.TextField()
last_run = models.DateTimeField(null=True)
pageviews = models.PositiveIntegerField(null=True)
unique_pageviews = models.PositiveIntegerField(null=True)
users = models.PositiveIntegerField(null=True)
sessions = models.PositiveIntegerField(null=True)
class Meta:
ordering = ('project__start_date', 'ndays',)
def __unicode__(self):
return '%s (%s)' % (self.project.title, self.timespan)
def get_absolute_url(self):
return reverse(
'reports.views.report',
args=[
self.project.slug,
self.ndays or 'all-time'
]
)
@property
def timespan(self):
if self.ndays:
return '%i-day%s' % (self.ndays, 's' if self.ndays > 1 else '')
return 'all-time'
def is_timely(self):
"""
Checks if it has been long enough to have data for this report.
"""
if not self.ndays:
return True
return date.today() >= self.project.start_date + timedelta(days=self.ndays)
def build_clan_yaml(self):
"""
Build YAML configuration for this report.
"""
data = self.project.get_clan_config()
if self.ndays:
data['ndays'] = self.ndays
data['queries'] = []
for project_query in ProjectQuery.objects.filter(project=self.project):
data['queries'].append(project_query.query.config)
return yaml.safe_dump(data, encoding='utf-8', allow_unicode=True)
def run(self):
"""
        Run this report, stash its results and render it out to S3.
"""
if not self.is_timely():
print 'Skipping %s report for %s (not timely).' % (self.timespan, self.project.title)
return False
print 'Running %s report for %s' % (self.timespan, self.project.title)
with open('/tmp/clan.yaml', 'w') as f:
y = self.build_clan_yaml()
f.write(y)
subprocess.call(['clan', 'report', '/tmp/clan.yaml', '/tmp/clan.json'])
with open('/tmp/clan.json') as f:
self.results_json = f.read()
self.last_run = timezone.now()
# Delete existing results
self.query_results.all().delete()
data = json.loads(self.results_json, object_pairs_hook=OrderedDict)
i = 0
# Query results
for project_query, result in izip(self.project.project_queries.all(), data['queries']):
project_title = self.project.title
query = project_query.query
query_name = query.name
metrics = result['config']['metrics']
data_types = result['data_types']
qr = QueryResult(
report=self,
query=query,
order=i,
sampled=result['sampled'],
project_title=project_title,
report_ndays=self.ndays,
query_name=query_name
)
if result['sampled']:
qr.sample_size = result['sampleSize']
qr.sample_space = result['sampleSpace']
qr.sample_percent = float(result['sampleSize']) / result['sampleSpace'] * 100
qr.save()
j = 0
# Metrics
for metric_name in metrics:
self._make_metric(
qr,
metric_name,
j,
data_types[metric_name],
result['data'][metric_name]
)
j += 1
i += 1
qr = self.query_results.get(query__slug='totals')
for metric in qr.metrics.all():
if metric.name == 'ga:pageviews':
self.pageviews = metric.total_dimension.value
elif metric.name == 'ga:uniquePageviews':
self.unique_pageviews = metric.total_dimension.value
elif metric.name == 'ga:users':
self.users = metric.total_dimension.value
elif metric.name == 'ga:sessions':
self.sessions = metric.total_dimension.value
self.save()
return True
def _make_metric(self, query_result, metric_name, order, data_type, dimensions):
"""
Create a Metric and related Dimensions.
"""
total_value = dimensions['total']
metric = MetricResult(
query_result=query_result,
order=order,
name=metric_name,
data_type=data_type,
project_title=query_result.project_title,
report_ndays=query_result.report_ndays,
query_name=query_result.query_name
)
metric.save()
i = 0
# Dimensions
for dimension_name, value in dimensions.items():
self._make_dimension(
metric,
dimension_name,
i,
data_type,
value,
total_value
)
i += 1
def _make_dimension(self, metric, dimension_name, order, data_type, value, total_value):
"""
Create a new Dimension.
"""
dimension = DimensionResult(
order=order,
name=dimension_name,
_value=value,
project_title=metric.project_title,
report_ndays=metric.report_ndays,
query_name=metric.query_name,
metric_name=metric.name,
metric_data_type=metric.data_type
)
if dimension_name != 'total':
if data_type in 'INTEGER' and total_value != 0:
dimension.percent_of_total = float(value) / int(total_value) * 100
dimension.metric = metric
dimension.save()
return dimension
class QueryResult(models.Model):
"""
The results of a query for a certain report.
"""
report = models.ForeignKey(Report, related_name='query_results')
query = models.ForeignKey(Query, related_name='query_results')
order = models.PositiveIntegerField()
sampled = models.BooleanField(default=False)
sample_size = models.PositiveIntegerField(default=0)
sample_space = models.PositiveIntegerField(default=0)
sample_percent = models.FloatField(default=100)
# Denormalized fields
project_title = models.CharField(max_length=128)
report_ndays = models.PositiveIntegerField(null=True)
query_name = models.CharField(max_length=128)
class Meta:
ordering = ('report', 'order')
class MetricResult(models.Model):
"""
The results for a specific metric.
"""
query_result = models.ForeignKey(QueryResult, related_name='metrics')
order = models.PositiveIntegerField()
name = models.CharField(max_length=128)
data_type = models.CharField(max_length=30)
# Denormalized fields
project_title = models.CharField(max_length=128)
report_ndays = models.PositiveIntegerField(null=True)
query_name = models.CharField(max_length=128)
class Meta:
ordering = ('query_result', 'order')
def __unicode__(self):
return self.name
@property
def display_name(self):
return FIELD_DEFINITIONS[self.name]['uiName']
@property
def total_dimension(self):
return self.dimensions.get(name='total')
class DimensionResult(models.Model):
"""
Results for one dimension of a metric.
"""
metric = models.ForeignKey(MetricResult, related_name='dimensions', null=True)
order = models.PositiveIntegerField()
name = models.CharField(max_length=256)
_value = models.CharField(max_length=128)
percent_of_total = models.FloatField(null=True)
# Denormalized fields
project_title = models.CharField(max_length=128)
report_ndays = models.PositiveIntegerField(null=True)
query_name = models.CharField(max_length=128)
metric_name = models.CharField(max_length=128)
metric_data_type = models.CharField(max_length=30)
class Meta:
ordering = ('metric', 'order')
@property
def value(self):
if self.metric_data_type == 'INTEGER':
return int(self._value)
elif self.metric_data_type == 'STRING':
return self._value
elif self.metric_data_type in ['FLOAT', 'PERCENT', 'TIME', 'CURRENCY']:
return float(self._value)
return None
@property
def value_formatted(self):
if self.metric_data_type == 'INTEGER':
return utils.format_comma(int(self._value))
elif self.metric_data_type == 'STRING':
return self._value
elif self.metric_data_type in ['FLOAT', 'PERCENT', 'CURRENCY']:
return '%.1f' % float(self._value)
elif self.metric_data_type == 'TIME':
return clan_utils.format_duration(float(self._value))
return None
@property
def per_1000_sessions(self):
if self.metric_data_type != 'INTEGER':
return None
try:
return float(self.value) / (self.metric.query_result.report.sessions / 1000)
except ZeroDivisionError:
return 'undefined'
class Social(models.Model):
"""
Social count data for a project. NOT timeboxed.
"""
project = models.OneToOneField(Project, primary_key=True)
facebook_likes = models.PositiveIntegerField(default=0)
facebook_shares = models.PositiveIntegerField(default=0)
facebook_comments = models.PositiveIntegerField(default=0)
twitter = models.PositiveIntegerField(default=0)
google = models.PositiveIntegerField(default=0)
pinterest = models.PositiveIntegerField(default=0)
linkedin = models.PositiveIntegerField(default=0)
stumbleupon = models.PositiveIntegerField(default=0)
last_update = models.DateTimeField(null=True)
class Meta:
ordering = ('-project__start_date',)
verbose_name = 'social count'
verbose_name_plural = 'social counts'
def __unicode__(self):
return 'Social counts for %s' % self.project.title
def total(self):
return sum([
self.facebook_shares,
self.twitter,
self.google,
self.pinterest,
self.linkedin,
self.stumbleupon
])
def refresh(self):
secrets = app_config.get_secrets()
if not self.project.domain:
return
url = 'http://%s%s' % (self.project.domain, self.project.prefix)
response = requests.get('https://free.sharedcount.com/url?apikey=%s&url=%s' % (secrets['SHAREDCOUNT_API_KEY'], url))
if response.status_code != 200:
print 'Failed to refresh social data from SharedCount: %i.' % response.status_code
return
print 'Updating social counts from SharedCount'
data = response.json()
self.facebook_likes = data['Facebook']['like_count'] or 0
self.facebook_shares = data['Facebook']['share_count'] or 0
self.facebook_comments = data['Facebook']['comment_count'] or 0
self.twitter = data['Twitter'] or 0
self.google = data['GooglePlusOne'] or 0
self.pinterest = data['Pinterest'] or 0
self.linkedin = data['LinkedIn'] or 0
self.stumbleupon = data['StumbleUpon'] or 0
self.last_update = timezone.now()
self.save()
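As a minimal standalone sketch of the configuration that `Project.get_clan_config` and `Report.build_clan_yaml` assemble above, using plain dicts instead of the Django models (every value below, including the query block and metric names, is an illustrative placeholder rather than a real carebot project):

```python
# Mirrors get_clan_config() + build_clan_yaml() with hard-coded placeholder values.
import yaml

config = {
    'title': 'Example project',            # Project.title
    'property-id': '53470309',             # default Project.property_id above
    'domain': 'apps.npr.org',
    'prefix': '/example/',
    'start-date': '2015-01-01',
    'ndays': 30,                           # omitted for the all-time report
    'queries': [                           # one Query.config dict per ProjectQuery
        {'name': 'totals', 'description': '', 'metrics': ['ga:pageviews', 'ga:sessions']},
    ],
}
print(yaml.safe_dump(config, allow_unicode=True))
```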
row stats: avg_line_length 30.396617 | max_line_length 124 | alphanum_fraction 0.615917

hexsha d8646e96253976a9d96d8570eedb6e41a72b19a3 | size 6,028 | ext py | lang Python
max_stars: nltk/corpus/reader/lin.py | SamuraiT/nltk3-alpha @ 18a1a0ff8697eaeeb5d3c0bc6dad251d5b8fe931 | licenses ["Apache-2.0"] | stars 3 | 2019-04-09T22:59:33.000Z to 2019-06-14T09:23:24.000Z
max_issues: nltk/corpus/reader/lin.py | guker/nltk @ 085399ea9d53318ae6e8568909fa55f0d905ad5a | licenses ["Apache-2.0"] | issues null
max_forks: nltk/corpus/reader/lin.py | guker/nltk @ 085399ea9d53318ae6e8568909fa55f0d905ad5a | licenses ["Apache-2.0"] | forks 2 | 2019-10-28T01:33:22.000Z to 2019-10-30T06:43:43.000Z
content:
# Natural Language Toolkit: Lin's Thesaurus
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Dan Blanchard <dblanchard@ets.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.txt
from __future__ import print_function
import re
from collections import defaultdict
from functools import reduce
from nltk.corpus.reader import CorpusReader
class LinThesaurusCorpusReader(CorpusReader):
""" Wrapper for the LISP-formatted thesauruses distributed by Dekang Lin. """
# Compiled regular expression for extracting the key from the first line of each
# thesaurus entry
_key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
@staticmethod
def __defaultdict_factory():
''' Factory for creating defaultdict of defaultdict(dict)s '''
return defaultdict(dict)
def __init__(self, root, badscore=0.0):
'''
Initialize the thesaurus.
:param root: root directory containing thesaurus LISP files
:type root: C{string}
:param badscore: the score to give to words which do not appear in each other's sets of synonyms
:type badscore: C{float}
'''
super(LinThesaurusCorpusReader, self).__init__(root, r'sim[A-Z]\.lsp')
self._thesaurus = defaultdict(LinThesaurusCorpusReader.__defaultdict_factory)
self._badscore = badscore
for path, encoding, fileid in self.abspaths(include_encoding=True, include_fileid=True):
with open(path) as lin_file:
first = True
for line in lin_file:
line = line.strip()
# Start of entry
if first:
key = LinThesaurusCorpusReader._key_re.sub(r'\1', line)
first = False
# End of entry
elif line == '))':
first = True
# Lines with pairs of ngrams and scores
else:
split_line = line.split('\t')
if len(split_line) == 2:
ngram, score = split_line
self._thesaurus[fileid][key][ngram.strip('"')] = float(score)
def similarity(self, ngram1, ngram2, fileid=None):
'''
Returns the similarity score for two ngrams.
:param ngram1: first ngram to compare
:type ngram1: C{string}
:param ngram2: second ngram to compare
:type ngram2: C{string}
:param fileid: thesaurus fileid to search in. If None, search all fileids.
:type fileid: C{string}
:return: If fileid is specified, just the score for the two ngrams; otherwise,
list of tuples of fileids and scores.
'''
# Entries don't contain themselves, so make sure similarity between item and itself is 1.0
if ngram1 == ngram2:
if fileid:
return 1.0
else:
return [(fid, 1.0) for fid in self._fileids]
else:
if fileid:
return self._thesaurus[fileid][ngram1][ngram2] if ngram2 in self._thesaurus[fileid][ngram1] else self._badscore
else:
return [(fid, (self._thesaurus[fid][ngram1][ngram2] if ngram2 in self._thesaurus[fid][ngram1]
else self._badscore)) for fid in self._fileids]
def scored_synonyms(self, ngram, fileid=None):
'''
Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram
:param ngram: ngram to lookup
:type ngram: C{string}
:param fileid: thesaurus fileid to search in. If None, search all fileids.
:type fileid: C{string}
:return: If fileid is specified, list of tuples of scores and synonyms; otherwise,
list of tuples of fileids and lists, where inner lists consist of tuples of
scores and synonyms.
'''
if fileid:
return self._thesaurus[fileid][ngram].items()
else:
return [(fileid, self._thesaurus[fileid][ngram].items()) for fileid in self._fileids]
def synonyms(self, ngram, fileid=None):
'''
Returns a list of synonyms for the current ngram.
:param ngram: ngram to lookup
:type ngram: C{string}
:param fileid: thesaurus fileid to search in. If None, search all fileids.
:type fileid: C{string}
:return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and
lists, where inner lists contain synonyms.
'''
if fileid:
return self._thesaurus[fileid][ngram].keys()
else:
return [(fileid, self._thesaurus[fileid][ngram].keys()) for fileid in self._fileids]
def __contains__(self, ngram):
'''
Determines whether or not the given ngram is in the thesaurus.
:param ngram: ngram to lookup
:type ngram: C{string}
:return: whether the given ngram is in the thesaurus.
'''
return reduce(lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]), self._fileids, False)
######################################################################
# Demo
######################################################################
def demo():
from nltk.corpus import lin_thesaurus as thes
word1 = "business"
word2 = "enterprise"
print("Getting synonyms for " + word1)
print(thes.synonyms(word1))
print("Getting scored synonyms for " + word1)
print(thes.synonyms(word1))
print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
print(thes.synonyms(word1, fileid="simN.lsp"))
print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
print(thes.synonyms(word1, fileid="simN.lsp"))
print("Similarity score for %s and %s:" % (word1, word2))
print(thes.similarity(word1, word2))
if __name__ == '__main__':
demo()
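To illustrate the `_key_re` pattern that pulls the entry key out of the first line of each thesaurus entry, here is a self-contained snippet; the sample line is an invented approximation of the LISP format, not taken from a real simN.lsp file:

```python
import re

# Same pattern as LinThesaurusCorpusReader._key_re; the input line is made up.
key_re = re.compile(r'\("?([^"]+)"? \(desc [0-9.]+\).+')
first_line = '("business" (desc 0.5) (sim'
print(key_re.sub(r'\1', first_line))   # -> business
```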
row stats: avg_line_length 38.394904 | max_line_length 127 | alphanum_fraction 0.591573

hexsha 2e1b51b158c7df234a4e37a7cf709a8e017b6979 | size 1,206 | ext py | lang Python
max_stars: hall_of_fame/10_46_59/parameters.py | Magssch/IT3105-mcts-hex @ 10e6c1e4a61561a0ddcb241e2dba4ac000fe4d8e | licenses ["MIT"] | stars null
max_issues: hall_of_fame/10_46_59/parameters.py | Magssch/IT3105-mcts-hex @ 10e6c1e4a61561a0ddcb241e2dba4ac000fe4d8e | licenses ["MIT"] | issues null
max_forks: hall_of_fame/10_46_59/parameters.py | Magssch/IT3105-mcts-hex @ 10e6c1e4a61561a0ddcb241e2dba4ac000fe4d8e | licenses ["MIT"] | forks 1 | 2022-03-28T11:14:07.000Z to 2022-03-28T11:14:07.000Z
content:
from keras.activations import linear, relu, sigmoid, tanh # noqa
from keras.losses import kl_divergence # noqa
from keras.optimizers import SGD, Adagrad, Adam, RMSprop # noqa
from game import Game
VISUALIZE_GAMES = False
FRAME_DELAY = 0.5
RUN_TRAINING = True
# RL parameters
EPISODES = 300
REPLAY_BUFFER_SIZE = 1024
# MCTS parameters
SIMULATION_TIME_OUT = 0.5 # s
UCT_C = 1 # "theoretically 1"
# Simulated World
GAME_TYPE = Game.Hex
LEDGE_BOARD = (0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 2, 0, 0, 1, 1, 1) # (0, 2, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1)
SIZE = 6 if GAME_TYPE == Game.Hex else len(LEDGE_BOARD) # 3 <= k <= 10
STATE_SIZE = 1 + (SIZE ** 2 if GAME_TYPE == Game.Hex else SIZE)
NUMBER_OF_ACTIONS = SIZE ** 2 if GAME_TYPE == Game.Hex else int((SIZE ** 2 - SIZE) / 2) + 1
# ANET
ANET_EPSILON = 0
ANET_EPSILON_DECAY = 1
ANET_LEARNING_RATE = 0.005
ANET_LOSS_FUNCTION = kl_divergence # deepnet_cross_entropy, kl_divergence
ANET_ACTIVATION_FUNCTION = sigmoid # linear, relu, sigmoid, or tanh
ANET_OPTIMIZER = SGD # SGD, Adagrad, Adam, or RMSprop
ANET_DIMENSIONS = (STATE_SIZE, 10, 10, NUMBER_OF_ACTIONS)
ANET_BATCH_SIZE = 64
# TOPP parameters
ANETS_TO_BE_CACHED = 11
NUMBER_OF_GAMES = 10
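For concreteness, the derived sizes above evaluate as follows for the Hex branch with SIZE = 6 (plain arithmetic, no project imports; the meaning of the extra +1 in STATE_SIZE is not stated in the file):

```python
# Hex branch of the formulas above, with SIZE = 6.
SIZE = 6
STATE_SIZE = 1 + SIZE ** 2        # 1 + 36 = 37
NUMBER_OF_ACTIONS = SIZE ** 2     # 36
ANET_DIMENSIONS = (STATE_SIZE, 10, 10, NUMBER_OF_ACTIONS)
print(STATE_SIZE, NUMBER_OF_ACTIONS, ANET_DIMENSIONS)   # 37 36 (37, 10, 10, 36)
```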
row stats: avg_line_length 30.923077 | max_line_length 117 | alphanum_fraction 0.710614

hexsha 0f117053ccca04c3ec1b573ec7dd249295df4964 | size 1,967 | ext py | lang Python
max_stars: analyzers/unique_ssh_grandparent/main.py | insanitybit/grapl-analyzers @ 9c49d784539ff7a2dd1381442fbd83141e92b632 | licenses ["Apache-2.0"] | stars 8 | 2019-11-15T01:09:05.000Z to 2020-04-21T19:41:13.000Z
max_issues: analyzers/unique_ssh_grandparent/main.py | grapl-security/grapl-analyzers @ 9c49d784539ff7a2dd1381442fbd83141e92b632 | licenses ["Apache-2.0"] | issues null
max_forks: analyzers/unique_ssh_grandparent/main.py | grapl-security/grapl-analyzers @ 9c49d784539ff7a2dd1381442fbd83141e92b632 | licenses ["Apache-2.0"] | forks 2 | 2020-01-31T03:20:33.000Z to 2020-03-28T23:22:00.000Z
content:
import os
import redis
from grapl_analyzerlib.analyzer import Analyzer, OneOrMany, A
from grapl_analyzerlib.counters import GrandParentGrandChildCounter
from grapl_analyzerlib.prelude import ProcessQuery, ProcessView
from grapl_analyzerlib.execution import ExecutionHit
COUNTCACHE_ADDR = os.environ['COUNTCACHE_ADDR']
COUNTCACHE_PORT = os.environ['COUNTCACHE_PORT']
r = redis.Redis(host=COUNTCACHE_ADDR, port=int(COUNTCACHE_PORT), db=0, decode_responses=True)
from typing import Any, Type
from pydgraph import DgraphClient
class RareGrandParentOfSsh(Analyzer):
def __init__(self, dgraph_client: DgraphClient, counter: GrandParentGrandChildCounter):
super(RareGrandParentOfSsh, self).__init__(dgraph_client)
self.counter = counter
@classmethod
def build(cls: Type[A], dgraph_client: DgraphClient) -> A:
counter = GrandParentGrandChildCounter(dgraph_client, cache=r)
return RareGrandParentOfSsh(dgraph_client, counter)
def get_queries(self) -> OneOrMany[ProcessQuery]:
return (
ProcessQuery()
.with_process_name(eq="ssh")
.with_parent(
ProcessQuery().with_parent(
ProcessQuery()
.with_process_name()
)
)
)
def on_response(self, response: ProcessView, output: Any):
asset_id = response.get_asset().get_hostname()
count = self.counter.get_count_for(
grand_parent_process_name=response.get_parent().get_parent().get_process_name(),
grand_child_process_name=response.get_process_name(),
)
print(f'Counted {count} for parent -> ssh')
if count <= 3:
output.send(
ExecutionHit(
analyzer_name="Rare GrandParent of SSH",
node_view=response,
risk_score=5,
lenses=asset_id,
)
)
row stats: avg_line_length 32.245902 | max_line_length 93 | alphanum_fraction 0.651246

hexsha f190f7d803401df29522a9445176036ec74af38e | size 1,528 | ext py | lang Python
max_stars: aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/DescribeETLJobStageOutputSchemaRequest.py | sdk-team/aliyun-openapi-python-sdk @ 384730d707e6720d1676ccb8f552e6a7b330ec86 | licenses ["Apache-2.0"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeETLJobStageOutputSchemaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'DescribeETLJobStageOutputSchema')
def get_StageName(self):
return self.get_query_params().get('StageName')
def set_StageName(self,StageName):
self.add_query_param('StageName',StageName)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_EtlJobId(self):
return self.get_query_params().get('EtlJobId')
def set_EtlJobId(self,EtlJobId):
self.add_query_param('EtlJobId',EtlJobId)
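A hedged usage sketch for this request class: it relies only on the setters defined above plus the standard `AcsClient` entry point from aliyunsdkcore, and every credential, region, and id is a placeholder:

```python
# Hypothetical call; access key, secret, region and EtlJobId are placeholders.
from aliyunsdkcore.client import AcsClient
from aliyunsdkemr.request.v20160408.DescribeETLJobStageOutputSchemaRequest import (
    DescribeETLJobStageOutputSchemaRequest,
)

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = DescribeETLJobStageOutputSchemaRequest()
request.set_EtlJobId('<etl-job-id>')
request.set_StageName('<stage-name>')
print(client.do_action_with_exception(request))
```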
row stats: avg_line_length 36.380952 | max_line_length 84 | alphanum_fraction 0.772906

hexsha 513961b9a7528a7d0e5007319786882fea5ca950 | size 143 | ext py | lang Python
max_stars: py/cidoc_crm_types/properties/p69i_is_associated_with.py | minorg/cidoc-crm-types @ 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | licenses ["Apache-2.0"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
from dataclasses import dataclass
@dataclass
class P69iIsAssociatedWith:
URI = "http://erlangen-crm.org/current/P69i_is_associated_with"
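A one-line usage note, assuming the cidoc_crm_types package is importable from the repository root; it simply reads the constant defined above:

```python
from cidoc_crm_types.properties.p69i_is_associated_with import P69iIsAssociatedWith

print(P69iIsAssociatedWith.URI)  # http://erlangen-crm.org/current/P69i_is_associated_with
```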
row stats: avg_line_length 20.428571 | max_line_length 67 | alphanum_fraction 0.804196

hexsha 85ab5e0c7a8bf87e7dd0bfdef3367e6d0d62ff3e | size 2,195 | ext py | lang Python
max_stars: Python/StandardLibrary/spec.py | jessicaleete/numerical_computing @ cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | licenses ["CC-BY-3.0"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
# name this file 'solutions.py'
"""Volume II Lab 1: The Standard Library
<Name>
<Class>
<Date>
"""
# Add import statements here.
# In future labs, do not modify any PROVIDED import statements.
# You may always add others as needed.
# Problem 1: Implement this function.
def prob1(l):
"""Accept a list 'l' of numbers as input and return a new list with the
minimum, maximum, and average of the contents of 'l'.
"""
pass
# Problem 2: Implement this function.
def prob2():
"""Determine which Python objects are mutable and which are immutable. Test
numbers, strings, lists, tuples, and dictionaries. Print your results to the
terminal using the print() function.
"""
pass
# Problem 3: Create a 'calculator' module and use it to implement this function.
def prob3(a,b):
"""Calculate and return the length of the hypotenuse of a right triangle.
Do not use any methods other than those that are imported from the
'calculator' module.
Parameters:
        a (float): the length of one of the sides of the triangle.
        b (float): the length of the other non-hypotenuse side of the triangle.
Returns:
The length of the triangle's hypotenuse.
"""
pass
# Problem 4: Utilize the 'matrix_multiply' module and 'matrices.npz' file to
# implement this function.
def prob4():
"""If no command line argument is given, print "No Input."
    If anything other than "matrices.npz" is given, print "Incorrect Input."
If "matrices.npz" is given as a command line argument, use functions
from the provided 'matrix_multiply' module to load two matrices, then
time how long each method takes to multiply the two matrices together.
Print your results to the terminal.
"""
pass
# Everything under this 'if' statement is executed when this file is run from
# the terminal. In this case, if we enter 'python solutions.py word' into
# the terminal, then sys.argv is ['solutions.py', 'word'], and prob4() is
# executed. Note that the arguments are parsed as strings. Do not modify.
if __name__ == "__main__":
prob4()
# ============================== END OF FILE ================================ #
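Since this file is a lab spec with stubbed functions, here is one possible sketch of Problem 1 (not the course's official solution), assuming "average" means the arithmetic mean:

```python
# One possible Problem 1 implementation: minimum, maximum, and mean of a list.
def prob1(l):
    return [min(l), max(l), sum(l) / float(len(l))]

print(prob1([4, 1, 7, 2]))   # [1, 7, 3.5]
```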
row stats: avg_line_length 32.761194 | max_line_length 80 | alphanum_fraction 0.676082

hexsha 12d768b796469fc421af99677be7547aea2caf01 | size 154 | ext py | lang Python
max_stars: python_docs/05Functions/05FunctionsAsArguments.py | Matheus-IT/lang-python-related @ dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | licenses ["MIT"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
def add(x, y):
return x + y
def times(func, x, y):
return func(x, y)
a = 2
b = 4
r = times(add, a, b)
print(' - Resultado = ', r)
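A tiny follow-on in the same spirit, reusing `times`, `a`, and `b` from the script above: any callable with a matching signature, including a lambda, can be passed as the first argument.

```python
# Passing an anonymous function to the same higher-order function.
print(times(lambda x, y: x * y, a, b))   # 8
```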
row stats: avg_line_length 11.846154 | max_line_length 28 | alphanum_fraction 0.467532

hexsha c4dae4dda319795d45d68582a332ccf50d882145 | size 1,586 | ext py | lang Python
max_stars: sport24/sport24/urls.py | sebix354/Portal-Sportowy @ 96c63e7e34e1770b071f3f99301aa0624852a091 | licenses ["MIT"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
"""sport24 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include,re_path
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url, include
from django.views.generic import TemplateView
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from users.views import MyTokenObtainPairView
urlpatterns = [
#path('social_auth/', include('drf_social_oauth2.urls', namespace='drf')),
path('admin/', admin.site.urls),
url(r'^api/', include('sport24app.urls')),
path('api/user/', include('users.urls', namespace='users')),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('api/token/', MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
row stats: avg_line_length 40.666667 | max_line_length 82 | alphanum_fraction 0.735183

hexsha 1fe36a661aa5e5840b4daddeb910af8a03f4fa60 | size 6,602 | ext py | lang Python
max_stars: scripts/bacnet/proxy_grab_bacnet_config.py | gnmerritt/volttron @ ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | licenses ["Apache-2.0"] | stars 1 | 2020-05-26T01:29:50.000Z to 2020-05-26T01:29:50.000Z
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import sys
from csv import DictWriter
import logging
import argparse
import gevent
from gevent.event import AsyncResult
from volttron.platform import get_address, get_home
from volttron.platform.agent import utils
from volttron.platform.agent.bacnet_proxy_reader import BACnetReader
from volttron.platform.keystore import KeyStore
from volttron.platform.vip.agent import errors
from volttron.platform.vip.agent.utils import build_agent
config_writer = None
utils.setup_logging()
_log = logging.getLogger(__name__)
def bacnet_response(context, results):
"""
Response function used as a callback.
This function is used during the retrieval of bacnet responses.
:param context:
:param results:
:return:
"""
global config_writer
_log.debug("Handling bacnet responses: RESULTS: {}".format(results))
message = dict(results=results)
if context is not None:
message.update(context)
# Handle the last return value of the bacnet_reader which signals the
# end of the batch by forgetting it because there will be no results
# for any of the cells. We just check the 'Reference Point Name' here
# however.
if message['results'].get('Reference Point Name', None):
config_writer.writerow(message['results'])
def main():
global agent
global config_writer
# parse the command line arguments
arg_parser = argparse.ArgumentParser(description=__doc__)
arg_parser.add_argument("device_id", type=int,
help="Device ID of the target device")
arg_parser.add_argument("--address",
help="Address of target device, may be needed to help route initial request to device.")
arg_parser.add_argument("--registry-out-file", type=argparse.FileType('w'),
help="Output registry to CSV file",
default=sys.stdout)
arg_parser.add_argument("--driver-out-file", type=argparse.FileType('w'),
help="Output driver configuration to JSON file.",
default=sys.stdout)
arg_parser.add_argument("--max-range-report", nargs='?', type=float,
help='Affects how very large numbers are reported in the "Unit Details" column of the '
'output. Does not affect driver behavior.',
default=1.0e+20)
arg_parser.add_argument("--proxy-id",
help="VIP IDENTITY of the BACnet proxy agent.",
default="platform.bacnet_proxy")
args = arg_parser.parse_args()
_log.debug("initialization")
_log.debug(" - args: %r", args)
key_store = KeyStore()
config_writer = DictWriter(args.registry_out_file,
('Reference Point Name',
'Volttron Point Name',
'Units',
'Unit Details',
'BACnet Object Type',
'Property',
'Writable',
'Index',
'Write Priority',
'Notes'))
config_writer.writeheader()
agent = build_agent(address=get_address(),
volttron_home=get_home(),
publickey=key_store.public,
secretkey=key_store.secret,
enable_store=False)
bn = BACnetReader(agent.vip, args.proxy_id, bacnet_response)
async_result = AsyncResult()
try:
bn.get_iam(args.device_id, async_result.set, args.address)
except errors.Unreachable as ure:
_log.error(ure)
_log.error("No BACnet proxy Agent running on the platform with the VIP IDENTITY {}".format(args.proxy_id))
sys.exit(1)
try:
results = async_result.get(timeout=5.0)
except gevent.Timeout:
_log.error("No response from device id {}".format(args.device_id))
sys.exit(1)
if args.address and args.address != results["address"]:
msg = "Inconsistent results from passed address ({}) and device address ({}) using results.".format(
args.address, results["address"])
_log.warning(msg)
args.address = results["address"]
elif results["address"]:
args.address = results["address"]
bn.read_device_properties(target_address=args.address, device_id=args.device_id)
agent.core.stop()
try:
main()
except Exception as e:
_log.exception("an error has occurred: %s", e)
finally:
_log.debug("finally")
row stats: avg_line_length 37.511364 | max_line_length 116 | alphanum_fraction 0.654802

hexsha 415904a6809529391400d50eb640bd3caf9659e1 | size 1,095 | ext py | lang Python
max_stars: layered.py | ground0state/ConvLSTM_PyTorch @ 5465aabe8f3192d022201b9e17a3f43071a823bd | licenses ["MIT"] | stars 1 | 2021-12-11T15:27:41.000Z to 2021-12-11T15:27:41.000Z
max_issues: layered.py | TripleEss/ConvLSTM_PyTorch @ 5465aabe8f3192d022201b9e17a3f43071a823bd | licenses ["MIT"] | issues null
max_forks: layered.py | TripleEss/ConvLSTM_PyTorch @ 5465aabe8f3192d022201b9e17a3f43071a823bd | licenses ["MIT"] | forks 1 | 2021-12-10T22:33:49.000Z to 2021-12-10T22:33:49.000Z
content:
import torch
import torch.nn as nn
from conv_lstm import ConvLSTM
class ConvLSTMLayered(nn.Module):
def __init__(self, image_size):
"""Multi layeres ConvLSTM.
Parameters
----------
image_size: (int, int)
Shape of image.
"""
super().__init__()
self.conv_lstm_1 = ConvLSTM(
in_channels=1, hidden_channels=32, kernel_size=3, stride=1, image_size=image_size)
self.conv_lstm_2 = ConvLSTM(
in_channels=32, hidden_channels=32, kernel_size=3, stride=1, image_size=image_size)
self.conv_lstm_3 = ConvLSTM(
in_channels=32, hidden_channels=32, kernel_size=3, stride=1, image_size=image_size)
self.conv2d = nn.Conv2d(32, 1, 1)
def forward(self, x):
x, _ = self.conv_lstm_1(x)
x, _ = self.conv_lstm_2(x)
x, _ = self.conv_lstm_3(x)
seq_output = []
for t in range(x.shape[1]):
tmp = self.conv2d(x[:, t, :, :, :])
seq_output.append(tmp)
output = torch.stack(seq_output, 1)
return output
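A hedged smoke-test sketch for the stacked model above, assuming the repository's `conv_lstm.ConvLSTM` accepts 5-D input shaped (batch, time, channels, height, width), which is the layout the forward pass indexes, and that it preserves spatial size; the tensor sizes are arbitrary:

```python
# Hypothetical smoke test; needs layered.py and the repo's conv_lstm module on the path.
import torch
from layered import ConvLSTMLayered

model = ConvLSTMLayered(image_size=(16, 16))
x = torch.randn(2, 5, 1, 16, 16)      # (batch, time, channels=1, height, width)
y = model(x)
print(y.shape)                        # expected torch.Size([2, 5, 1, 16, 16])
```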
row stats: avg_line_length 28.815789 | max_line_length 95 | alphanum_fraction 0.589954

hexsha 2973f78237c9edbc6ef569091af209c60cb7eb59 | size 1,138 | ext py | lang Python
max_stars: main-figures/OH-comparison.py | tommibergman/gmdd-tm5-soa @ 20168c86caeb14aed72af72b14c3b8185700ddd8 | licenses ["MIT"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
from scipy.special import erf
from settings import *
import matplotlib.pyplot as plt
import datetime
import glob
import netCDF4 as nc
import re
from mpl_toolkits.basemap import Basemap
from general_toolbox import get_gridboxarea,lonlat,write_netcdf_file,NMB
import os
from scipy.stats import pearsonr
import numpy as np
import logging
import matplotlib as mpl
from mass_budget import mapit_boundary
OH_sim={}
for i in EXPs:
OH_sim[i]=nc.Dataset(rawoutput+i+'/general_TM5_'+i+'_2010.mm.nc')['GAS_OH'][:]
tdata=(OH_sim[EXPs[0]].mean(axis=0)-OH_sim[EXPs[1]].mean(axis=0))/OH_sim[EXPs[1]].mean(axis=0)
print tdata.shape
gbarea=get_gridboxarea('TM53x2')
for i in range(0,34):
print i,(tdata[i,:,:]*gbarea[:,:]).sum()/gbarea[:,:].sum()
data=tdata[i,:,:].squeeze()
#print data[20:40,20:40],OH_sim[EXPs[0]].mean(axis=0)[0,20:40,20:40]
diverging=True
clevs=[0.9,0.95,0.97,0.975,0.98,0.985,0.99,0.995,1.00,1.005,1.01,1.015,1.02,1.025,1.03,1.05,1.1]
clevs=[-0.1,-0.05,-0.03,-0.025,-0.02,-0.015,-0.01,-0.005,0.00,0.005,0.01,0.015,0.02,0.025,0.03,0.05,0.1]
f,ax=plt.subplots(1)
mapit_boundary(data,clevs,ax,diverging=diverging)
plt.show()
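A small self-contained numpy sketch of the two reductions used above, the relative OH difference between two experiments and its grid-box-area-weighted mean, simplified to a single model level and with random stand-ins for the model fields and grid-box areas:

```python
# Area-weighted mean of a relative difference, with random placeholder data.
import numpy as np

rng = np.random.default_rng(0)
oh_exp = rng.random((12, 90, 120)) + 0.5   # stand-in for GAS_OH, experiment run
oh_ref = rng.random((12, 90, 120)) + 0.5   # stand-in for GAS_OH, reference run
gbarea = rng.random((90, 120))             # stand-in for get_gridboxarea('TM53x2')

tdata = (oh_exp.mean(axis=0) - oh_ref.mean(axis=0)) / oh_ref.mean(axis=0)
print((tdata * gbarea).sum() / gbarea.sum())
```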
row stats: avg_line_length 33.470588 | max_line_length 105 | alphanum_fraction 0.723199

hexsha d18dab35222ecb6564a408891eb6ad7fe46a6f01 | size 14,408 | ext py | lang Python
max_stars: tests/test_scvi.py | adamgayoso/scVI @ e79fccbfdc9dc9c4d790a8fb686f21b2b2889c50 | licenses ["MIT"] | stars null
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
import numpy as np
from scvi.dataset import CortexDataset, SyntheticDataset, GeneExpressionDataset
from scvi.inference import (
JointSemiSupervisedTrainer,
AlternateSemiSupervisedTrainer,
ClassifierTrainer,
UnsupervisedTrainer,
AdapterTrainer,
TotalTrainer,
)
from scvi.inference.posterior import unsupervised_clustering_accuracy
from scvi.inference.annotation import compute_accuracy_rf, compute_accuracy_svc
from scvi.models import VAE, SCANVI, VAEC, LDVAE, TOTALVI, AutoZIVAE
from scvi.models.classifier import Classifier
from scvi import set_seed
set_seed(0)
use_cuda = True
def test_cortex(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
vae, cortex_dataset, train_size=0.5, use_cuda=use_cuda
)
trainer_cortex_vae.train(n_epochs=1)
trainer_cortex_vae.train_set.reconstruction_error()
trainer_cortex_vae.train_set.differential_expression_stats()
trainer_cortex_vae.train_set.imputation(n_samples=1)
trainer_cortex_vae.test_set.imputation(n_samples=5)
trainer_cortex_vae.corrupt_posteriors(corruption="binomial")
trainer_cortex_vae.corrupt_posteriors()
trainer_cortex_vae.train(n_epochs=1)
trainer_cortex_vae.uncorrupt_posteriors()
trainer_cortex_vae.train_set.imputation_benchmark(
n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
)
full = trainer_cortex_vae.create_posterior(
vae, cortex_dataset, indices=np.arange(len(cortex_dataset))
)
x_new, x_old = full.generate(n_samples=10)
assert x_new.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes, 10)
assert x_old.shape == (cortex_dataset.nb_cells, cortex_dataset.nb_genes)
trainer_cortex_vae.train_set.imputation_benchmark(
n_samples=1, show_plot=False, title_plot="imputation", save_path=save_path
)
svaec = SCANVI(
cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
)
trainer_cortex_svaec = JointSemiSupervisedTrainer(
svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
)
trainer_cortex_svaec.train(n_epochs=1)
trainer_cortex_svaec.labelled_set.accuracy()
trainer_cortex_svaec.full_dataset.reconstruction_error()
svaec = SCANVI(
cortex_dataset.nb_genes, cortex_dataset.n_batches, cortex_dataset.n_labels
)
trainer_cortex_svaec = AlternateSemiSupervisedTrainer(
svaec, cortex_dataset, n_labelled_samples_per_class=3, use_cuda=use_cuda
)
trainer_cortex_svaec.train(n_epochs=1, lr=1e-2)
trainer_cortex_svaec.unlabelled_set.accuracy()
data_train, labels_train = trainer_cortex_svaec.labelled_set.raw_data()
data_test, labels_test = trainer_cortex_svaec.unlabelled_set.raw_data()
compute_accuracy_svc(
data_train,
labels_train,
data_test,
labels_test,
param_grid=[{"C": [1], "kernel": ["linear"]}],
)
compute_accuracy_rf(
data_train,
labels_train,
data_test,
labels_test,
param_grid=[{"max_depth": [3], "n_estimators": [10]}],
)
cls = Classifier(cortex_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
cls_trainer = ClassifierTrainer(cls, cortex_dataset)
cls_trainer.train(n_epochs=1)
cls_trainer.train_set.accuracy()
def test_synthetic_1():
synthetic_dataset = SyntheticDataset()
synthetic_dataset.cell_types = np.array(["A", "B", "C"])
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
trainer_synthetic_svaec.labelled_set.entropy_batch_mixing()
trainer_synthetic_svaec.full_dataset.knn_purity()
trainer_synthetic_svaec.labelled_set.show_t_sne(n_samples=5)
trainer_synthetic_svaec.unlabelled_set.show_t_sne(n_samples=5, color_by="labels")
trainer_synthetic_svaec.labelled_set.show_t_sne(
n_samples=5, color_by="batches and labels"
)
trainer_synthetic_svaec.labelled_set.clustering_scores()
trainer_synthetic_svaec.labelled_set.clustering_scores(prediction_algorithm="gmm")
trainer_synthetic_svaec.unlabelled_set.unsupervised_classification_accuracy()
trainer_synthetic_svaec.unlabelled_set.differential_expression_score(
synthetic_dataset.labels.ravel() == 1,
synthetic_dataset.labels.ravel() == 2,
n_samples=2,
M_permutation=10,
)
trainer_synthetic_svaec.unlabelled_set.one_vs_all_degenes(
n_samples=2, M_permutation=10
)
def test_synthetic_2():
synthetic_dataset = SyntheticDataset()
vaec = VAEC(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
)
trainer_synthetic_vaec = JointSemiSupervisedTrainer(
vaec,
synthetic_dataset,
use_cuda=use_cuda,
frequency=1,
early_stopping_kwargs={
"early_stopping_metric": "reconstruction_error",
"on": "labelled_set",
"save_best_state_metric": "reconstruction_error",
},
)
trainer_synthetic_vaec.train(n_epochs=2)
def base_benchmark(gene_dataset):
vae = VAE(gene_dataset.nb_genes, gene_dataset.n_batches, gene_dataset.n_labels)
trainer = UnsupervisedTrainer(vae, gene_dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=1)
return trainer
def ldvae_benchmark(dataset, n_epochs, use_cuda=True):
ldvae = LDVAE(
dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="normal"
)
trainer = UnsupervisedTrainer(ldvae, dataset, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
trainer.test_set.marginal_ll()
ldvae = LDVAE(dataset.nb_genes, n_batch=dataset.n_batches, latent_distribution="ln")
trainer = UnsupervisedTrainer(ldvae, dataset, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
ldvae.get_loadings()
return trainer
def totalvi_benchmark(dataset, n_epochs, use_cuda=True):
totalvae = TOTALVI(
dataset.nb_genes, len(dataset.protein_names), n_batch=dataset.n_batches
)
trainer = TotalTrainer(totalvae, dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs)
trainer.test_set.reconstruction_error()
trainer.test_set.marginal_ll()
trainer.test_set.get_protein_background_mean()
trainer.test_set.get_latent()
trainer.test_set.generate()
trainer.test_set.get_sample_dropout()
trainer.test_set.get_normalized_denoised_expression()
trainer.test_set.imputation()
return trainer
def test_synthetic_3():
gene_dataset = SyntheticDataset()
trainer = base_benchmark(gene_dataset)
adapter_trainer = AdapterTrainer(
trainer.model, gene_dataset, trainer.train_set, frequency=1
)
adapter_trainer.train(n_path=1, n_epochs=1)
def test_nb_not_zinb():
synthetic_dataset = SyntheticDataset()
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
labels_groups=[0, 0, 1],
reconstruction_loss="nb",
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
def test_poisson_not_zinb():
synthetic_dataset = SyntheticDataset()
svaec = SCANVI(
synthetic_dataset.nb_genes,
synthetic_dataset.n_batches,
synthetic_dataset.n_labels,
labels_groups=[0, 0, 1],
reconstruction_loss="poisson",
)
trainer_synthetic_svaec = JointSemiSupervisedTrainer(
svaec, synthetic_dataset, use_cuda=use_cuda
)
trainer_synthetic_svaec.train(n_epochs=1)
def test_classifier_accuracy(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cls = Classifier(cortex_dataset.nb_genes, n_labels=cortex_dataset.n_labels)
cls_trainer = ClassifierTrainer(
cls,
cortex_dataset,
metrics_to_monitor=["accuracy"],
frequency=1,
early_stopping_kwargs={
"early_stopping_metric": "accuracy",
"save_best_state_metric": "accuracy",
},
)
cls_trainer.train(n_epochs=2)
cls_trainer.train_set.accuracy()
def test_LDVAE(save_path):
synthetic_datset_one_batch = SyntheticDataset(n_batches=1)
ldvae_benchmark(synthetic_datset_one_batch, n_epochs=1, use_cuda=False)
synthetic_datset_two_batches = SyntheticDataset(n_batches=2)
ldvae_benchmark(synthetic_datset_two_batches, n_epochs=1, use_cuda=False)
def test_sampling_zl(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cortex_vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae, cortex_dataset, train_size=0.5, use_cuda=use_cuda
)
trainer_cortex_vae.train(n_epochs=2)
cortex_cls = Classifier((cortex_vae.n_latent + 1), n_labels=cortex_dataset.n_labels)
trainer_cortex_cls = ClassifierTrainer(
cortex_cls, cortex_dataset, sampling_model=cortex_vae, sampling_zl=True
)
trainer_cortex_cls.train(n_epochs=2)
trainer_cortex_cls.test_set.accuracy()
def test_annealing_procedures(save_path):
cortex_dataset = CortexDataset(save_path=save_path)
cortex_vae = VAE(cortex_dataset.nb_genes, cortex_dataset.n_batches)
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_epochs_kl_warmup=1,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight >= 0.99, "Annealing should be over"
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_epochs_kl_warmup=5,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight <= 0.99, "Annealing should be proceeding"
# iter
trainer_cortex_vae = UnsupervisedTrainer(
cortex_vae,
cortex_dataset,
train_size=0.5,
use_cuda=use_cuda,
n_iter_kl_warmup=1,
n_epochs_kl_warmup=None,
)
trainer_cortex_vae.train(n_epochs=2)
assert trainer_cortex_vae.kl_weight >= 0.99, "Annealing should be over"
def test_differential_expression(save_path):
dataset = CortexDataset(save_path=save_path)
n_cells = len(dataset)
all_indices = np.arange(n_cells)
vae = VAE(dataset.nb_genes, dataset.n_batches)
trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=2)
post = trainer.create_posterior(vae, dataset, shuffle=False, indices=all_indices)
# Sample scale example
px_scales = post.scale_sampler(
n_samples_per_cell=4, n_samples=None, selection=all_indices
)["scale"]
assert (
px_scales.shape[1] == dataset.nb_genes
), "posterior scales should have shape (n_samples, n_genes)"
# Differential expression different models
idx_1 = [1, 2, 3]
idx_2 = [4, 5, 6, 7]
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="vanilla",
use_permutation=True,
M_permutation=100,
)
de_dataframe = post.differential_expression_score(
idx1=idx_1,
idx2=idx_2,
n_samples=10,
mode="change",
use_permutation=True,
M_permutation=100,
)
print(de_dataframe.keys())
assert (
de_dataframe["confidence_interval_0.5_min"]
<= de_dataframe["confidence_interval_0.5_max"]
).all()
assert (
de_dataframe["confidence_interval_0.95_min"]
<= de_dataframe["confidence_interval_0.95_max"]
).all()
# DE estimation example
de_probabilities = de_dataframe.loc[:, "proba_de"]
assert ((0.0 <= de_probabilities) & (de_probabilities <= 1.0)).all()
def test_totalvi(save_path):
synthetic_dataset_one_batch = SyntheticDataset(n_batches=1)
totalvi_benchmark(synthetic_dataset_one_batch, n_epochs=1, use_cuda=use_cuda)
synthetic_dataset_two_batches = SyntheticDataset(n_batches=2)
totalvi_benchmark(synthetic_dataset_two_batches, n_epochs=1, use_cuda=use_cuda)
def test_autozi(save_path):
data = SyntheticDataset(n_batches=1)
for disp_zi in ["gene", "gene-label"]:
autozivae = AutoZIVAE(
n_input=data.nb_genes,
dispersion=disp_zi,
zero_inflation=disp_zi,
n_labels=data.n_labels,
)
trainer_autozivae = UnsupervisedTrainer(
model=autozivae, gene_dataset=data, train_size=0.5
)
trainer_autozivae.train(n_epochs=2, lr=1e-2)
trainer_autozivae.test_set.elbo()
trainer_autozivae.test_set.reconstruction_error()
trainer_autozivae.test_set.marginal_ll()
def test_multibatches_features():
data = [
np.random.randint(1, 5, size=(20, 10)),
np.random.randint(1, 10, size=(20, 10)),
np.random.randint(1, 10, size=(20, 10)),
np.random.randint(1, 10, size=(30, 10)),
]
dataset = GeneExpressionDataset()
dataset.populate_from_per_batch_list(data)
vae = VAE(dataset.nb_genes, dataset.n_batches)
trainer = UnsupervisedTrainer(vae, dataset, train_size=0.5, use_cuda=use_cuda)
trainer.train(n_epochs=2)
trainer.test_set.imputation(n_samples=2, transform_batch=0)
trainer.train_set.imputation(n_samples=2, transform_batch=[0, 1, 2])
def test_deprecated_munkres():
y = np.array([0, 1, 0, 1, 0, 1, 1, 1])
y_pred = np.array([0, 0, 0, 0, 1, 1, 1, 1])
reward, assignment = unsupervised_clustering_accuracy(y, y_pred)
assert reward == 0.625
assert (assignment == np.array([[0, 0], [1, 1]])).all()
y = np.array([1, 1, 2, 2, 0, 0, 3, 3])
y_pred = np.array([1, 1, 2, 2, 3, 3, 0, 0])
reward, assignment = unsupervised_clustering_accuracy(y, y_pred)
assert reward == 1.0
assert (assignment == np.array([[0, 3], [1, 1], [2, 2], [3, 0]])).all()
row stats: avg_line_length 34.718072 | max_line_length 88 | alphanum_fraction 0.71731

hexsha b5569a8efddf40e28a45fbdd20707b80e06c956e | size 3,474 | ext py | lang Python
max_stars: knapsack_benchmark/knapsack.py | vladium/study_julia_with_me @ 8c7a0b614098091ef284ebee1250c69f222fa126 | licenses ["MIT"] | stars 2 | 2019-10-16T00:46:15.000Z to 2021-07-02T17:08:46.000Z
max_issues: same path and repo | issues null
max_forks: same path and repo | forks null
content:
import sys
if sys.hexversion < 0x3000000: raise RuntimeError("expecting python v3+ instead of %x" % sys.hexversion)
import array
import numpy as np
import time
# ----------------------------------------------------------------------------
'''
this is an almost one-for-one translation of the Julia version, see knapsack.jl
for more comments.
running this benchmark from command line:
>python3 -O ...knapsack.py
'''
class item(object):
def __init__(self, value, weight):
self.value = value
self.weight = weight
def __repr__(self):
return "(" + str(self.value) + ", " + str(self.weight) + ")"
# ............................................................................
def opt_value(W, items):
n = len(items)
V = [0 for i in range(W)]
V_prev = [0 for i in range(W)]
# use the next 2 lines to use python "native" arrays instead:
# (for me, this choice is slower 2x than plain python lists)
# V = array.array('q', [0 for i in range(W)])
# V_prev = array.array('q', [0 for i in range(W)])
# use the next 2 lines to use numpy arrays instead:
# (for me, this choice is nearly 2x slower)
# V = np.array([0 for i in range(W)], dtype="i8")
# V_prev = np.array([0 for i in range(W)], dtype="i8")
for w in range(items[0].weight, W + 1):
V[w - 1] = items[0].value;
for j in range(1, n):
V, V_prev = V_prev, V
item_j = items[j]
for w in range(1, W + 1):
V_without_item_j = V_prev[w - 1]
V_allow_item_j = (V_without_item_j if w < item_j.weight
else (item_j.value + (V_prev[w - 1 - item_j.weight] if w != item_j.weight
else 0)))
V[w - 1] = max(V_allow_item_j, V_without_item_j)
return V[W - 1]
# ............................................................................
# some contortions are needed in python to ensure uint64_t arithmetic:
_13 = np.uint64(13)
_7 = np.uint64(7)
_17 = np.uint64(17)
def xorshift_rand(seed):
assert seed != 0
x = np.uint64(seed)
def _next():
nonlocal x
x ^= (x << _13);
x ^= (x >> _7);
x ^= (x << _17);
return int(x);
return _next
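# quick illustration (not in the original file): the generator is fully
# determined by its seed and every produced value fits in 64 bits.
def _xorshift_example():
    a, b = xorshift_rand(42), xorshift_rand(42)
    first = [a() for _ in range(5)]
    assert first == [b() for _ in range(5)]
    assert all(0 <= v < 2 ** 64 for v in first)
    return first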
def make_random_data(W, seed):
assert W > 1000
n = W // 100
rng = xorshift_rand(seed)
items = []
for i in range(n):
v = rng() % 1000
w = 1 + rng() % (2 * W)
items.append(item(v, w))
return W, items
# ............................................................................
def run(repeats = 5):
times = [0.0 for i in range(repeats)]
seed = 12345
for W in [5000, 10000, 20000, 40000, 80000]:
for repeat in range(repeats):
seed += 1
W, items = make_random_data(W, seed)
start = time.time_ns ()
V = opt_value(W, items)
stop = time.time_ns()
times[repeat] = (stop - start) / 1e9
# print("V = %d, time = %f" % (V, times[repeat]))
times.sort()
print("python, %d, %f" % (W, times[repeats // 2]))
# ............................................................................
if __name__ == '__main__':
run ()
# ----------------------------------------------------------------------------
| 27.792 | 104 | 0.451641 |
3fe24f969858f73982f24586d89154e611426fe5 | 6,793 | py | Python | train/login/Login.py | 1131041715/12306lmqc | e2049fd5393bacb08401737fa314ef5e7ab962e6 | ["MIT"] | 1 | 2020-10-22T03:36:46.000Z | 2020-10-22T03:36:46.000Z | train/login/Login.py | Any1131041715/12306lmqc | e2049fd5393bacb08401737fa314ef5e7ab962e6 | ["MIT"] | 5 | 2021-03-31T19:41:44.000Z | 2021-12-13T20:39:28.000Z | train/login/Login.py | 1131041715/12306lmqc | e2049fd5393bacb08401737fa314ef5e7ab962e6 | ["MIT"] | null | null | null |
import sys
sys.path.append("./")
import copy
import json
import time
from collections import OrderedDict
import requests
from conf.urls_conf import loginUrls
from conf.constant import CAPTCHA_CHECK_METHOD_HAND, CAPTCHA_CHECK_METHOD_THREE
from train.login.Capthca import Captcha
from utils import Utils
from utils.Log import Log
from train.cookie.getCookie import get12306Cookie
# from net.NetUtils import EasyHttp
from net.TestNet import EasyHttp
def loginLogic(func):
    def wrapper(*args, **kw):
        result = False
        msg = ''
        for count in range(20):
            Log.v('第%s次尝试获取验证图片' % str(count + 1))
            result, msg = func(*args, **kw)
            if result:
                break
            Log.w(msg)
        return result, msg
    return wrapper
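# loginLogic above is a retry decorator: it calls the wrapped login routine up
# to 20 times and stops at the first success. The same pattern in isolation
# (illustrative only, the names below are not part of this project):
def retry(times=20):
    def decorator(func):
        def wrapper(*args, **kw):
            ok, msg = False, ''
            for _ in range(times):
                ok, msg = func(*args, **kw)
                if ok:
                    break
            return ok, msg
        return wrapper
    return decorator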
class Login(object):
__LOGIN_SUCCESS_RESULT_CODE = 0
def _passportRedirect(self):
params = {
'redirect': '/otn/login/userLogin',
}
EasyHttp.send(self._urlInfo['userLoginRedirect'])
def _userLogin(self):
params = {
'_json_att': '',
}
EasyHttp.send(self._urlInfo['userLogin'])
def _uamtk(self):
jsonRet = EasyHttp.send(self._urlInfo['uamtk'], data={'appid': 'otn'})
def isSuccess(response):
return response['result_code'] == 0 if response and 'result_code' in response else False
return isSuccess(jsonRet), \
jsonRet['result_message'] if jsonRet and 'result_message' in jsonRet else 'no result_message', \
jsonRet['newapptk'] if jsonRet and 'newapptk' in jsonRet else 'no newapptk'
def _uamtk_static(self):
EasyHttp.send(self._urlInfo['conf'])
jsonRet = EasyHttp.send(self._urlInfo['uamtk-static'], data={'appid': 'otn'})
def isSuccess(response):
return response['result_code'] == 0 if response and 'result_code' in response else False
return isSuccess(jsonRet), \
jsonRet['result_message'] if jsonRet and 'result_message' in jsonRet else 'no result_message', \
jsonRet['newapptk'] if jsonRet and 'newapptk' in jsonRet else 'no newapptk'
def _uamauthclient(self, apptk):
jsonRet = EasyHttp.send(self._urlInfo['uamauthclient'], data={'tk': apptk})
# print(jsonRet)
def isSuccess(response):
return response['result_code'] == 0 if response and 'result_code' in response else False
return isSuccess(jsonRet), '%s:%s' % (jsonRet['username'], jsonRet['result_message']) if jsonRet \
else 'uamauthclient failed'
def login(self, userName, userPwd, autoCheck=1):
self._urlInfo = loginUrls['normal']
status, msg = self._login_init()
if not status:
return status, msg
self._uamtk_static()
result, msg = self._login(userName, userPwd, autoCheck)
if Utils.check(result, msg):
return result, msg
return False, '登录失败'
@loginLogic
def _login(self, userName, userPwd, autoCheck=1):
return self._loginNormal(userName, userPwd,autoCheck)
def _loginNormal(self, userName, userPwd, autoCheck=1):
if autoCheck == CAPTCHA_CHECK_METHOD_THREE:
results, verify = Captcha().verifyCodeAuto()
elif autoCheck == CAPTCHA_CHECK_METHOD_HAND:
results, verify = Captcha().verifyCaptchaByHand()
else:
results, verify = Captcha().verifyCodeAutoByMyself()
if not verify:
return False, '验证码识别错误!'
Log.v('验证码识别成功')
payload = OrderedDict()
payload['username'] = userName
payload['password'] = userPwd
payload['appid'] = 'otn'
payload['answer'] = results
response = EasyHttp.post_custom(self._urlInfo['login'], data=payload)
def isLoginSuccess(responseJson):
return 0 == responseJson['result_code'] if responseJson and 'result_code' in responseJson else False, \
responseJson[
'result_message'] if responseJson and 'result_message' in responseJson else '登录失败'
if response.status_code != requests.codes.ok:
return False, "登录请求被强制重定向,准备重试..."
result, msg = isLoginSuccess(response.json())
if not result :
return False, msg
self._userLogin()
self._passportRedirect()
result, msg, apptk = self._uamtk()
if not Utils.check(result, msg):
return False, 'uamtk failed'
return self._uamauthclient(apptk)
def loginOut(self):
EasyHttp.send(self._urlInfo['loginOut'])
self._init()
return self._uamtk()
def _init(self):
EasyHttp.send(self._urlInfo['init'])
def _login_init(self):
return self._handle_device_code_manual()
def _handle_device_code_auto(self):
status, cookie = get12306Cookie()
if not status:
return False, cookie
EasyHttp.setCookies(RAIL_DEVICEID=cookie['RAIL_DEVICEID'], RAIL_EXPIRATION=cookie['RAIL_EXPIRATION'])
return True, '获取设备指纹成功'
def _handle_device_code_manual(self):
        # clumsy workaround: manually refresh the deviceid url with a fresh timestamp on every call
url_info = copy.deepcopy(self._urlInfo["getDevicesId"])
url_info['url'] = self._urlInfo["getDevicesId"]['url'] + str(int(time.time()*1000))
devices_id_rsp = EasyHttp.get_custom(url_info)
# params = {"algID": request_alg_id(self._urlInfo['getJS']), "timestamp": int(time.time() * 1000)}
# params = dict(params, **get_hash_code_params())
# devices_id_rsp = EasyHttp.send(self._urlInfo["getDevicesId"],params=params)
if devices_id_rsp:
callback = devices_id_rsp.text[18:-2]
# callback = devices_id_rsp.replace("callbackFunction('", '').replace("')", '')
try:
text = json.loads(callback)
devices_id = text.get('dfp')
exp = text.get('exp')
except Exception as e:
return False,'获取设备指纹失败'
EasyHttp.setCookies(RAIL_DEVICEID=devices_id, RAIL_EXPIRATION=exp)
# Log.d('设备Id:%s'%devices_id)
Log.v('获取设备指纹成功')
return True, '获取设备指纹成功'
EasyHttp.send(self._urlInfo['index'])
EasyHttp.send(self._urlInfo['loginInit'])
return False,'获取设备指纹失败'
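# The device-fingerprint endpoint answers with a JSONP-style wrapper such as
# callbackFunction('{"dfp": "...", "exp": "..."}'); the slice [18:-2] above
# strips that wrapper by position. A less position-dependent sketch (just an
# illustration, not used by this class) could use a regular expression:
def _strip_jsonp_sketch(text):
    import json
    import re
    match = re.search(r"\('(.+)'\)", text)
    return json.loads(match.group(1)) if match else None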
if __name__ == '__main__':
# login = Login()
# login.login(USER_NAME, USER_PWD)
# time.sleep(3)
# print(login.loginOut())
devicesIdUrl = copy.deepcopy(loginUrls['normal']["getDevicesId"])
devices_id_rsp = EasyHttp.get_custom(devicesIdUrl)
print(devices_id_rsp.text)
text = devices_id_rsp.text.replace("callbackFunction('",'').replace("')",'')
print(text)
j = json.loads(text)
print(j['exp'])
pass
| 34.658163 | 115 | 0.624466 |
41bc793e2e4ff657f9ad1b30cb9a832a5a1e23f3 | 74 | py | Python | sparts/gen/sparts_examples/__init__.py | facebook/sparts | c03df928677444ad638d10fa96f4144ca4d644e1 | ["BSD-3-Clause"] | 220 | 2015-01-04T07:17:31.000Z | 2018-10-10T09:06:45.000Z | sparts/gen/sparts_examples/__init__.py | martina6hall/sparts | c03df928677444ad638d10fa96f4144ca4d644e1 | ["BSD-3-Clause"] | 49 | 2015-02-02T22:25:08.000Z | 2017-07-26T05:46:10.000Z | sparts/gen/sparts_examples/__init__.py | martina6hall/sparts | c03df928677444ad638d10fa96f4144ca4d644e1 | ["BSD-3-Clause"] | 39 | 2015-03-11T03:12:41.000Z | 2018-10-10T09:07:11.000Z |
__all__ = ['ttypes', 'constants', 'SpartsFooService', 'SpartsBarService']
| 37 | 73 | 0.72973 |
5767c1e00965127197d5a4f539824ca26d261e64 | 260 | py | Python | Python/projecteuler/10.py | caa06d9c/Python-examples | 9d90b9a9adfc343f9fdfab814cfc2b05219d6920 | ["MIT"] | 5 | 2019-10-28T09:56:26.000Z | 2021-09-03T04:51:18.000Z | Python/projecteuler/10.py | caa06d9c/Python-examples | 9d90b9a9adfc343f9fdfab814cfc2b05219d6920 | ["MIT"] | null | null | null | Python/projecteuler/10.py | caa06d9c/Python-examples | 9d90b9a9adfc343f9fdfab814cfc2b05219d6920 | ["MIT"] | 4 | 2019-10-28T09:56:30.000Z | 2021-09-03T04:51:23.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://projecteuler.info/problem=10
# 142913828922
from lib import get_primes
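# `get_primes` lives in a local helper module; assuming it returns all primes
# strictly below its argument, a minimal sieve of Eratosthenes sketch with the
# same contract would be:
def sieve_primes(limit):
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if is_prime[i]:
            is_prime[i * i::i] = [False] * len(range(i * i, limit, i))
    return [n for n, prime in enumerate(is_prime) if prime]
# sum(sieve_primes(2000000)) == 142913828922 (the answer noted above)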
if __name__ == '__main__':
primes = get_primes(2000000)
res = 0
for el in primes:
res += el
print(res)
| 15.294118 | 38 | 0.623077 |
a12efed5a2649f29868abdcbe0ce89ffea8ef4ac | 1,395 | py | Python | setup.py | sikattin/py_mysql_backup | 6324d3e467d2265c6cba6c1fdc54f2d15cf3a687 | ["BSD-2-Clause"] | null | null | null | setup.py | sikattin/py_mysql_backup | 6324d3e467d2265c6cba6c1fdc54f2d15cf3a687 | ["BSD-2-Clause"] | null | null | null | setup.py | sikattin/py_mysql_backup | 6324d3e467d2265c6cba6c1fdc54f2d15cf3a687 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
requirement = ['py_mysql',
'datetime_skt',
'osfile',
'mylogger',
'iomod',
'connection',
'datatransfer',
's3_transfer']
description = 'MySQL Backup Script.\n' \
'================================================' \
'This script is connecting to MySQL server, ' \
'executes mysqldump and saves dumpfile to specified directory.\n' \
'then, transfer dumpfiles to specified remote host.\n' \
'config file path is ' \
'<python3 lib directory>/site|dist-packages/daily_backup/config/backup.json\n' \
'run following command to executes this scripts!(must be privileged user)\n' \
'python3 <python3 lib directory>/dist-packages/daily_backup/local_backup.py'
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='mysql_backup',
version='1.6',
description=description,
long_description=readme,
author='Takeki Shikano',
author_email='',
    install_requires=requirement,  # setuptools expects install_requires, not require
url=None,
license='MIT',
packages=find_packages(exclude=('tests', 'docs')),
package_data={'daily_backup': ['config/backup.json', 'README']}
)
| 30.326087 | 94 | 0.565591 |
eef8f2a82605f04b477a726305e09e5a74007b53 | 4,016 | py | Python | tests/test_units.py | florczakraf/pytest-logger | c69c1f8252493661a5f89867a8b35eb6e3b20bb1 | ["MIT"] | null | null | null | tests/test_units.py | florczakraf/pytest-logger | c69c1f8252493661a5f89867a8b35eb6e3b20bb1 | ["MIT"] | null | null | null | tests/test_units.py | florczakraf/pytest-logger | c69c1f8252493661a5f89867a8b35eb6e3b20bb1 | ["MIT"] | null | null | null |
import logging
import argparse
import pytest
import pytest_logger.plugin as plugin
def test_sanitize_nodeid():
assert plugin._sanitize_nodeid('test_p.py::test_echo') == 'test_p.py/test_echo'
assert plugin._sanitize_nodeid('classtests/test_y.py::TestClass::()::test_class') == \
'classtests/test_y.py/TestClass.test_class'
assert plugin._sanitize_nodeid('parametrictests/test_z.py::test_param[2-abc]') == \
'parametrictests/test_z.py/test_param-2-abc'
assert plugin._sanitize_nodeid('parametrictests/test_z.py::test_param[4.127-de]') == \
'parametrictests/test_z.py/test_param-4.127-de'
def test_sanitize_level():
assert plugin._sanitize_level(logging.INFO) == logging.INFO
assert plugin._sanitize_level('15') == 15
assert plugin._sanitize_level('warn') == logging.WARN
assert plugin._sanitize_level('FATAL') == logging.FATAL
with pytest.raises(TypeError):
plugin._sanitize_level('WARN ')
with pytest.raises(TypeError):
plugin._sanitize_level('unknown')
with pytest.raises(TypeError):
plugin._sanitize_level(1.0)
assert plugin._sanitize_level('WARN ', raises=False) is None
def test_log_option_parser():
loggers = [
(['a', 'b', 'c'], 20, 10),
(['d'], 30, 15),
(['e', 'f.g.h'], 31, 16),
]
assert plugin._log_option_parser([])('') == []
assert plugin._log_option_parser(loggers)('') == []
assert plugin._log_option_parser(loggers)('a') == [('a', 20)]
assert plugin._log_option_parser(loggers)('a.info') == [('a', logging.INFO)]
assert plugin._log_option_parser(loggers)('a.19') == [('a', 19)]
assert plugin._log_option_parser(loggers)('f.g.h') == [('f.g.h', 31)]
assert plugin._log_option_parser(loggers)('f.g.h.INFO') == [('f.g.h', logging.INFO)]
assert plugin._log_option_parser(loggers)('a,b') == [('a', 20), ('b', 20)]
assert plugin._log_option_parser(loggers)('a,d.19,e.info') == [('a', 20), ('d', 19), ('e', logging.INFO)]
with pytest.raises(argparse.ArgumentTypeError) as e:
plugin._log_option_parser(loggers)('a.unknown')
assert str(e.value) == 'wrong level, expected (INFO, warn, 15, ...), got "unknown"'
with pytest.raises(argparse.ArgumentTypeError) as e:
plugin._log_option_parser(loggers)('alien.info')
assert str(e.value) == 'wrong logger, expected (a, b, c, d, e, f.g.h), got "alien"'
with pytest.raises(argparse.ArgumentTypeError) as e:
plugin._log_option_parser(loggers)('alien.unknown')
assert str(e.value) == 'wrong logger, expected (a, b, c, d, e, f.g.h), got "alien.unknown"'
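# A rough sketch (not the plugin's actual implementation) of how an option
# string like "a,d.19,e.info" can be split into (logger, level) pairs; the
# real parser additionally validates names against the configured loggers and
# applies per-logger default levels:
def _parse_log_option_sketch(value, default_level=logging.INFO):
    pairs = []
    for token in filter(None, value.split(',')):
        name, _, suffix = token.rpartition('.')
        if suffix.isdigit():
            pairs.append((name, int(suffix)))
        elif isinstance(logging.getLevelName(suffix.upper()), int):
            pairs.append((name, logging.getLevelName(suffix.upper())))
        else:
            pairs.append((token, default_level))
    return pairs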
def test_set_formatter_class():
logcfg = plugin.LoggerConfig()
logcfg.set_formatter_class(logging.Formatter)
logcfg.set_formatter_class(plugin.DefaultFormatter)
with pytest.raises(ValueError) as e:
logcfg.set_formatter_class(plugin.DefaultFormatter())
assert str(e.value) == 'Got a formatter instance instead of its class !'
with pytest.raises(ValueError) as e:
logcfg.set_formatter_class(plugin.LoggerState)
assert str(e.value) == 'Formatter should be a class inheriting from logging.Formatter'
with pytest.raises(TypeError) as e:
logcfg.set_formatter_class(10)
assert str(e.value) == 'issubclass() arg 1 must be a class'
def test_loggers_from_logcfg_empty():
logcfg = plugin.LoggerConfig()
loggers = plugin._loggers_from_logcfg(logcfg, [])
assert loggers.stdout == []
assert loggers.file == []
assert not loggers
def test_loggers_from_logcfg():
logcfg = plugin.LoggerConfig()
logcfg.add_loggers(['a', 'b', 'c'], stdout_level=logging.ERROR, file_level='warn')
logcfg.add_loggers(['d'], stdout_level='10')
log_option = [('b', logging.FATAL), 'd']
loggers = plugin._loggers_from_logcfg(logcfg, log_option)
assert loggers.stdout == [('b', logging.FATAL), ('d', 10)]
assert loggers.file == [('a', logging.WARN), ('b', logging.WARN), ('c', logging.WARN), ('d', 0)]
assert loggers
| 40.565657 | 109 | 0.673556 |
3a5ff9f188951e25bf56486884a1d2d73d8684df | 37,257 | py | Python | .history/src/_fighter_20190422092829.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | ["MIT"] | 1 | 2019-12-25T10:25:30.000Z | 2019-12-25T10:25:30.000Z | .history/src/_fighter_20190422092829.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | ["MIT"] | 1 | 2019-12-25T10:27:15.000Z | 2019-12-25T10:27:15.000Z | .history/src/_fighter_20190422092829.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | ["MIT"] | 1 | 2019-12-25T10:50:05.000Z | 2019-12-25T10:50:05.000Z |
from pygame_functions import *
import fightScene
import engine
import menu
class Fighter:
fighterNames = ["Sub-Zero", "Scorpion"]
fightMoves = [["w", "s", "a", "d"], ["up", "down", "left", "right"]]
combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]]
danceLimit = 7
walkLimit = 9
jumpLimit = 3
crouchLimit = 3
punchLimit = [3, 11, 3, 5, 3]
kickLimit = [7, 9, 7, 6, 3]
hitLimit = [3, 3, 6, 2, 3, 14, 11, 10]
blockLimit = 3
specialLimit = [4,7]
victoryLimit = 3
fatalityLimit = 20
dizzyLimit = 7
    # indexing
# moves
dance = 0
walk = 1
jump = 2
crouch = 3
# punches
Apunch = 4 # soco fraco
Bpunch = 5 # soco forte
Cpunch = 6 # soco agachado fraco
Dpunch = 7 # soco agachado forte: gancho
# kicks
Akick = 8 # chute fraco
Bkick = 9 # chute forte
Ckick = 10 # chute agachado fraco
Dkick = 11 # chute agachado forte: banda
# hits
Ahit = 12 # soco fraco
Bhit = 13 # chute fraco
Chit = 14 # soco forte
Dhit = 15 # chute agrachado fraco
Ehit = 16 # soco agachado fraco
Fhit = 17 # chute forte e soco forte agachado (gancho)
Ghit = 18 # chute agachado forte: banda
#Hhit = 19 # specialMove
#fatalityHit = 20 # fatality hit
# block
Ablock = 19
#Bblock = 13
# special move
special = 20
# fatality
fatality = 24
def __init__(self, id, scenario):
self.fighterId = id
self.name = self.fighterNames[id]
self.move = self.fightMoves[id]
self.combat = self.combatMoves[id]
# Position
self.x = 150+id*500
if scenario == 1:
self.y = 350
elif scenario == 2:
self.y = 370
elif scenario == 3:
self.y = 400
elif scenario == 4:
self.y = 370
elif scenario == 5:
self.y = 380
elif scenario == 6:
self.y = 380
elif scenario == 7:
self.y = 360
elif scenario == 8:
self.y = 395
# Loading sprites
self.spriteList = []
# moves
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dance.png', self.danceLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/walk.png', self.walkLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/jump.png', self.jumpLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/crouch.png', self.crouchLimit))
# Punch sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Apunch.png', self.punchLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bpunch.png', self.punchLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Cpunch.png', self.punchLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dpunch.png', self.punchLimit[3]))
# Kick sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Akick.png', self.kickLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bkick.png', self.kickLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ckick.png', self.kickLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dkick.png', self.kickLimit[3]))
# Hit sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ahit.png', self.hitLimit[0])) # soco fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bhit.png', self.hitLimit[1])) # chute fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Chit.png', self.hitLimit[2])) # soco forte
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dhit.png', self.hitLimit[3])) # chute agrachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ehit.png', self.hitLimit[4])) # soco agachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Fhit.png', self.hitLimit[5])) # chute forte e soco forte agachado (gancho)
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ghit.png', self.hitLimit[6])) # chute agachado forte: banda
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Hhit.png', self.hitLimit[7])) # specialMove
# blocking sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ablock.png', self.blockLimit)) # defesa em pé
#self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bblock.png', self.blockLimit)) # defesa agachado
# special sprite ----------------------------------
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Special.png', self.specialLimit[self.fighterId])) # Especial
self.act()
def act(self):
# Combat control
combat = False
block = False
alive = False
fatality = False
dizzyCounter = 1
dizzyCounterAux = 1
fatalityCounter = 8
fatalityCounterAux = 1
# Control reflection var
reflection = False
# Dance vars
self.dancing = True
self.frame_dance = 0
self.dance_step = 1
# Walk vars
self.frame_walk = 0
self.walking = False # Variável de status
# Jump vars
self.jumpHeight = 10 # Altura do pulo
self.jumpCounter = 1 # Contador correspodente à subida e descida do pulo
self.jumping = False # Variável de status
self.frame_jumping = 0
self.jump_step = 1
self.end_jump = True
# Crouch vars
self.crouching = False # Variável de status
self.frame_crouching = 0
self.crouch_step = 1
# Punch vars
self.Apunching = False
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
self.Bpunching = False
self.frame_Bpunching = 0
self.Bpunch_step = 1
self.end_Bpunch = True
self.Cpunching = False
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
self.Dpunching = False
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# Kick vars
self.Akicking = False
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
self.Bkicking = False
self.frame_Bkicking = 0
self.Bkick_step = 1
self.end_Bkick = True
self.Ckicking = False
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
self.Dkicking = False
self.frame_Dkicking = 0
self.Dkick_step = 1
self.end_Dkick = True
# Blocking vars
self.Ablocking = False
self.frame_Ablocking = 0
self.Ablock_step = 1
self.Bblocking = False
# Special vars
self.specialMove = False
self.end_special = True
self.frame_special = 0
self.special_step = 1
# Hit vars
self.hit = False
self.hitName = ""
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
self.frame_Ahit = 0
self.frame_Bhit = 0
self.frame_Chit = 0
self.frame_Dhit = 0
self.frame_Ehit = 0
self.frame_Fhit = 0
self.frame_Ghit = 0
self.frame_Hhit = 0
self.hit_step = 1
# Life Vars
X_inicio = 37
X_atual = X_inicio
X_fim = X_inicio + 327
self.posFighter()
def fight(self, time, nextFrame):
frame_step = 60
if not self.jumping:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if keyPressed(self.move[0]) and not self.hit:
self.jumping = True
self.end_jump = False
self.curr_sprite = self.spriteList[self.jump]
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> right
elif keyPressed(self.move[3]) and not self.hit:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x += 4
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
# so the modulus 9 allows it to loop
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> left
elif keyPressed(self.move[2]) and not self.hit:# SEGUNDA MUDANÇA and not self.jumping:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x -= 4
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.walkLimit-1-self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
elif keyPressed(self.move[1]) and not self.hit:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit:
self.curr_sprite = self.spriteList[self.crouch]
self.crouching = self.setState()
self.setEndState()
if time > nextFrame:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit:
moveSprite(self.spriteList[self.crouch], self.x, self.y, True)
self.setSprite(self.spriteList[self.crouch])
changeSpriteImage(self.spriteList[self.crouch], self.frame_crouching)
self.frame_crouching = (self.frame_crouching+self.crouch_step) % self.crouchLimit
if self.frame_crouching == self.crouchLimit - 2:
self.crouch_step = 0
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
if ( (keyPressed(self.combat[0]) and self.end_Cpunch) or (not keyPressed(self.combat[0]) and not self.end_Cpunch) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Cpunch]
self.Cpunching = self.setState()
self.setEndState()
self.end_Cpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Cpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Cpunch])
changeSpriteImage(self.spriteList[self.Cpunch], self.frame_Cpunching)
self.frame_Cpunching = (self.frame_Cpunching+self.Cpunch_step) % (self.punchLimit[2]+1)
if (self.frame_Cpunching == self.punchLimit[2]-1):
self.Cpunch_step = -1
if (self.frame_Cpunching == self.punchLimit[2]):
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Dpunch) or (not keyPressed(self.combat[1]) and not self.end_Dpunch) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Dpunch]
self.Dpunching = self.setState()
self.setEndState()
self.end_Dpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Dpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dpunch])
changeSpriteImage(self.spriteList[self.Dpunch], self.frame_Dpunching)
self.frame_Dpunching = (self.frame_Dpunching+self.Dpunch_step) % (self.punchLimit[3]+1)
if (self.frame_Dpunching == self.punchLimit[3]-1):
self.Dpunch_step = -1
if (self.frame_Dpunching == self.punchLimit[3]):
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
elif ( (keyPressed(self.combat[2]) and self.end_Ckick) or (not keyPressed(self.combat[2]) and not self.end_Ckick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Ckick]
self.Ckicking = self.setState()
self.end_Ckick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ckick])
changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
if (self.frame_Ckicking == self.kickLimit[2]-1):
self.Ckick_step = -1
if (self.frame_Ckicking == self.kickLimit[2]):
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Dkick) or (not keyPressed(self.combat[3]) and not self.end_Dkick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Dkick]
self.Dkicking = self.setState()
self.end_Dkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Dkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dkick])
changeSpriteImage(self.spriteList[self.Dkick], self.frame_Dkicking)
self.frame_Dkicking = (self.frame_Dkicking+self.Dkick_step) % self.kickLimit[3]
if (self.frame_Dkicking == 0):
self.end_Dkick = True
            #-------------- crouching hits --------------------
            #Ehit = 16 # weak crouching kick
            #Hhit = 19 # specialMove
            #BblockHit = 21 crouching block hit
            #Ehit = 16 # weak crouching kick
elif self.hit and self.hitName == "Ehit":
self.curr_sprite = self.spriteList[self.Ehit]
self.Ehitting = self.setState()
moveSprite(self.spriteList[self.Ehit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ehit])
changeSpriteImage(self.spriteList[self.Ehit], self.frame_Ehit)
if time > nextFrame:
self.frame_Ehit = (self.frame_Ehit+self.hit_step) % self.hitLimit[4]
if (self.frame_Ehit == self.hitLimit[4] - 1):
self.hit_step = -1
if (self.frame_Ehit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
elif ((keyPressed(self.combat[0]) and self.end_Apunch) or ( not keyPressed(self.combat[0]) and not self.end_Apunch) ) and (not self.hit) :
print("flag!")
self.curr_sprite = self.spriteList[self.Apunch]
self.Apunching = self.setState()
self.setEndState()
self.end_Apunch = False
if clock() > 0.01*nextFrame:
moveSprite(self.spriteList[self.Apunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Apunch])
changeSpriteImage(self.spriteList[self.Apunch], self.frame_Apunching)
self.frame_Apunching = (self.frame_Apunching+self.Apunch_step) % (self.punchLimit[0]+1)
if (self.frame_Apunching == self.punchLimit[0]-1):
self.Apunch_step = -1
if (self.frame_Apunching == self.punchLimit[0]):
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
nextFrame += 1*frame_step
print("frame_apunching =", self.frame_Apunching)
print("end_Apunch =", self.end_Apunch)
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Bpunch) or (not keyPressed(self.combat[1]) and not self.end_Bpunch) ) and (not self.hit) :
self.curr_sprite = self.spriteList[self.Bpunch]
self.Bpunching = self.setState()
self.end_Bpunch = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bpunch])
changeSpriteImage(self.spriteList[self.Bpunch], self.frame_Bpunching)
self.frame_Bpunching = (self.frame_Bpunching+self.Bpunch_step) % self.punchLimit[1]
if (self.frame_Bpunching == 0):
self.end_Bpunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
elif ( (keyPressed(self.combat[2]) and self.end_Akick) or (not keyPressed(self.combat[2]) and not self.end_Akick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Akick]
self.Akicking = self.setState()
self.end_Akick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Akick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Akick])
changeSpriteImage(self.spriteList[self.Akick], self.frame_Akicking)
self.frame_Akicking = (self.frame_Akicking+self.Akick_step) % (self.kickLimit[0]+1)
if (self.frame_Akicking == self.kickLimit[0]-1):
self.Akick_step = -1
if (self.frame_Akicking == self.kickLimit[0]):
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Bkick) or (not keyPressed(self.combat[3]) and not self.end_Bkick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Bkick]
self.Bkicking = self.setState()
self.end_Bkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bkick])
changeSpriteImage(self.spriteList[self.Bkick], self.frame_Bkicking)
self.frame_Bkicking = (self.frame_Bkicking+self.Bkick_step) % self.kickLimit[1]
if (self.frame_Bkicking == 0):
self.end_Bkick = True
nextFrame += 1*frame_step
            # combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> standing block
elif keyPressed(self.combat[5]) and not self.hit:
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.Ablock_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 2:
self.Ablock_step = 0
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> special move
elif ((keyPressed(self.combat[4]) and self.end_special) or (not keyPressed(self.combat[4]) and not self.end_special) ) and (not self.hit):
print("SpecialMove")
self.curr_sprite = self.spriteList[self.special]
self.specialMove = self.setState()
self.setEndState()
self.end_special = False
if time > nextFrame:
moveSprite(self.spriteList[self.special], self.x, self.y, True)
self.setSprite(self.spriteList[self.special])
changeSpriteImage(self.spriteList[self.special], self.frame_special)
self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
if (self.frame_special == self.specialLimit[self.fighterId]-1):
self.special_step = -1
if (self.frame_special == self.specialLimit[self.fighterId]):
self.frame_special = 0
self.special_step = 1
self.end_special = True
nextFrame += 1*frame_step
# just dance :)
elif not self.hit:
# reset block (hold type)
self.frame_Ablocking = 0
self.Ablock_step = 1
# reset down (hold type)
self.frame_crouching = 0
self.crouch_step = 1
# reset other movement
self.frame_walk = self.frame_jumping = 0
# reset combat frames
self.frame_Apunching = self.frame_Bpunching = self.frame_Cpunching = self.frame_Dpunching = self.frame_Akicking = self.frame_Bkicking = self.frame_Ckicking = self.frame_Dkicking = 0
self.setEndState()
# start to dance
self.curr_sprite = self.spriteList[self.dance]
self.dancing = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.dance], self.x, self.y, True)
self.setSprite(self.spriteList[self.dance])
changeSpriteImage(self.spriteList[self.dance], self.frame_dance)
self.frame_dance = (self.frame_dance+self.dance_step) % self.danceLimit
if (self.frame_dance == self.danceLimit-1):
self.dance_step = -1
if (self.frame_dance == 0):
self.dance_step = 1
nextFrame += frame_step
            #-------------- standing hits --------------------
            #Ehit = 16 # weak crouching kick
            #Hhit = 19 # specialMove
            #BblockHit = 21 crouching block hit
            # Ouch! Punch to the face (Ahit = 12 # weak punch)
elif self.hit and self.hitName == "Apunching":
self.curr_sprite = self.spriteList[self.Ahit]
self.Ahitting = self.setState()
moveSprite(self.spriteList[self.Ahit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ahit])
changeSpriteImage(self.spriteList[self.Ahit], self.frame_Ahit)
if time > nextFrame:
self.frame_Ahit = (self.frame_Ahit+self.hit_step) % self.hitLimit[0]
if (self.frame_Ahit == self.hitLimit[0] - 1):
self.hit_step = -1
if (self.frame_Ahit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Ouch! Kick to the face (Bhit = 13 # weak kick)
elif self.hit and self.hitName == "Akicking":
self.curr_sprite = self.spriteList[self.Bhit]
self.Bhitting = self.setState()
if self.fighterId == 0:
self.x -=0.8
else: self.x +=0.8
moveSprite(self.spriteList[self.Bhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bhit])
changeSpriteImage(self.spriteList[self.Bhit], self.frame_Bhit)
if time > nextFrame:
# There are 8 frames of animation in each direction
self.frame_Bhit = (self.frame_Bhit+self.hit_step) % self.hitLimit[1]
if (self.frame_Bhit == self.hitLimit[1] - 1):
self.hit_step = -1
if (self.frame_Bhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            # Ouch! combo punch (Chit = 14 # strong punch)
elif self.hit and self.hitName == "Bpunching":
self.curr_sprite = self.spriteList[self.Chit]
self.Chitting = self.setState()
if self.fighterId == 0:
self.x -=0.5
else: self.x +=0.5
moveSprite(self.spriteList[self.Chit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Chit])
changeSpriteImage(self.spriteList[self.Chit], self.frame_Chit)
if time > nextFrame:
self.frame_Chit = (self.frame_Chit+self.hit_step) % self.hitLimit[2]
if (self.frame_Chit == self.hitLimit[2] - 1):
self.hit_step = -1
if (self.frame_Chit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            #Dhit = 15 # weak crouching punch
elif self.hit and self.hitName == "Cpunching":
self.curr_sprite = self.spriteList[self.Dhit]
self.Dhitting = self.setState()
moveSprite(self.spriteList[self.Dhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dhit])
changeSpriteImage(self.spriteList[self.Dhit], self.frame_Dhit)
if time > nextFrame:
self.frame_Dhit = (self.frame_Dhit+self.hit_step) % self.hitLimit[3]
if (self.frame_Dhit == self.hitLimit[3] - 1):
self.hit_step = -1
if (self.frame_Dhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
            #Fhit = 17 # strong kick and strong crouching punch (uppercut)
elif self.hit and self.hitName == "Bkicking":
self.curr_sprite = self.spriteList[self.Fhit]
self.Fhitting = self.setState()
if self.frame_Fhit <= 6:
if self.fighterId == 0:
self.x -=3
else: self.x +=3
moveSprite(self.spriteList[self.Fhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Fhit])
changeSpriteImage(self.spriteList[self.Fhit], self.frame_Fhit)
if time > nextFrame:
self.frame_Fhit = (self.frame_Fhit+self.hit_step) % self.hitLimit[5]
if (self.frame_Fhit == self.hitLimit[5] - 1):
self.hit = False
nextFrame += 1.2*frame_step
            #Ghit = 18 # strong crouching kick: sweep
elif self.hit and self.hitName == "Dkicking":
self.curr_sprite = self.spriteList[self.Ghit]
self.Ghitting = self.setState()
moveSprite(self.spriteList[self.Ghit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ghit])
changeSpriteImage(self.spriteList[self.Ghit], self.frame_Ghit)
if time > nextFrame:
self.frame_Ghit = (self.frame_Ghit+self.hit_step) % self.hitLimit[6]
if (self.frame_Ghit == self.hitLimit[6] - 1):
self.hit = False
nextFrame += 1.2*frame_step
            #blockHit! Standing block.
elif self.hit and self.hitName == "Ablocking":
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Ablocking == 1:
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
else:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if time > nextFrame:
if keyPressed(self.move[2]):
self.x -= 15
if keyPressed(self.move[3]):
self.x += 15
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.setSprite(self.spriteList[self.jump])
self.y -= (self.jumpHeight-self.jumpCounter)*7
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
if (self.jumpCounter < self.jumpHeight -1 or self.jumpCounter > self.jumpHeight +1): # subindo ou descendo
self.frame_jumping = 1
if (self.jumpHeight - 1 <= self.jumpCounter <= self.jumpHeight + 1): # quase parado
self.frame_jumping = 2
if (self.jumpCounter == 2*self.jumpHeight-1):
self.frame_jumping = 0
self.jumpCounter = -1
if clock() > nextFrame:
self.setSprite(self.spriteList[self.jump])
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.end_jump = self.setState()# MUDANÇA
self.jumping = self.setEndState() #MUDANÇA
self.jumpCounter += 2
nextFrame += 1*frame_step
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
tick(120)
return nextFrame
def getX(self):
return self.x
def getY(self):
return self.y
def setX(self,X):
self.x = X
moveSprite(self.curr_sprite,self.x,self.y,True)
def setY(self,Y):
self.y = Y
moveSprite(self.curr_sprite,self.x,self.y,True)
def isWalking(self):
return self.walking
def isCrouching(self):
return self.crouching
def isDancing(self):
return self.dancing
def isApunching(self):
return self.Apunching
def isBpunching(self):
return self.Bpunching
def isCpunching(self):
return self.Cpunching
def isDpunching(self):
return self.Dpunching
def isAkicking(self):
return self.Akicking
def isBkicking(self):
return self.Bkicking
def isCkicking(self):
return self.Ckicking
def isDkicking(self):
return self.Dkicking
def isAblocking(self):
return self.Ablocking
def isHit(self):
return self.hit
def killPlayer(self):
for i in range(0,len(self.spriteList)):
killSprite(self.spriteList[i])
def currentSprite(self):
return self.curr_sprite
def takeHit(self,by):
self.hit = True
self.hitName = by
def stopHit(self):
self.hit = False
self.hitName = ""
def setState(self):
# moves
self.walking = False
self.dancing = False
self.jumping = False
self.crouching = False
# punches
self.Apunching = False
self.Bpunching = False
self.Cpunching = False
self.Dpunching = False
# kicks
self.Akicking = False
self.Bkicking = False
self.Ckicking = False
self.Dkicking = False
# punch hits
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.Hhitting = False
# blocks
self.Ablocking = False
self.Bblocking = False
# special move
self.specialMove = False
# fatality
self.fatality = False
# actual states
return True
def setEndState(self):
self.end_jump = True
self.end_Apunch = True
self.end_Bpunch = True
self.end_Cpunch = True
self.end_Dpunch = True
self.end_Akick = True
self.end_Bkick = True
self.end_Ckick = True
self.end_Dkick = True
self.end_special = True
return False
def setSprite(self,sprite):
for i in range(0,len(self.spriteList)):
if (not sprite == self.spriteList[i]):
hideSprite(self.spriteList[i])
showSprite(sprite)
def posFighter(self):
for i in range(0,len(self.spriteList)):
moveSprite(self.spriteList[i], self.x, self.y, True)
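# Most of the animation loops in fight() above advance sprite frames in a
# "ping-pong" pattern: the step flips from +1 to -1 on the last frame and back
# to +1 at frame 0. A minimal stand-alone sketch of that loop (illustrative
# only, no pygame involved):
def _ping_pong_frames(limit, ticks):
    frame, step, seen = 0, 1, []
    for _ in range(ticks):
        seen.append(frame)
        frame = (frame + step) % limit
        if frame == limit - 1:
            step = -1
        if frame == 0:
            step = 1
    return seen
# _ping_pong_frames(4, 8) -> [0, 1, 2, 3, 2, 1, 0, 1]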
| 46.923174 | 197 | 0.513461 |
8bd9055b193ece6499cc3b6dd2810792a2afb781 | 42,621 | py | Python | grr/worker/worker_test.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | ["Apache-2.0"] | null | null | null | grr/worker/worker_test.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | ["Apache-2.0"] | 1 | 2018-05-08T21:15:51.000Z | 2018-05-08T21:15:51.000Z | grr/worker/worker_test.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
"""Tests for the worker."""
import threading
import time
import mock
from grr import config
from grr.lib import flags
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import protodict as rdf_protodict
from grr.server import aff4
from grr.server import data_store
from grr.server import flow
from grr.server import flow_runner
from grr.server import front_end
from grr.server import queue_manager
from grr.server import worker
from grr.server.flows.general import administrative
from grr.server.hunts import implementation
from grr.server.hunts import standard
from grr.test_lib import action_mocks
from grr.test_lib import client_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
# A global collector for test results
RESULTS = []
class WorkerSendingTestFlow(flow.GRRFlow):
"""Tests that sent messages are correctly collected."""
@flow.StateHandler()
def Start(self):
for i in range(10):
self.CallClient(
client_test_lib.Test,
rdf_protodict.DataBlob(string="test%s" % i),
data=str(i),
next_state="Incoming")
@flow.StateHandler(auth_required=False)
def Incoming(self, responses):
# We push the result into a global array so we can examine it
# better.
for response in responses:
RESULTS.append(response.string)
class WorkerSendingTestFlow2(WorkerSendingTestFlow):
"""Only send a single request."""
@flow.StateHandler()
def Start(self):
i = 1
self.CallClient(
client_test_lib.Test,
rdf_protodict.DataBlob(string="test%s" % i),
data=str(i),
next_state="Incoming")
class WorkerSendingWKTestFlow(flow.WellKnownFlow):
well_known_session_id = rdfvalue.SessionID(
flow_name="WorkerSendingWKTestFlow")
def ProcessMessage(self, message):
RESULTS.append(message)
class RaisingTestFlow(WorkerSendingTestFlow):
@flow.StateHandler(auth_required=False)
def Incoming(self, responses):
raise AttributeError("Some Error.")
class WorkerStuckableHunt(implementation.GRRHunt):
# Semaphore used by test code to wait until the hunt is being processed.
WAIT_FOR_HUNT_SEMAPHORE = threading.Semaphore(0)
# Semaphore used by the hunt to wait until the external test code does its
# thing.
WAIT_FOR_TEST_SEMAPHORE = threading.Semaphore(0)
@classmethod
def Reset(cls):
cls.WAIT_FOR_HUNT_SEMAPHORE = threading.Semaphore(0)
cls.WAIT_FOR_TEST_SEMAPHORE = threading.Semaphore(0)
@classmethod
def WaitUntilWorkerStartsProcessing(cls):
cls.WAIT_FOR_HUNT_SEMAPHORE.acquire()
@classmethod
def LetWorkerFinishProcessing(cls):
cls.WAIT_FOR_TEST_SEMAPHORE.release()
@flow.StateHandler()
def RunClient(self, responses):
cls = WorkerStuckableHunt
# After starting this hunt, the test should call
# WaitUntilWorkerStartsProcessing() which will block until
# WAIT_FOR_HUNT_SEMAPHORE is released. This way the test
# knows exactly when the hunt has actually started being
# executed.
cls.WAIT_FOR_HUNT_SEMAPHORE.release()
# We block here until WAIT_FOR_TEST_SEMAPHORE is released. It's released
# when the test calls LetWorkerFinishProcessing(). This way the test
# can control precisely when flow finishes.
cls.WAIT_FOR_TEST_SEMAPHORE.acquire()
class WorkerStuckableTestFlow(flow.GRRFlow):
"""Flow that can be paused with sempahores when processed by the worker."""
# Semaphore used by test code to wait until the flow is being processed.
WAIT_FOR_FLOW_SEMAPHORE = threading.Semaphore(0)
# Semaphore used by the flow to wait until the external test code does its
# thing.
WAIT_FOR_TEST_SEMAPHORE = threading.Semaphore(0)
# Semaphore used by the flow to wait until it has to heartbeat.
WAIT_FOR_TEST_PERMISSION_TO_HEARTBEAT_SEMAPHORE = threading.Semaphore(0)
# Semaphore used by the test to wait until the flow heartbeats.
WAIT_FOR_FLOW_HEARTBEAT_SEMAPHORE = threading.Semaphore(0)
HEARTBEAT = False
@classmethod
def Reset(cls, heartbeat=False):
cls.WAIT_FOR_FLOW_SEMAPHORE = threading.Semaphore(0)
cls.WAIT_FOR_TEST_SEMAPHORE = threading.Semaphore(0)
cls.HEARTBEAT = heartbeat
@classmethod
def WaitUntilWorkerStartsProcessing(cls):
cls.WAIT_FOR_FLOW_SEMAPHORE.acquire()
@classmethod
def LetFlowHeartBeat(cls):
if not cls.HEARTBEAT:
raise RuntimeError("LetFlowHeartBeat called, but heartbeat "
"not enabled.")
cls.WAIT_FOR_TEST_PERMISSION_TO_HEARTBEAT_SEMAPHORE.release()
@classmethod
def WaitForFlowHeartBeat(cls, last_heartbeat=False):
"""Called by the test to wait until the flow heartbeats.
Args:
last_heartbeat: If True, the flow won't heartbeat anymore. Consequently,
the test won't be supposed to call LetFlowHeartBeat and
WaitForFlowHeartBeat methods.
Raises:
RuntimeError: if heartbeat is not enabled. Heartbeat can be enabled via
Reset() method.
"""
if not cls.HEARTBEAT:
raise RuntimeError("WaitForFlowHeartBeat called, but heartbeat "
"not enabled.")
if last_heartbeat:
cls.HEARTBEAT = False
cls.WAIT_FOR_FLOW_HEARTBEAT_SEMAPHORE.acquire()
@classmethod
def LetWorkerFinishProcessing(cls):
cls.WAIT_FOR_TEST_SEMAPHORE.release()
@flow.StateHandler()
def Start(self):
cls = WorkerStuckableTestFlow
# After starting this flow, the test should call
# WaitUntilWorkerStartsProcessing() which will block until
# WAIT_FOR_FLOW_SEMAPHORE is released. This way the test
# knows exactly when the flow has actually started being
# executed.
cls.WAIT_FOR_FLOW_SEMAPHORE.release()
while cls.HEARTBEAT:
# The test is expected to call LetFlowHeartBeat(). We block here
# until it's called. This way the test can control
# the way the flow heartbeats. For example, it can mock time.time()
# differently for every call.
cls.WAIT_FOR_TEST_PERMISSION_TO_HEARTBEAT_SEMAPHORE.acquire()
self.HeartBeat()
# The test is expected to call WaitForFlowHeartBeat() and block
# until we release WAIT_FOR_FLOW_HEARTBEAT_SEMAPHORE. This way
# the test knows exactly when the heartbeat was done.
cls.WAIT_FOR_FLOW_HEARTBEAT_SEMAPHORE.release()
# We block here until WAIT_FOR_TEST_SEMAPHORE is released. It's released
# when the test calls LetWorkerFinishProcessing(). This way the test
# can control precisely when flow finishes.
cls.WAIT_FOR_TEST_SEMAPHORE.acquire()
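# The two "stuckable" classes above coordinate with the test through a pair of
# semaphores: the flow releases one to signal that the worker has started
# processing it, then blocks on the other until the test allows it to finish.
# Reduced to plain threading (illustrative sketch only, no GRR types):
def _semaphore_handshake_sketch():
  import threading

  started = threading.Semaphore(0)     # flow -> test: "worker is processing me"
  may_finish = threading.Semaphore(0)  # test -> flow: "you may finish now"

  def stuck_work():
    started.release()     # cf. WAIT_FOR_FLOW_SEMAPHORE.release() in Start()
    may_finish.acquire()  # cf. WAIT_FOR_TEST_SEMAPHORE.acquire()

  worker_thread = threading.Thread(target=stuck_work)
  worker_thread.start()
  started.acquire()       # cf. WaitUntilWorkerStartsProcessing()
  # ... a real test would inspect queues or advance fake time here ...
  may_finish.release()    # cf. LetWorkerFinishProcessing()
  worker_thread.join()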
class ShardedQueueManager(queue_manager.QueueManager):
"""Operate on all shards at once.
These tests call the worker's RunOnce and expect to see all notifications.
This doesn't work when shards are enabled, since each worker is only looking
at its own shard. This class gives the worker visibility across all shards.
"""
def GetNotificationsByPriority(self, queue):
return self.GetNotificationsByPriorityForAllShards(queue)
def GetNotifications(self, queue):
return self.GetNotificationsForAllShards(queue)
class GrrWorkerTest(flow_test_lib.FlowTestsBaseclass):
"""Tests the GRR Worker."""
def setUp(self):
super(GrrWorkerTest, self).setUp()
self.client_id = test_lib.TEST_CLIENT_ID
WorkerStuckableTestFlow.Reset()
self.patch_get_notifications = mock.patch.object(
queue_manager, "QueueManager", ShardedQueueManager)
self.patch_get_notifications.start()
# Clear the results global
del RESULTS[:]
def tearDown(self):
super(GrrWorkerTest, self).tearDown()
self.patch_get_notifications.stop()
def SendResponse(self,
session_id,
data,
client_id=None,
well_known=False,
request_id=None):
if not isinstance(data, rdfvalue.RDFValue):
data = rdf_protodict.DataBlob(string=data)
if well_known:
request_id, response_id = 0, 12345
else:
request_id, response_id = request_id or 1, 1
with queue_manager.QueueManager(token=self.token) as flow_manager:
flow_manager.QueueResponse(
rdf_flows.GrrMessage(
source=client_id,
session_id=session_id,
payload=data,
request_id=request_id,
response_id=response_id))
if not well_known:
# For normal flows we have to send a status as well.
flow_manager.QueueResponse(
rdf_flows.GrrMessage(
source=client_id,
session_id=session_id,
payload=rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.OK),
request_id=request_id,
response_id=response_id + 1,
type=rdf_flows.GrrMessage.Type.STATUS))
flow_manager.QueueNotification(
session_id=session_id, last_status=request_id)
timestamp = flow_manager.frozen_timestamp
return timestamp
def testProcessMessages(self):
"""Test processing of several inbound messages."""
# Create a couple of flows
flow_obj = self.FlowSetup("WorkerSendingTestFlow")
session_id_1 = flow_obj.session_id
flow_obj.Close()
flow_obj = self.FlowSetup("WorkerSendingTestFlow2")
session_id_2 = flow_obj.session_id
flow_obj.Close()
manager = queue_manager.QueueManager(token=self.token)
# Check that client queue has messages
tasks_on_client_queue = manager.Query(self.client_id.Queue(), 100)
# should have 10 requests from WorkerSendingTestFlow and 1 from
# SendingTestFlow2
self.assertEqual(len(tasks_on_client_queue), 11)
# Send each of the flows a repeated message
self.SendResponse(session_id_1, "Hello1")
self.SendResponse(session_id_2, "Hello2")
self.SendResponse(session_id_1, "Hello1")
self.SendResponse(session_id_2, "Hello2")
worker_obj = worker.GRRWorker(token=self.token)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
# Ensure both requests ran exactly once
RESULTS.sort()
self.assertEqual(2, len(RESULTS))
self.assertEqual("Hello1", RESULTS[0])
self.assertEqual("Hello2", RESULTS[1])
# Check that client queue is cleared - should have 2 less messages (since
# two were completed).
tasks_on_client_queue = manager.Query(self.client_id.Queue(), 100)
self.assertEqual(len(tasks_on_client_queue), 9)
# Ensure that processed requests are removed from state subject
outstanding_requests = list(
data_store.DB.ReadRequestsAndResponses(session_id_1))
self.assertEqual(len(outstanding_requests), 9)
for request, _ in outstanding_requests:
self.assertNotEqual(request.request.request_id, 0)
# This flow is still in state Incoming.
flow_obj = aff4.FACTORY.Open(session_id_1, token=self.token)
self.assertTrue(
flow_obj.context.state != rdf_flows.FlowContext.State.TERMINATED)
self.assertEqual(flow_obj.context.current_state, "Incoming")
# This flow should be done.
flow_obj = aff4.FACTORY.Open(session_id_2, token=self.token)
self.assertTrue(
flow_obj.context.state == rdf_flows.FlowContext.State.TERMINATED)
self.assertEqual(flow_obj.context.current_state, "End")
def testNoNotificationRescheduling(self):
"""Test that no notifications are rescheduled when a flow raises."""
with test_lib.FakeTime(10000):
flow_obj = self.FlowSetup("RaisingTestFlow")
session_id = flow_obj.session_id
flow_obj.Close()
# Send the flow some messages.
self.SendResponse(session_id, "Hello1", request_id=1)
self.SendResponse(session_id, "Hello2", request_id=2)
self.SendResponse(session_id, "Hello3", request_id=3)
worker_obj = worker.GRRWorker(token=self.token)
# Process all messages.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
delay = flow_runner.FlowRunner.notification_retry_interval
with test_lib.FakeTime(10000 + 100 + delay):
manager = queue_manager.QueueManager(token=self.token)
self.assertFalse(manager.GetNotificationsForAllShards(session_id.Queue()))
def testNotificationReschedulingTTL(self):
"""Test that notifications are not rescheduled forever."""
with test_lib.FakeTime(10000):
worker_obj = worker.GRRWorker(token=self.token)
flow_obj = self.FlowSetup("RaisingTestFlow")
session_id = flow_obj.session_id
flow_obj.Close()
with queue_manager.QueueManager(token=self.token) as manager:
notification = rdf_flows.GrrNotification(
session_id=session_id, timestamp=time.time(), last_status=1)
with data_store.DB.GetMutationPool() as pool:
manager.NotifyQueue(notification, mutation_pool=pool)
notifications = manager.GetNotifications(queues.FLOWS)
# Check the notification is there.
notifications = [n for n in notifications if n.session_id == session_id]
self.assertEqual(len(notifications), 1)
delay = flow_runner.FlowRunner.notification_retry_interval
ttl = notification.ttl
for i in xrange(ttl - 1):
with test_lib.FakeTime(10000 + 100 + delay * (i + 1)):
# Process all messages.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
notifications = manager.GetNotifications(queues.FLOWS)
# Check the notification is for the correct session_id.
notifications = [n for n in notifications if n.session_id == session_id]
self.assertEqual(len(notifications), 1)
with test_lib.FakeTime(10000 + 100 + delay * ttl):
# Process all messages.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
notifications = manager.GetNotifications(queues.FLOWS)
self.assertEqual(len(notifications), 0)
def testNoKillNotificationsScheduledForHunts(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
try:
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
with implementation.GRRHunt.StartHunt(
hunt_name=WorkerStuckableHunt.__name__,
client_rate=0,
token=self.token) as hunt:
hunt.GetRunner().Start()
implementation.GRRHunt.StartClients(hunt.session_id, [self.client_id])
# Process all messages
while worker_obj.RunOnce():
pass
# Wait until worker thread starts processing the flow.
WorkerStuckableHunt.WaitUntilWorkerStartsProcessing()
# Assert that there are no stuck notifications in the worker's queue.
with queue_manager.QueueManager(token=self.token) as manager:
for queue in worker_obj.queues:
notifications = manager.GetNotificationsByPriority(queue)
self.assertFalse(manager.STUCK_PRIORITY in notifications)
finally:
# Release the semaphore so that worker thread unblocks and finishes
# processing the flow.
WorkerStuckableHunt.LetWorkerFinishProcessing()
worker_obj.thread_pool.Join()
def testKillNotificationsScheduledForFlows(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
try:
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
flow.GRRFlow.StartFlow(
flow_name=WorkerStuckableTestFlow.__name__,
client_id=self.client_id,
token=self.token,
sync=False)
# Process all messages
worker_obj.RunOnce()
# Wait until worker thread starts processing the flow.
WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()
# Assert that there are no stuck notifications in the worker's
# queue.
with queue_manager.QueueManager(token=self.token) as manager:
for queue in worker_obj.queues:
notifications = manager.GetNotificationsByPriority(queue)
self.assertFalse(manager.STUCK_PRIORITY in notifications)
finally:
# Release the semaphore so that worker thread unblocks and finishes
# processing the flow.
WorkerStuckableTestFlow.LetWorkerFinishProcessing()
worker_obj.thread_pool.Join()
def testStuckFlowGetsTerminated(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
try:
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
session_id = flow.GRRFlow.StartFlow(
flow_name=WorkerStuckableTestFlow.__name__,
client_id=self.client_id,
token=self.token,
sync=False)
# Process all messages
while worker_obj.RunOnce():
pass
# Wait until worker thread starts processing the flow.
WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()
# Set the time to max worker flow duration + 1 minute. The flow is
# currently blocked because of the way semaphores are set up.
# Worker should consider the flow to be stuck and terminate it.
stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout
future_time = (
initial_time + rdfvalue.Duration("1m") + stuck_flows_timeout)
with test_lib.FakeTime(future_time.AsSecondsFromEpoch()):
worker_obj.RunOnce()
finally:
# Release the semaphore so that worker thread unblocks and finishes
# processing the flow.
WorkerStuckableTestFlow.LetWorkerFinishProcessing()
worker_obj.thread_pool.Join()
killed_flow = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(killed_flow.context.state,
rdf_flows.FlowContext.State.ERROR)
self.assertEqual(killed_flow.context.status,
"Terminated by user test. Reason: Stuck in the worker")
def testStuckNotificationGetsDeletedAfterTheFlowIsTerminated(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout
try:
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
session_id = flow.GRRFlow.StartFlow(
flow_name=WorkerStuckableTestFlow.__name__,
client_id=self.client_id,
token=self.token,
sync=False)
# Process all messages
worker_obj.RunOnce()
# Wait until worker thread starts processing the flow.
WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()
# Set the time to max worker flow duration + 1 minute. The flow is
# currently blocked because of the way semaphores are set up.
# Worker should consider the flow to be stuck and terminate it.
future_time = (
initial_time + rdfvalue.Duration("1m") + stuck_flows_timeout)
with test_lib.FakeTime(future_time.AsSecondsFromEpoch()):
worker_obj.RunOnce()
killed_flow = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(killed_flow.context.state,
rdf_flows.FlowContext.State.ERROR)
self.assertEqual(killed_flow.context.status,
"Terminated by user test. Reason: Stuck in the worker")
# Check that stuck notification has been removed.
qm = queue_manager.QueueManager(token=self.token)
notifications_by_priority = qm.GetNotificationsByPriority(queues.FLOWS)
self.assertTrue(qm.STUCK_PRIORITY not in notifications_by_priority)
finally:
# Release the semaphore so that worker thread unblocks and finishes
# processing the flow.
WorkerStuckableTestFlow.LetWorkerFinishProcessing()
worker_obj.thread_pool.Join()
def testHeartBeatingFlowIsNotTreatedAsStuck(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout
lease_timeout = rdfvalue.Duration(worker.GRRWorker.flow_lease_time)
WorkerStuckableTestFlow.Reset(heartbeat=True)
try:
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
session_id = flow.GRRFlow.StartFlow(
flow_name=WorkerStuckableTestFlow.__name__,
client_id=self.client_id,
token=self.token,
sync=False)
# Process all messages
worker_obj.RunOnce()
# Wait until worker thread starts processing the flow.
WorkerStuckableTestFlow.WaitUntilWorkerStartsProcessing()
# Increase the time in steps, using LetFlowHeartBeat/WaitForFlowHeartBeat
# to control the flow execution that happens in the parallel thread.
current_time = rdfvalue.RDFDatetime(initial_time)
future_time = initial_time + stuck_flows_timeout + rdfvalue.Duration("1m")
while current_time <= future_time:
current_time += lease_timeout - rdfvalue.Duration("1s")
with test_lib.FakeTime(current_time.AsSecondsFromEpoch()):
checked_flow = aff4.FACTORY.Open(session_id, token=self.token)
WorkerStuckableTestFlow.LetFlowHeartBeat()
WorkerStuckableTestFlow.WaitForFlowHeartBeat(
last_heartbeat=current_time > future_time)
# Now current_time is > future_time, where future_time is the time when the
# stuck flow would have been killed. We call RunOnce() here because, if the
# flow is going to be killed, it will be killed during the worker.RunOnce()
# call.
with test_lib.FakeTime(current_time.AsSecondsFromEpoch()):
worker_obj.RunOnce()
# Check that the flow wasn't killed forcefully.
checked_flow = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(checked_flow.context.state,
rdf_flows.FlowContext.State.RUNNING)
finally:
# Release the semaphore so that worker thread unblocks and finishes
# processing the flow.
with test_lib.FakeTime(current_time.AsSecondsFromEpoch()):
WorkerStuckableTestFlow.LetWorkerFinishProcessing()
worker_obj.thread_pool.Join()
# Check that the flow has finished normally.
checked_flow = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(checked_flow.context.state,
rdf_flows.FlowContext.State.TERMINATED)
def testNonStuckFlowDoesNotGetTerminated(self):
worker_obj = worker.GRRWorker(token=self.token)
initial_time = rdfvalue.RDFDatetime().FromSecondsFromEpoch(100)
stuck_flows_timeout = flow_runner.FlowRunner.stuck_flows_timeout
with test_lib.FakeTime(initial_time.AsSecondsFromEpoch()):
session_id = flow.GRRFlow.StartFlow(
flow_name="WorkerSendingTestFlow",
client_id=self.client_id,
token=self.token,
sync=False)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
self.assertEqual(flow_obj.context.state,
rdf_flows.FlowContext.State.RUNNING)
# Set the time to max worker flow duration + 1 minute. If the 'kill'
# notification isn't deleted we should get it now.
future_time = initial_time + rdfvalue.Duration("1m") + stuck_flows_timeout
with test_lib.FakeTime(future_time.AsSecondsFromEpoch()):
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
# Check that flow didn't get terminated due to a logic bug.
self.assertEqual(flow_obj.context.state,
rdf_flows.FlowContext.State.RUNNING)
def testProcessMessagesWellKnown(self):
worker_obj = worker.GRRWorker(token=self.token)
# Send a message to a WellKnownFlow - ClientStatsAuto.
session_id = administrative.GetClientStatsAuto.well_known_session_id
client_id = rdf_client.ClientURN("C.1100110011001100")
self.SendResponse(
session_id,
data=rdf_client.ClientStats(RSS_size=1234),
client_id=client_id,
well_known=True)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
stats = client.Get(client.Schema.STATS)
self.assertEqual(stats.RSS_size, 1234)
# Make sure no notifications have been sent.
user = aff4.FACTORY.Open(
"aff4:/users/%s" % self.token.username, token=self.token)
notifications = user.Get(user.Schema.PENDING_NOTIFICATIONS)
self.assertIsNone(notifications)
def testWellKnownFlowResponsesAreProcessedOnlyOnce(self):
worker_obj = worker.GRRWorker(token=self.token)
# Send a message to a WellKnownFlow - ClientStatsAuto.
client_id = rdf_client.ClientURN("C.1100110011001100")
self.SendResponse(
rdfvalue.SessionID(queue=queues.STATS, flow_name="Stats"),
data=rdf_client.ClientStats(RSS_size=1234),
client_id=client_id,
well_known=True)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
stats = client.Get(client.Schema.STATS)
self.assertEqual(stats.RSS_size, 1234)
aff4.FACTORY.Delete(client_id.Add("stats"), token=self.token)
# Process all messages once again - there should be no actual processing
# done, as all the responses were processed last time.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
# Check that stats haven't changed as no new responses were processed.
client = aff4.FACTORY.Open(client_id.Add("stats"), token=self.token)
self.assertIsNone(client.Get(client.Schema.STATS))
def CheckNotificationsDisappear(self, session_id):
worker_obj = worker.GRRWorker(token=self.token)
manager = queue_manager.QueueManager(token=self.token)
notification = rdf_flows.GrrNotification(session_id=session_id)
with data_store.DB.GetMutationPool() as pool:
manager.NotifyQueue(notification, mutation_pool=pool)
notifications = manager.GetNotificationsByPriority(queues.FLOWS).get(
notification.priority, [])
# Check the notification is there. With multiple worker queue shards we can
# get other notifications such as for audit event listeners, so we need to
# filter down to just ours.
notifications = [x for x in notifications if x.session_id == session_id]
self.assertEqual(len(notifications), 1)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
notifications = manager.GetNotificationsByPriority(queues.FLOWS).get(
notification.priority, [])
notifications = [x for x in notifications if x.session_id == session_id]
# Check the notification is now gone.
self.assertEqual(len(notifications), 0)
def testWorkerDeletesNotificationsForBrokenObjects(self):
# Test notifications for objects that don't exist.
session_id = rdfvalue.SessionID(queue=queues.FLOWS, flow_name="123456")
self.CheckNotificationsDisappear(session_id)
# Now check objects that are actually broken.
# Start a new flow.
session_id = flow.GRRFlow.StartFlow(
flow_name="WorkerSendingTestFlow",
client_id=self.client_id,
token=self.token)
# Overwrite the type of the object such that opening it will now fail.
data_store.DB.Set(session_id, "aff4:type", "DeprecatedClass")
# Starting a new flow schedules notifications for the worker already but
# this test actually checks that there are none. Thus, we have to delete
# them or the test fails.
data_store.DB.DeleteSubject(queues.FLOWS)
# Check that opening it really does fail now.
with self.assertRaises(aff4.InstantiationError):
aff4.FACTORY.Open(session_id, token=self.token)
self.CheckNotificationsDisappear(session_id)
def testNotificationRacesAreResolved(self):
# We need a random flow object for this test.
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name="WorkerSendingTestFlow",
token=self.token)
worker_obj = worker.GRRWorker(token=self.token)
manager = queue_manager.QueueManager(token=self.token)
manager.DeleteNotification(session_id)
manager.Flush()
# We simulate a race condition here - the notification for request #1 is
# there but the actual request #1 is not. The worker should pick up the
# notification, notice that the request #1 is not there yet and reschedule
# the notification.
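# Illustrative sketch of what the assertions at the end of this test rely on
# (not part of the original test; timestamps are hypothetical):
#   original notification:    first_queued=T0, timestamp=T0
#   rescheduled notification: first_queued=T0, timestamp=T0 + retry delay
# i.e. the worker keeps first_queued but writes a fresh timestamp when it
# reschedules the notification.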
notification = rdf_flows.GrrNotification(
session_id=session_id, last_status=1)
with data_store.DB.GetMutationPool() as pool:
manager.NotifyQueue(notification, mutation_pool=pool)
notifications = manager.GetNotifications(queues.FLOWS)
# Check the notification is there.
notifications = [n for n in notifications if n.session_id == session_id]
self.assertEqual(len(notifications), 1)
# Process all messages
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
delay = flow_runner.FlowRunner.notification_retry_interval
with test_lib.FakeTime(time.time() + 10 + delay):
requeued_notifications = manager.GetNotifications(queues.FLOWS)
# Check that there is a new notification.
notifications = [n for n in notifications if n.session_id == session_id]
self.assertEqual(len(requeued_notifications), 1)
self.assertEqual(requeued_notifications[0].first_queued,
notifications[0].first_queued)
self.assertNotEqual(requeued_notifications[0].timestamp,
notifications[0].timestamp)
def testNoValidStatusRaceIsResolved(self):
# This tests for the regression of a long standing race condition we saw
# where notifications would trigger the reading of another request that
# arrives later but wasn't completely written to the database yet.
# Timestamp based notification handling should eliminate this bug.
# We need a random flow object for this test.
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name="WorkerSendingTestFlow",
token=self.token)
worker_obj = worker.GRRWorker(token=self.token)
manager = queue_manager.QueueManager(token=self.token)
manager.DeleteNotification(session_id)
manager.Flush()
# We have a first request that is complete (request_id 1, response_id 1).
self.SendResponse(session_id, "Response 1")
# However, we also have request #2 already coming in. The race is that
# the queue manager might write the status notification to
# session_id/state as "status:00000002" but not the status response
# itself yet under session_id/state/request:00000002
request_id = 2
response_id = 1
flow_manager = queue_manager.QueueManager(token=self.token)
flow_manager.FreezeTimestamp()
flow_manager.QueueResponse(
rdf_flows.GrrMessage(
source=self.client_id,
session_id=session_id,
payload=rdf_protodict.DataBlob(string="Response 2"),
request_id=request_id,
response_id=response_id))
status = rdf_flows.GrrMessage(
source=self.client_id,
session_id=session_id,
payload=rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.OK),
request_id=request_id,
response_id=response_id + 1,
type=rdf_flows.GrrMessage.Type.STATUS)
# Now we write half the status information.
data_store.DB.StoreRequestsAndResponses(new_responses=[(status, None)])
# We make the race even a bit harder by saying the new notification gets
# written right before the old one gets deleted. If we are not careful here,
# we delete the new notification as well and the flow becomes stuck.
def WriteNotification(self, arg_session_id, start=None, end=None):
if arg_session_id == session_id:
flow_manager.QueueNotification(session_id=arg_session_id)
flow_manager.Flush()
self.DeleteNotification.old_target(
self, arg_session_id, start=start, end=end)
with utils.Stubber(queue_manager.QueueManager, "DeleteNotification",
WriteNotification):
# This should process request 1 but not touch request 2.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
flow_obj = aff4.FACTORY.Open(session_id, token=self.token)
self.assertFalse(flow_obj.context.backtrace)
self.assertNotEqual(flow_obj.context.state,
rdf_flows.FlowContext.State.ERROR)
request_data = data_store.DB.ReadResponsesForRequestId(session_id, 2)
request_data.sort(key=lambda msg: msg.response_id)
self.assertEqual(len(request_data), 2)
# Make sure the status and the original request are still there.
self.assertEqual(request_data[0].args_rdf_name, "DataBlob")
self.assertEqual(request_data[1].args_rdf_name, "GrrStatus")
# But there is nothing for request 1.
request_data = data_store.DB.ReadResponsesForRequestId(session_id, 1)
self.assertEqual(request_data, [])
# The notification for request 2 should have survived.
with queue_manager.QueueManager(token=self.token) as manager:
notifications = manager.GetNotifications(queues.FLOWS)
self.assertEqual(len(notifications), 1)
notification = notifications[0]
self.assertEqual(notification.session_id, session_id)
self.assertEqual(notification.timestamp, flow_manager.frozen_timestamp)
self.assertEqual(RESULTS, ["Response 1"])
# The last missing piece of request 2 is the actual status message.
flow_manager.QueueResponse(status)
flow_manager.Flush()
# Now make sure request 2 runs as expected.
worker_obj.RunOnce()
worker_obj.thread_pool.Join()
self.assertEqual(RESULTS, ["Response 1", "Response 2"])
def testUniformTimestamps(self):
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name="WorkerSendingTestFlow",
token=self.token)
# Convert to int to make test output nicer in case of failure.
frozen_timestamp = int(self.SendResponse(session_id, "Hey"))
request_id = 1
messages = data_store.DB.ReadResponsesForRequestId(session_id, request_id)
self.assertEqual(len(messages), 2)
self.assertItemsEqual([m.args_rdf_name for m in messages],
["DataBlob", "GrrStatus"])
for m in messages:
self.assertEqual(m.timestamp, frozen_timestamp)
def testEqualTimestampNotifications(self):
frontend_server = front_end.FrontEndServer(
certificate=config.CONFIG["Frontend.certificate"],
private_key=config.CONFIG["PrivateKeys.server_key"],
message_expiry_time=100,
threadpool_prefix="notification-test")
# This schedules 10 requests.
session_id = flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name="WorkerSendingTestFlow",
token=self.token)
# We pretend that the client processed all the 10 requests at once and
# sends the replies in a single http poll.
messages = [
rdf_flows.GrrMessage(
request_id=i,
response_id=1,
session_id=session_id,
payload=rdf_protodict.DataBlob(string="test%s" % i),
generate_task_id=True) for i in range(1, 11)
]
status = rdf_flows.GrrStatus(status=rdf_flows.GrrStatus.ReturnedStatus.OK)
statuses = [
rdf_flows.GrrMessage(
request_id=i,
response_id=2,
session_id=session_id,
payload=status,
type=rdf_flows.GrrMessage.Type.STATUS,
generate_task_id=True) for i in range(1, 11)
]
frontend_server.ReceiveMessages(self.client_id, messages + statuses)
with queue_manager.QueueManager(token=self.token) as q:
all_notifications = q.GetNotificationsByPriorityForAllShards(
rdfvalue.RDFURN("aff4:/F"))
medium_priority = rdf_flows.GrrNotification.Priority.MEDIUM_PRIORITY
medium_notifications = all_notifications[medium_priority]
my_notifications = [
n for n in medium_notifications if n.session_id == session_id
]
# There must not be more than one notification.
self.assertEqual(len(my_notifications), 1)
notification = my_notifications[0]
self.assertEqual(notification.first_queued, notification.timestamp)
self.assertEqual(notification.last_status, 10)
def testCPULimitForFlows(self):
"""This tests that the client actions are limited properly."""
result = {}
client_mock = action_mocks.CPULimitClientMock(result)
client_mock = flow_test_lib.MockClient(
self.client_id, client_mock, token=self.token)
client_mock.EnableResourceUsage(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000])
worker_obj = worker.GRRWorker(token=self.token)
flow.GRRFlow.StartFlow(
client_id=self.client_id,
flow_name=flow_test_lib.CPULimitFlow.__name__,
cpu_limit=1000,
network_bytes_limit=10000,
token=self.token)
self._Process([client_mock], worker_obj)
self.assertEqual(result["cpulimit"], [1000, 980, 960])
self.assertEqual(result["networklimit"], [10000, 9000, 8000])
return result
def _Process(self, client_mocks, worker_obj):
while True:
client_msgs_processed = 0
for client_mock in client_mocks:
client_msgs_processed += client_mock.Next()
worker_msgs_processed = worker_obj.RunOnce()
worker_obj.thread_pool.Join()
if not client_msgs_processed and not worker_msgs_processed:
break
def testCPULimitForHunts(self):
worker_obj = worker.GRRWorker(token=self.token)
client_ids = ["C.%016X" % i for i in xrange(10, 20)]
result = {}
client_mocks = []
for client_id in client_ids:
client_mock = action_mocks.CPULimitClientMock(result)
client_mock = flow_test_lib.MockClient(
rdf_client.ClientURN(client_id), client_mock, token=self.token)
client_mock.EnableResourceUsage(
user_cpu_usage=[10], system_cpu_usage=[10], network_usage=[1000])
client_mocks.append(client_mock)
flow_runner_args = rdf_flows.FlowRunnerArgs(
flow_name=flow_test_lib.CPULimitFlow.__name__)
with implementation.GRRHunt.StartHunt(
hunt_name=standard.GenericHunt.__name__,
flow_runner_args=flow_runner_args,
cpu_limit=5000,
per_client_cpu_limit=10000,
network_bytes_limit=1000000,
client_rate=0,
token=self.token) as hunt:
hunt.GetRunner().Start()
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
self._Process(client_mocks, worker_obj)
# The limiting factor here is the overall hunt limit of 5000 cpu
# seconds. Clients that finish should decrease the remaining quota
# and the following clients should get the reduced quota.
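# Worked numbers behind the expected values below (a sketch derived from the
# mocks above, not part of the original test): each mocked client action
# reports 10s user + 10s system CPU = 20 CPU seconds and 1000 network bytes,
# and the nine expected values imply three such actions per client. With the
# overall hunt limits of 5000 CPU seconds / 1000000 bytes, the limit passed
# to each successive action therefore drops by 20s / 1000 bytes:
#   CPU: 5000, 4980, 4960 (client 1), 4940, 4920, 4900 (client 2), ...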
self.assertEqual(result["cpulimit"], [
5000.0, 4980.0, 4960.0, 4940.0, 4920.0, 4900.0, 4880.0, 4860.0, 4840.0
])
self.assertEqual(result["networklimit"], [
1000000L, 999000L, 998000L, 997000L, 996000L, 995000L, 994000L, 993000L,
992000L
])
result.clear()
with implementation.GRRHunt.StartHunt(
hunt_name=standard.GenericHunt.__name__,
flow_runner_args=flow_runner_args,
per_client_cpu_limit=3000,
per_client_network_limit_bytes=3000000,
client_rate=0,
token=self.token) as hunt:
hunt.GetRunner().Start()
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
self._Process(client_mocks, worker_obj)
# This time, the per client limit is 3000s / 3000000 bytes. Every
# client should get the same limit.
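# Sketch of the per-client arithmetic (not part of the original test): with
# only per-client limits set (3000s / 3000000 bytes) and each action still
# costing 20 CPU seconds / 1000 bytes, every client independently sees
# 3000, 2980, 2960 (and 3000000, 2999000, 2998000), so the pattern repeats
# per client instead of decreasing across the whole hunt.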
self.assertEqual(result["cpulimit"], [
3000.0, 2980.0, 2960.0, 3000.0, 2980.0, 2960.0, 3000.0, 2980.0, 2960.0
])
self.assertEqual(result["networklimit"], [
3000000, 2999000, 2998000, 3000000, 2999000, 2998000, 3000000, 2999000,
2998000
])
result.clear()
for client_mock in client_mocks:
client_mock.EnableResourceUsage(
user_cpu_usage=[500], system_cpu_usage=[500], network_usage=[1000000])
with implementation.GRRHunt.StartHunt(
hunt_name=standard.GenericHunt.__name__,
flow_runner_args=flow_runner_args,
per_client_cpu_limit=3000,
cpu_limit=5000,
per_client_network_limit_bytes=3000000,
network_bytes_limit=5000000,
client_rate=0,
token=self.token) as hunt:
hunt.GetRunner().Start()
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[:1])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[1:2])
self._Process(client_mocks, worker_obj)
implementation.GRRHunt.StartClients(hunt.session_id, client_ids[2:3])
self._Process(client_mocks, worker_obj)
# The first client gets the full per client limit of 3000s, and
# uses all of it. The hunt has a limit of just 5000 total so the
# second client gets started with a limit of 2000. It can only run
# two of its three states, and the last client will not be started at all
# because the hunt is out of quota.
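# Worked numbers for the combined limits (a sketch, not part of the original
# test): each action now costs 500 + 500 = 1000 CPU seconds. Client 1 starts
# with min(per-client 3000, hunt 5000) = 3000 and its three actions see
# 3000, 2000, 1000, consuming 3000 of the hunt quota. Client 2 starts with
# min(3000, 5000 - 3000) = 2000, sees 2000, 1000, and hits the client-side
# CPU limit before its third action. Client 3 is never started because the
# hunt quota is exhausted server-side.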
self.assertEqual(result["cpulimit"],
[3000.0, 2000.0, 1000.0, 2000.0, 1000.0])
self.assertEqual(result["networklimit"],
[3000000, 2000000, 1000000, 2000000, 1000000])
errors = list(hunt.GetClientsErrors())
self.assertEqual(len(errors), 2)
# Client side out of cpu.
self.assertIn("CPU limit exceeded", errors[0].log_message)
# Server side out of cpu.
self.assertIn("Out of CPU quota", errors[1].backtrace)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 37.952805 | 80 | 0.711762 |
b056e659b103013658d1ef0ca4f62337b523b920 | 84,754 | py | Python | AC_tools/obsolete/plotting_REDUNDANT.py | LukeFakes/AC_tools | 31eb4786f1266d4b932eef238b249044e2e5d419 | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Redundant plotting functions to be removed from AC_tools
Notes
-------
- These functions are a mixture of those made redundant by the shift from matplotlib's basemap to cartopy and those that are no longer in use/years old/not useful/not pythonic
"""
import sys
# - Required modules:
if sys.version_info.major < 3:
try:
from mpl_toolkits.basemap import Basemap
except ModuleNotFoundError:
print('WARNING: Module not found error raised for: mpl_toolkits.basemap')
import matplotlib.pyplot as plt
import matplotlib as mpl
from pylab import setp
import functools
import matplotlib
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# Time
import time
import calendar
import datetime as datetime
from datetime import datetime as datetime_
# I/O/Admin...
import gc
import logging
# Explicit imports for names used directly in this module (these may also be
# provided indirectly by the star imports below)
from pandas import DataFrame
from matplotlib.colors import LogNorm
# The below imports need to be updated,
# imports should be specific and in individual functions
# import tms modules with shared functions
from .. variables import *
from .. generic import *
from .. AC_time import *
from .. planeflight import *
from .. GEOSChem_nc import *
from .. GEOSChem_bpch import *
# math
from math import log10, floor
import numpy as np
import scipy
# colormaps - Additional maps from Eric Sofen
#from option_c import test_cm as cmc
#from option_d import test_cm as cmd
# -------------- Redundant Functions
# NOTE(s):
# (1) These are retained even though they are redundant for back compatibility
# (2) It is not advised to use these.
def plot_lons_lats_spatial_on_map(lons=None, lats=None, p_size=50, color='red',
title=None, f_size=15, dpi=320, fig=None, ax=None,
label=None,
return_axis=False, marker='o', alpha=1, ylabel=True,
xlabel=True,
window=False, axis_titles=True,
split_title_if_too_long=False,
resolution='c'):
"""
Plot a list of lons and lats spatially on a map using basemap
Parameters
-------
p_size (int): size of plot location point (lon, lat)
lons, lats (list): lists of locations (in decimal longitude and latitude)
color (str): color of points on map for locations
title (str): title for plot
f_size (float): fontsize
dpi (int): resolution of figure (dots per sq inch)
return_axis (bool): return the basemap axis instance
marker (str): marker style
Notes
-----
- matplotlib's basemap is now redundant! use cartopy
"""
import matplotlib.pyplot as plt
# --- Setup plot
if isinstance(fig, type(None)):
fig = plt.figure(dpi=dpi, facecolor='w', edgecolor='k')
if isinstance(ax, type(None)):
ax = fig.add_subplot(111)
# Plot up white background (on a blank basemap plot)
arr = np.zeros((72, 46))
plt, m = map_plot(arr.T, return_m=True, cmap=plt.cm.binary,
f_size=f_size, window=window,
fixcb=[0, 0], ax=ax, no_cb=True, resolution=resolution,
ylabel=ylabel, xlabel=xlabel, title=title, axis_titles=axis_titles,
split_title_if_too_long=split_title_if_too_long)
# Plot up all sites as a scatter plot of points on basemap
m.scatter(lons, lats, edgecolors=color, c=color, marker=marker,
s=p_size, alpha=alpha, label=label)
# Return axis?
if return_axis:
return m
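# Minimal usage sketch for plot_lons_lats_spatial_on_map (illustrative only;
# the coordinates below are hypothetical and this call is not part of the
# original module):
#
#   plot_lons_lats_spatial_on_map(lons=[-2.1, 0.1], lats=[53.0, 51.5],
#                                 color='blue', p_size=30,
#                                 title='Example site locations')
#
# This draws a blank global basemap via map_plot and scatters the given
# points on top of it.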
def plot_map(arr, return_m=False, grid=False, centre=False, cmap=None, no_cb=False,
cb=None, rotatecbunits='horizontal', fixcb=None, nticks=10,
mask_invalids=False,
format='%.2f', adjust_window=0, f_size=20, alpha=1, log=False,
set_window=False, res=None, ax=None, case='default', units=None,
drawcountries=True, set_cb_ticks=True, title=None, lvls=None,
interval=15, resolution='c', shrink=0.4, window=False, everyother=1,
extend='neither', degrade_resolution=False, discrete_cmap=False,
lon_0=None, lon_1=None, lat_0=None, lat_1=None, norm=None,
cb_sigfig=2, fixcb_buffered=None, ylabel=True,
xlabel=True, wd=None, verbose=True, debug=False, tight_layout=False,
**Kwargs):
"""
Make a global/regional 2D (lon, lat) spatial plot
WARNING - This is an updated version of map_plot (incomplete/in development);
use map_plot instead!
Parameters
----------
adjust_window (int): amount of array entries to remove the edges of array
alpha (float): transparency of plotted data
arr (np.array): input (2D) array
case (str or int): case for type of plot (vestigial: use log=True or log=False (default))
cmap (str): force colormap selection by providing name
centre (bool): use centre points of lon/lat grid points for mapping data surface
drawcountries (bool): add countries to basemap?
debug (bool): legacy debug option, replaced by python logging
degrade_resolution (bool): reduce resolution of underlay map detail
discrete_cmap (bool): use a discrete instead of continuous colormap
everyother (int): use "everyother" axis tick (e.g. 3=use every 3rd)
f_size (float): fontsize
fixcb (np.array): minimum and maximum to fix colourbar
fixcb_buffered (array): minimum and maximum to fix colourbar, with buffer space
format (str): format string for colorbar formatting
grid (bool): apply a grid over surface plot?
extend (str): colorbar format settings ( 'both', 'min', 'both' ... )
interval (int): x/y tick interval in degrees lat/lon (default=15)
lvls (list): manually provide levels for colorbar
log (bool): use a log scale for the plot and colorbar
no_cb (bool): suppress the colourbar? (True = no colourbar)
norm (norm object): normalisation to use for colourbar and array
nticks (int): number of ticks on colorbar
mask_invalids (bool): mask invalid numbers (to allow saving to PDF)
res (str): GEOS-Chem output configuration resolution ( '4x5' etc... )
resolution (str): basemap resolution settings ( 'c' = coarse, 'f' = fine ... )
rotatecbunits (str): orientation of colourbar units
shrink (bool): colorbar size settings ( fractional shrink )
set_window (bool): set the limits of the plotted data (lon_0, lon_1, lat_0, lat_1)
(for nested boundary conditions )
cb_sigfig (int): significant figure rounding to use for colourbar
set_cb_ticks (bool): manually set colorbar ticks? (vestigial)
title (str): plot title (default is None, i.e. no title)
tight_layout (bool): use tight layout for the figure
ylabel, xlabel (bool): label x/y axis?
units (str): units of given data for plot title
verbose (bool): legacy debug option, replaced by python logging
wd (str): Specify the wd to get the results from a run.
window (bool): use window plot settings (fewer axis labels/resolution of map)
Returns
-------
optionally returns basemap (return_m==True) and colorbar (no_cb!=True) object
Notes
-----
- Takes a numpy array and the resolution of the output. The plot extent is then set by this output.
"""
if isinstance(arr, type(None)):
logging.error("No data given to map_plot!")
raise AssertionError("No data given to map_plot")
elif not len(arr.shape) == 2:
logging.error("Input array should be 2D. Got shape {shape}"
.format(shape=arr.shape))
logging.info("map_plot called")
# Find out what resolution we are using if not specified
if isinstance(res, type(None)):
try: # Attempt to extract resolution from wd
logging.debug("No resolution specified, getting from wd")
res = get_gc_res(wd)
except TypeError: # Assume 4x5 resolution
# Assume 4x5 resolution
logging.warning('No resolution specified or found. Assuming 4x5')
logging.warning(
'Try specifying the wd or manually specifying the res')
res = '4x5'
# ---------------- Cases for arrays ----------------
# Keep the names for easier reading
####################################################################################################
# Old
# if ( not isinstance( case, int ) ):
# case = {
# 'linear':3, 'default':3, 'IO': 1,'limit_to_1_2': 2, 'log': 4,
# }[case]
####################################################################################################
# New
# Initialise the case flags so they exist whichever branch below runs
# (otherwise e.g. case='log' leaves `default` undefined and a NameError is
# raised further down where `default` is checked).
IO = limit_to_1_2 = default = False
if (isinstance(case, int)):
case = {
1: 'IO',
2: 'limit_to_1_2',
3: 'default',
4: 'log',
}[case]
if case == "IO":
IO = True
elif case == "limit_to_1_2":
limit_to_1_2 = True
elif case == "default":
default = True
elif case == "log":
log = True
else:
raise ValueError("Unknown case of {case}".format(case=case))
####################################################################################################
# Make sure the input data is usable and try to fix it if not.
(res_lat, res_lon) = get_dims4res(res, just2D=True)
expected_shape = (res_lon, res_lat)
if arr.shape == expected_shape:
pass
elif arr.shape == expected_shape[::-1]:
arr = arr.T
logging.warning("Array was wrong shape and has been transposed!")
else:
logging.error("Array is the wrong shape. Should be {}. Got {}"
.format(str(expected_shape), arr.shape))
raise AssertionError("Incorrect array shape.")
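# For example, assuming the standard 4x5 GEOS-Chem grid of 72 longitudes by
# 46 latitudes, expected_shape is (72, 46); a (46, 72) input is transposed
# above and anything else raises the AssertionError.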
# Add an invalid-value warning!
# Mask for percent arrays containing invalid values ( to allow PDF save )
if mask_invalids:
arr = np.ma.masked_invalid(arr)
# --- Window plot settings
if window:
interval = 30 # double interval size
degrade_resolution = True
if res == '0.5x0.666':
interval, adjust_window, resolution, shrink = 0.5, 3, 'f', 0.6
if degrade_resolution:
resolution = 'l'
nested_res = ['0.25x0.3125', '0.25x0.3125_CH', '0.25x0.3125_WA']
if res in nested_res:
centre = False
adjust_window = 6
# Get lons and lats
lon, lat, NIU = get_latlonalt4res(res, centre=centre, wd=wd)
if set_window:
# Convert lats and lons to GC lats and restrict lats, lons, and arr
if not isinstance(lat_0, type(None)):
gclat_0, gclat_1 = [get_gc_lat(i, res=res) for i in (lat_0, lat_1)]
lat = lat[gclat_0:gclat_1]
if not isinstance(lon_0, type(None)):
gclon_0, gclon_1 = [get_gc_lon(i, res=res) for i in (lon_0, lon_1)]
lon = lon[gclon_0:gclon_1]
# ---------------- Basemap setup ----------------
# Grid/Mesh values
x, y = np.meshgrid(lon, lat)
# Set existing axis to current if axis provided
if not isinstance(ax, type(None)):
plt.sca(ax)
# ---- Setup map ("m") using Basemap
m = get_basemap(lat=lat, lon=lon, resolution=resolution, res=res,
everyother=everyother, interval=interval, f_size=f_size,
ylabel=ylabel,
xlabel=xlabel, drawcountries=drawcountries)
# Process data to grid
x, y = np.meshgrid(*m(lon, lat))
# reduce plotted region for nested grids/subregion plots
if set_window:
plt.xlim(lon_0, lon_1)
plt.ylim(lat_0, lat_1)
else:
plt.xlim(lon[0+adjust_window], lon[-1-adjust_window])
plt.ylim(lat[0+adjust_window], lat[-1-adjust_window])
####################################################################################################
# -------- colorbar variables...
# Set cmap range I to limit poly, if not given cmap )
fixcb_ = fixcb
# New approach
if isinstance(fixcb_, type(None)) or isinstance(cmap, type(None)):
fixcb_ = np.array([(i.min(), i.max()) for i in [arr]][0])
if isinstance(cmap, type(None)):
# Set readable levels for cb, then use these to dictate cmap
if isinstance(lvls, type(None)):
lvls = get_human_readable_gradations(vmax=fixcb_[1], vmin=fixcb_[0],
nticks=nticks, cb_sigfig=cb_sigfig)
# Setup Colormap
cmap, fixcb_buffered = get_colormap(np.array(fixcb_),
nticks=nticks, fixcb=fixcb_,
buffer_cmap_upper=True)
# Update colormap with buffer
cmap = get_colormap(arr=np.array(
[fixcb_buffered[0], fixcb_buffered[1]]))
# Allow function to operate without fixcb_buffered provided
if isinstance(fixcb_buffered, type(None)):
fixcb_buffered = fixcb_
fixcb_ = fixcb_buffered
logging.info('colorbar variables: ' + str([fixcb_buffered, fixcb, fixcb_, lvls,
cmap, lvls]))
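# Rough sketch of what the block above is doing (an assumption about the
# helper functions, not taken from this module): for data spanning, say,
# 0-97.3, get_human_readable_gradations is expected to return "nice" tick
# levels (e.g. 0, 10, ..., 100) and get_colormap then returns a colormap
# whose buffered upper bound covers the rounded-up maximum, so the top
# colorbar tick is not clipped.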
# Use a discrete colour map?
# if discrete_cmap:
# if isinstance( fixcb, type(None) ):
# cmap, norm = mk_discrete_cmap( vmin=arr.min(), vmax=arr.max(), \
# nticks=nticks, cmap=cmap )
# else:
# cmap, norm = mk_discrete_cmap( vmin=fixcb[0], vmax=fixcb[1], \
# nticks=nticks, cmap=cmap )
NEW_VERSION = False
if not NEW_VERSION:
####################################################################################################
# Old version is here
####################################################################################################
# -------------- Linear plots -------------------------------
# standard plot
linear_cases = ["default", 9]
if case == 9 or default:
debug_list = [fixcb_, arr.shape, [len(i) for i in (lon, lat)], ]
debug_list += [norm, cmap]
if debug:
print(debug_list)
poly = m.pcolor(lon, lat, arr, cmap=cmap, norm=norm, alpha=alpha,
vmin=fixcb_[0], vmax=fixcb_[1])
# ----------------- Log plots --------------------------------
if log: # l
poly = m.pcolor(lon, lat, arr, norm=LogNorm(vmin=fixcb_[0], vmax=fixcb_[1]),
cmap=cmap)
if no_cb:
pass
else:
# Get logarithmically spaced integers
lvls = np.logspace(
np.log10(fixcb[0]), np.log10(fixcb[1]), num=nticks)
# Normalise to Log space
norm = mpl.colors.LogNorm(vmin=fixcb_[0], vmax=fixcb_[1])
# Create colourbar instance
cb = plt.colorbar(poly, ax=m.ax, ticks=lvls, format=format, shrink=shrink,
alpha=alpha, norm=norm, extend='min')
logging.debug(np.ma.min(np.ma.log(arr)),
np.ma.max(np.ma.log(arr)), lvls)
# ---------------- Colorbars ----------------
if not no_cb:
if isinstance(cb, type(None)):
# if linear plot without fixcb set, then define here
ax = plt.gca()
# Create colourbar instance
cb = plt.colorbar(poly, ax=ax, shrink=shrink,
alpha=alpha, extend=extend)
# set ylabel tick properties
for t in cb.ax.get_yticklabels():
t.set_fontsize(f_size)
if not isinstance(units, type(None)):
cb.ax.set_ylabel(
units, rotation=rotatecbunits, labelpad=f_size)
# Special treatment for log colorbars
if log:
def round_to_n(x, n): return round(
x, -int(floor(log10(x))) + (n - 1))
tick_locs = [float('{:.2g}'.format(t)) for t in lvls]
# for aesthetics, round colorbar labels to sig figs given
for n, lvl in enumerate(lvls):
try:
lvls[n] = round_to_n(lvl, cb_sigfig)
except:
lvls[n] = lvl
else:
tick_locs = np.array(lvls).copy()
# Turn tick locations into floats
tick_locs = [float(_tick) for _tick in tick_locs]
cb.set_ticks(np.array(tick_locs))
# the format is not correctly being set... - do this manually instead
if not isinstance(format, type(None)):
lvls = [format % (i) for i in lvls]
cb.set_ticklabels(lvls) # , format=format )
####################################################################################################
if NEW_VERSION:
if case == 9 or default:
vmin = fixcb_[0]
vmax = fixcb_[1]
if log:
norm = LogNorm(vmin=fixcb_[0], vmax=fixcb_[1])
lvls = np.logspace(
np.log10(fixcb[0]), np.log10(fixcb[1]), num=nticks)
# Normalise to Log space
norm = mpl.colors.LogNorm(vmin=fixcb_[0], vmax=fixcb_[1])
extend = 'min'
ticks = lvls
ax = m.ax
def round_to_n(x, n): return round(
x, -int(floor(log10(x))) + (n-1))
tick_locs = [float('{:.2g}'.format(t)) for t in lvls]
# for aesthetics, round colorbar labels to sig figs given
for n, lvl in enumerate(lvls):
try:
lvls[n] = round_to_n(lvl, cb_sigfig)
except:
lvls[n] = lvl
else:
tick_locs = np.array(lvls).copy()
# Turn tick locations into floats.
tick_locs = [float(_tick) for _tick in tick_locs]
# Create the colormap
poly = m.pcolor(lon, lat, arr, norm=norm, vmax=vmax, vmin=vmin,
cmap=cmap, alpha=alpha)
# Add the colorbar if needed.
if not no_cb:
# if linear plot without fixcb set, then define here
if isinstance(cb, type(None)):
ax = plt.gca()
cb = plt.colorbar(poly, ax=ax, ticks=lvls, format=format,
shrink=shrink, alpha=alpha, norm=norm, extend=extend)
# Set ylabel tick properties
cb.ax.tick_params(labelsize=f_size)
# Set ylabel units:
if not isinstance(units, type(None)):
cb.ax.set_ylabel(
units, rotation=rotatecbunits, labelpad=f_size)
cb.set_ticks(np.array(tick_locs))
cb.set_ticklabels(lvls)
# logging.info(tick_locs, lvls, [ type(i) for i in tick_locs, lvls ])
#logging.info(cb.get_clim(), title, format)
# Set number of ticks
# FIX NEEDED - this currently doesn't work for log plots
# if (not default) and (not no_cb) and ( not log ):
# if set_cb_ticks:
# tick_locator = ticker.MaxNLocator( nticks=nticks )
# cb.locator = tick_locator
# cb.update_ticks()
# Add grid lines to the plot?
plt.grid(grid)
# Add title to plot?
max_title_len = 30
if tight_layout == True:
plt.tight_layout()
if not isinstance(title, type(None)):
# Check if the title is too long and, if so, split it over multiple lines
if len(title) > max_title_len:
print("title is too long; splitting it over multiple lines.")
import textwrap
title = "\n".join(textwrap.wrap(title, max_title_len))
print(title)
# Adjust the top of the plot by 0.05 for every line the title takes
# plt.subplots_adjust(top=1-0.05*(len(title)%max_title_len))
plt.title(title, fontsize=f_size*1.5)
# Setup list of return variables
return_l = [plt]
if not no_cb:
return_l += [cb]
if return_m:
return_l += [m]
return return_l
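# Minimal usage sketch for plot_map (illustrative only; the array and keyword
# values below are hypothetical and this call is not part of the original
# module):
#
#   import numpy as np
#   dummy = np.random.rand(72, 46)  # (lon, lat) for the 4x5 grid
#   plt_obj, cb, m = plot_map(dummy, res='4x5', units='nmol mol$^{-1}$',
#                             title='Example surface field', return_m=True)
#
# The returned list always starts with the pyplot module, followed by the
# colorbar (unless no_cb=True) and the basemap instance (if return_m=True).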
def sonde_plot(fig, ax, arr, n=0, title=None, subtitle=None,
f_size=10, color=None, err_bar=False, obs=True,
legend=False, units='nmol mol$^{-1}$', stddev=True,
c_l=['k', 'red', 'green', 'blue', 'purple'], xlimit=None,
loc='upper left', c_off=37, label=None, ancillary=True,
plt_txt_x=0.5, plt_txt_y=0.94,
ylabel=True, xlabel=True, hPa_labels=True,
debug=False,
# redundant arguments?
rasterized=True, tropics=False,
):
"""
Create plot of vertical data for sonde observations/model
Parameters
-------
hPa_labels (bool): include labels for hPa on axis?
plt_txt_x, plt_txt_y (float): axes-fraction coordinates for the subtitle text
ancillary (bool): add ancillary labels etc. to plot
label (str): label to use for line / legend.
c_off (int): index up to which to plot model levels (e.g. 37 for the troposphere)
xlimit (float): value to limit x axis to (e.g. 100 ppbv)
c_l (list): colours to use (indexed by n)
units (str): units to use for axis labels
obs (bool): override plot settings with those for observations.
err_bar (bool): apply bar to show quartile range of plots
stddev (bool): as above (see err_bar)
color (str/color instance): color for plotted line
f_size (float): fontsize
tropics (bool): mask for tropics?
title (str): title for plot
subtitle (str): subtitle for plot (e.g. location)
n (int): number of plot within window plot figure
fig (figure instance): fig. to use
ax (axis instance): axis to use
ylabel, xlabel (bool): include axis labels for plot?
Returns
-------
Notes
-----
- should be re-written (age >3 years) to work with updated AC_tools
"""
# Get overall vars
alt, press = [gchemgrid(i) for i in ('c_km_geos5_r', 'c_hPa_geos5_r')]
# Cut off at Tropopause?
if obs:
arr = arr[:c_off, :]
else:
arr = arr[:c_off]
alt, press = [i[:c_off] for i in (alt, press)]
# if color not set, get color
if isinstance(color, type(None)):
color = c_l[n]
if obs:
# print [len(i) for i in arr[:,0], alt ]
ax.plot(arr[:, 0], alt, color=color, label=label)
# limit cb to top of troposphere
min, max = [(np.ma.min(i), np.ma.max(i)) for i in [arr[:, 0]]][0]
else:
ax.plot(arr, alt, label=label, color=color)
# limit cb to top of troposphere
min, max = [(np.ma.min(i), np.ma.max(i)) for i in [arr]][0]
# Add legend
if legend:
plt.legend(loc=loc, fontsize=f_size*.75)
# Sonde data = mean, 1st and 3rd Q
if err_bar:
if stddev:
ax.errorbar(arr[:, 0], alt, xerr=arr[:, 3], fmt='o', color=color,
elinewidth=0.75, markersize=2, alpha=0.75)
else:
ax.errorbar(arr[:, 0], alt, xerr=[arr[:, 1], arr[:, 2]], fmt='o',
color=color, elinewidth=.75, markersize=5, alpha=0.75)
# Beautify plot ( e.g. add hPa, units, etc... )
if ancillary:
if not isinstance(title, type(None)):
plt.title(title, fontsize=f_size, y=1.0)
if not isinstance(subtitle, type(None)):
plt.text(plt_txt_x, plt_txt_y,
subtitle, ha='center', va='center',
transform=ax.transAxes, fontsize=f_size*.65)
if ylabel:
plt.ylabel('Altitude (km)', fontsize=f_size*.75)
else:
plt.tick_params(axis='y', which='both', labelleft='off')
if xlabel:
plt.xlabel(units, fontsize=f_size*.75)
else:
plt.tick_params(axis='x', which='both', labelbottom='off')
if xlimit == None:
# plt.xlim( min-(min*0.02), max+(max*0.02) )
plt.xlim(0.1, max+(max*0.50))
else:
plt.xlim(0.1, xlimit)
if hPa_labels:
ax2 = ax.twinx()
press = [myround(i, 100) for i in press][::-1]
ax2.set_yticks(press[::10])
ax2.set_yticklabels(press[::10])
majorFormatter = mpl.ticker.FormatStrFormatter('%d')
ax2.yaxis.set_minor_formatter(majorFormatter)
ax2.set_ylabel('Press. (hPa)', fontsize=f_size*.75)
ax2.invert_yaxis()
if ylabel:
pass
else:
ax.tick_params(axis='y', which='both', labelleft='off')
ax2.tick_params(axis='y', which='both', labelleft='off')
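# Minimal usage sketch for sonde_plot (illustrative only; obs_arr is a
# hypothetical (levels, 4) array whose columns follow the indexing used
# above - column 0 = mean, 1/2 = lower/upper quartile offsets, 3 = standard
# deviation - and this call is not part of the original module):
#
#   fig, ax = plt.subplots()
#   sonde_plot(fig, ax, obs_arr, obs=True, err_bar=True,
#              title='O3 sonde profile', label='Observations')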
def monthly_plot(ax, data, f_size=20, pos=0, posn=1, lw=1, ls='-', color=None,
title=None, subtitle=None, legend=False, xrotation=90,
window=False, label=None, ylabel=None, xlabel=True,
title_loc_y=1.09, plt_txt_x=0.5, plt_txt_y=1.05,
plot_Q1_Q3=False, low=None, high=None, loc='upper right'):
"""
Plot up seaonal (monthly) data.
Parameters
----------
ax (axis object): matplotlib axis object to plot onto
data (nd.array): numpy array of data to plot
pos (int): position number of subplot
posn (int): number of subplots being plotted
title (str): title string for plot
subtitle (str): subtitle string for plot (insert in plot)
ls (str): linestyle to use
color (str): colour for line
xrotation (float): rotation of x axis ticks
ylabel (bool): label y axis
xlabel (bool): label x axis
label (str): label for line
title_loc_y (int): y axis position for title str
plt_txt_x (float): x axis position for subtitle str
plt_txt_y (float): y axis position for subtitle str
plot_Q1_Q3 (bool): plot quartiles on for data?
low (np.array): array of low extents to plot as shaded region
high (np.array): array of high extents to plot as shaded region
Returns
-------
(colormap)
Notes
-----
"""
# setup color list if not provided
if isinstance(color, type(None)):
color = color_list(posn)[pos]
# if this is a window plot, then reduce text size
if window:
f_size = int(f_size/2)
# Plot up provide monthly data
plt.plot(np.arange(1, len(data)+1), data, color=color, lw=lw, ls=ls,
label=label)
# Also add 5th and 95th %ile (or what was provided as "low" and "high")
if plot_Q1_Q3: # Plot quartiles as shaded area?
ax.fill_between(np.arange(1, len(data)+1), low, high, alpha=0.2,
color=color)
# Beautify
ax.set_xticklabels([i.strftime("%b")
for i in [datetime.datetime(2009, int(i), 0o1)
for i in np.arange(1, 13)]])
plt.xticks(list(range(1, 13)), fontsize=f_size)
plt.xticks(rotation=xrotation)
if not xlabel:
plt.tick_params(axis='x', which='both', bottom='on', top='off',
labelbottom='off')
plt.xlim(0.5, 12.5)
if ylabel != None:
plt.ylabel(ylabel, fontsize=f_size)
plt.yticks(fontsize=f_size)
print((pos, posn-1))
if not isinstance(title, type(None)):
t = plt.title(title, fontsize=f_size)
t.set_y(title_loc_y)
if not isinstance(subtitle, type(None)):
plt.text(plt_txt_x, plt_txt_y, subtitle, ha='center',
va='center', transform=ax.transAxes, fontsize=f_size*.65)
if legend:
plt.legend(loc=loc, fontsize=int(f_size/1.5))
def timeseries_seasonal_plot(ax, dates, data, f_size=20, pos=0, posn=1,
title=None, legend=False, everyother=24, x_nticks=12,
window=False, label=None, ylabel=None, loc='upper left',
lw=1, ls='-', color=None, showmeans=False, boxplot=True,
plt_median=False, plot_Q1_Q3=False, pcent1=25, pcent2=75,
ylim=None, xtickrotation=45, alt_text=None, alt_text_x=.5,
alt_text_y=.5, xlabel=None, rm_yticks=False, log=False,
debug=False):
"""
Plot up timeseries of seasonal data.
Parameters
----------
ax (axis object): axis to plot onto
dates (np.array): array of dates (as datetime.datetime objects)
data (np.array): 1D array of data
plot_Q1_Q3 (bool): plot up quartiles on timeseries plot
Returns
-------
(None)
"""
# Process data - reduce resolution to daily, and get std
df = DataFrame(data, index=dates)
# Force use of a standard year
months = list(range(1, 13))
datetime_months = [datetime.datetime(2009, int(i), 1) for i in months]
labels = [i.strftime("%b") for i in datetime_months]
if debug:
print(labels)
# Get Data by month
monthly = [df[df.index.month == i] for i in months]
if debug:
print([i.shape for i in monthly])
if boxplot:
bp = ax.boxplot(monthly, positions=months, showmeans=showmeans)
else:
# remove nans to allow for percentile calc.
data_nan = [np.ma.array(i).filled(np.nan) for i in monthly]
data_nan = [i.flatten() for i in data_nan]
if plt_median:
plt.plot(months,
[np.nanpercentile(i, 50, axis=0) for i in data_nan],
color=color, lw=lw, ls=ls, label=label)
if plot_Q1_Q3: # Plot quartiles as shaded area?
low = [np.nanpercentile(i, pcent1, axis=0)
for i in data_nan]
high = [np.nanpercentile(i, pcent2, axis=0)
for i in data_nan]
ax.fill_between(months, low, high, alpha=0.2,
color=color)
else:
plt.plot(months, [i.mean() for i in monthly], color=color,
lw=lw, ls=ls, label=label)
# Beautify plot
ax.set_xticks(months)
if xlabel:
ax.set_xticklabels(labels, rotation=xtickrotation)
else:
ax.tick_params(axis='x', which='both', labelbottom='off')
if not isinstance(ylim, type(None)):
ax.set_ylim(ylim)
if debug:
print(('!'*50, alt_text, alt_text_x, alt_text_y))
if not isinstance(alt_text, type(None)):
if debug:
print(('!'*50, alt_text, alt_text_x, alt_text_y, f_size))
plt.text(alt_text_x, alt_text_y,
alt_text, ha='center', va='center',
transform=ax.transAxes, fontsize=f_size*.5)
if legend:
if debug:
print(('>'*500, 'Adding legend', '<'*50, loc))
plt.legend(fontsize=f_size*.75, loc=loc)
if not isinstance(title, type(None)):
plt.title(title)
if not isinstance(ylabel, type(None)):
plt.ylabel(ylabel, fontsize=f_size*0.75) # Why is this x0.75?
else:
if rm_yticks:
ax.tick_params(axis='y', which='both', labelleft='off')
# Log scale?
if log:
ax.set_yscale('log')
else:
ax.set_yscale('linear')
def timeseries_daily_plot(fig, ax, dates, data, pos=1, posn=1,
bin_size=1/24., widths=0.01, rotatexlabel='vertical',
white_fill=True, alpha=0.1, linewidth=0.5, xlabel=True,
title=None, alt_text=None, f_size=7.5, units='ppbv',
showmeans=False, plt_median=False, boxplot=True,
ylabel=True, color='blue', label=None,
plot_Q1_Q3=True, pcent1=25, pcent2=75, debug=False):
"""
Plot up daily timeseries of values. Requires data, and dates in numpy
array form. Dates must be as datetime.datetime objects.
Notes:
- Boxplot is the default presentation of data
- Otherwise a median is used
"""
# get_day_fraction(i)
dates = np.array([get_day_fraction(i) for i in dates])
# bin data
b_all, bins_used = bin_data(data, dates, bin_size, debug=debug)
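# With the default bin_size of 1/24, each bin spans one hour of the day:
# e.g. a timestamp of 14:30 becomes a day fraction of 14.5/24 ~= 0.604 and
# falls into the 14:00-15:00 bin (a sketch of the intent, assuming
# get_day_fraction returns the fraction of the day elapsed).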
# Plot
if boxplot:
bp = ax.boxplot(b_all, positions=bins_used, widths=widths,
showmeans=showmeans, patch_artist=True)
else:
# remove nans to allow for percentile calc.
# print b_all
data_nan = [np.ma.array(i).filled(np.nan) for i in b_all]
data_nan = [i.flatten() for i in data_nan]
# Plot average
if plt_median:
ln = plt.plot(bins_used,
[np.nanpercentile(i, 50, axis=0) for i in data_nan],
color=color, label=None)
else:
ln = plt.plot(bins_used, [i.mean() for i in data_nan], color=color)
# Plot quartiles as shaded area?
if plot_Q1_Q3:
low = [np.nanpercentile(i, pcent1, axis=0) for i in data_nan]
high = [np.nanpercentile(i, pcent2, axis=0) for i in data_nan]
ax.fill_between(bins_used, low, high, alpha=0.2, color=color)
# Beautify
if not isinstance(title, type(None)):
plt.title(title, fontsize=f_size)
if not isinstance(alt_text, type(None)):
# ax.text(x=0.85,y=0.85, s=alt_text, fontsize=f_size*1.5 )
ax.annotate(alt_text, xy=(0.85, 0.85),
xycoords='axes fraction')
ax.set_xticklabels(np.arange(0, 24, 1))
plt.xticks(np.arange(0, 1, 1/24.), fontsize=f_size*.75,
rotation=rotatexlabel)
# plt.xlim(-0.05, 23/24.)
plt.xlim(-0.05, 1.0)
if xlabel:
ax.set_xlabel('Hour of day', labelpad=f_size)
else:
ax.tick_params(axis='x', which='both', labelbottom='off')
# Setup y axis
if ylabel:
ax.set_ylabel('{}'.format(units), labelpad=f_size)
else:
ax.tick_params(axis='y', which='both', labelleft='off')
# --- Highlight bins
bs = np.arange(0, 24, bin_size) # [ bs[0] - bin_size ] + bs
[plt.axvline(x=i, color='k', linewidth=linewidth, alpha=alpha,
linestyle='dashed') for i in bs]
def timeseries_month_plot(ax, dates, data, f_size=20, pos=0, posn=1,
title=None, legend=False, everyother=7*24, x_nticks=12,
window=False, label=None, ylabel=None, loc='upper left',
lw=1, ls='-', color=None, start_month=7, end_month=7,
boxplot=True, showmeans=False, alt_text=None, r_plt=False,
unitrotation=45, color_by_z=False, fig=None, xlabel=True,
second_title='', add_dates2title=True, positive=None,
debug=False):
"""
Plot up month timeseries of values. Requires data, and dates in numpy
array form. Dates must be as datetime.datetime objects.
NOTE(s):
- This just plots up a timeseries, so why is the name "timeseries_*month*_plot"?
- Update this!
"""
# Process data - reduce resolution to daily, and get std
df = DataFrame(data, index=dates)
# remove dates outside of the range (start_month <= month <= end_month)
def get_month(x):
return x.month
df['month'] = df.index.map(get_month)
df = df[df['month'] <= end_month]
df = df[df['month'] >= start_month]
# remove 'month' column
df = df.drop('month', 1)
# label once per week
days = [i.to_datetime() for i in df.index]
labels = [i.strftime("%-d %b") for i in days][::everyother]
# Color in line another provided variables
if color_by_z:
if debug:
print('Coloring line by normalised z values')
if debug:
print((df.columns))
x = df.index
y, z = [df[df.columns[i]] for i in range(2)]
cmap = get_colormap(z.copy(), positive=positive)
if debug:
print([(i.min(), i.max()) for i in (x, y, z)])
colorline(x, y, z, cmap=cmap, linewidth=lw, ax=ax,
norm=plt.Normalize(0, 360), fig=fig)
else:
plt.plot(days, df.values, label=label, color=color, ls=ls, lw=lw)
# set xticks
if xlabel:
plt.xticks(days[::everyother], labels, rotation=unitrotation)
else:
plt.tick_params(axis='x', which='both', labelbottom='off')
# Beautify plot
if not isinstance(title, type(None)):
if add_dates2title:
title += ' for {}-{} {}'.format(num2month(start_month),
num2month(end_month), second_title)
plt.title(title)
if not isinstance(alt_text, type(None)):
plt.figtext(x=0.05, y=0.85, s=alt_text, fontsize=f_size*.75)
if not isinstance(ylabel, type(None)):
plt.ylabel(ylabel, fontsize=f_size*.75)
if legend:
plt.legend(fontsize=f_size*.75, loc=loc)
def north_pole_surface_plot(arr, return_m=False, grid=True, centre=False,
cmap=None, format='%.2f', m=None, fixcb=False, nbins=25,
res='4x5', ax=None, alpha=1, nticks=10, everyother=1,
drawcountries=True, set_cb_ticks=True, title=None, \
rotatecbunits='horizontal', extend='neither',
interval=1, resolution='l', shrink=0.4, window=False, \
lon_0=0, boundinglat=40, degrade_resolution=False, \
no_cb=False, cb=None, units=None, f_size=20, \
debug=False, **Kwargs):
"""
Plot up data at north pole as a 2D slice.
NOTES:
- Requires data (arr) as numpy array. Arr should be full global size
(lon, lat) for given resolution
"""
# ---- Grid/Mesh values for Lat, lon, & alt + cb
if isinstance(cmap, type(None)):
cmap = get_colormap(arr.copy())
err_list = [[i.min(), i.max(), i.mean(), type(i)] for i in [arr]]
if debug:
print(('>'*5, err_list))
lon, lat, NIU = get_latlonalt4res(res, centre=centre)
# restrict lat to projection
lat = lat[get_gc_lat(boundinglat, res=res):]
lon, lat = np.meshgrid(lon, lat)
# ---------------- Basemap setup ----------------
if isinstance(m, type(None)):
m = Basemap(projection='npstere', boundinglat=boundinglat, lon_0=lon_0,
resolution=resolution, round=True)
# Beautify basemap plot
m.drawcoastlines()
m.drawcountries()
parallels = np.arange(-90, 91, 20*interval)
# meridians = np.arange(-180,181,20*interval)
meridians = np.arange(-180, 180, 20*interval)
m.drawparallels(parallels, labels=[False, False, False, False],
fontsize=f_size*.25)
m.drawmeridians(meridians, labels=[True, False, True, True],
fontsize=f_size*.25)
# set x and y
x, y = m(lon, lat)
if debug:
print((1, len(x), len(y)))
# Printing for debugging?
debug_ptr = [len(i) for i in (x, y, lat, lon)]
debug_ptr += [[i.min(), i.mean(), i.max(), i.shape] for i in (arr,
arr[:,
get_gc_lat(boundinglat,
res=res):])]
if debug:
print((2, 'len:', debug_ptr))
# -------- colorbar variables...
# set cb label sizes
if not no_cb:
# if fixcb:
if not isinstance(fixcb, type(None)):
tickmin, tickmax = fixcb[0], fixcb[1]
else:
tickmin, tickmax = arr.min(), arr.max()
# ----------------------- Linear plots -------------------------------
# plt.contourf( x, y, arr[:,get_gc_lat(boundinglat,res=res):].T, alpha=alpha)
# if fixcb:
polar_arr = arr[:, get_gc_lat(boundinglat, res=res):].T
if debug:
print([(i.min(), i.max(), i.mean()) for i in [polar_arr]])
if not isinstance(fixcb, type(None)):
poly = m.pcolor(x, y, polar_arr, cmap=cmap, alpha=alpha,
vmin=fixcb[0], vmax=fixcb[1])
else:
poly = m.pcolor(x, y, polar_arr, cmap=cmap, alpha=alpha)
# -------------- Log plots ---------------------------------------------
# ---------------- Colorbars ----------------
if not no_cb:
if isinstance(cb, type(None)):
cb = plt.colorbar(poly, ax=m.ax, shrink=shrink, alpha=alpha,
format=format, ticks=np.linspace(tickmin, tickmax,
nticks))
for t in cb.ax.get_yticklabels():
t.set_fontsize(f_size)
if units != None:
cb.ax.set_ylabel(units, rotation=rotatecbunits, labelpad=f_size)
# Set number of ticks
if set_cb_ticks:
tick_locator = mpl.ticker.MaxNLocator(nticks=nticks)
cb.locator = tick_locator
cb.update_ticks()
if not isinstance(title, type(None)):
plt.title(title, fontsize=f_size*1.5)
return_list = [poly]
if not no_cb:
return_list += [cb]
if (return_m):
return_list += [m]
return return_list
# --------
# X.XX - South Pole surface plot
# --------
def south_pole_surface_plot(arr, return_m=False, grid=True, centre=False,
cmap=None, format='%.2f', res='4x5', ax=None, alpha=1,
title=None,
fixcb=False, nbins=25, nticks=10, drawcountries=True,
set_cb_ticks=True,
rotatecbunits='horizontal', extend='neither',
interval=1, resolution='l', shrink=0.4, window=False,
everyother=1,
lon_0=0, boundinglat=-40, degrade_resolution=False,
no_cb=False, cb=None,
units=None, f_size=20, debug=False, **Kwargs):
"""
Plot up data at south pole 2D slice.
NOTES:
- Requires data (arr) as numpy array. Arr should be full global size
(lon, lat) for given resolution
"""
# ---- Grid/Mesh values for Lat, lon, & alt + cb
if isinstance(cmap, type(None)):
cmap = get_colormap(arr.copy())
debug_ptr = [[i.min(), i.max(), i.mean(), type(i)] for i in [arr]]
if debug:
print(('>'*5, debug_ptr))
lon, lat, NIU = get_latlonalt4res(res, centre=centre)
# restrict lat to projection
lat = lat[:get_gc_lat(boundinglat, res=res) + 2]
lon, lat = np.meshgrid(lon, lat)
# ---------------- Basemap setup ----------------
m = Basemap(projection='spstere', boundinglat=boundinglat, lon_0=lon_0,
resolution=resolution, round=True)
# set x and y
x, y = m(lon, lat)
if debug:
print((1, len(x), len(y)))
# Beautify plot
m.drawcoastlines()
m.drawcountries()
parallels = np.arange(-90, 91, 20*interval)
# meridians = np.arange(-180,181,20*interval)
meridians = np.arange(-180, 180, 20*interval)
m.drawparallels(parallels, labels=[False, False, False, False],
fontsize=f_size*.25)
m.drawmeridians(meridians, labels=[True, False, True, True],
fontsize=f_size*.25)
# printing if debugging...
debug_ptr = [len(i) for i in (x, y, lat, lon)]
debug_ptr += [[i.min(), i.mean(), i.max()] for i in [arr]]
if debug:
print((2, 'len:', debug_ptr))
# -------- colorbar variables...
# set cb label sizes
# if fixcb:
if not isinstance(fixcb, type(None)):
tickmin, tickmax = fixcb[0], fixcb[1]
else:
tickmin, tickmax = arr.min(), arr.max()
# -------------------- Linear plots -------------------------------
# plt.contourf( x, y, arr[:,get_gc_lat(boundinglat,res=res):].T, alpha=alpha)
# if fixcb:
polar_arr = arr[:, :get_gc_lat(boundinglat, res=res)+2].T
if debug:
print([(i.min(), i.max(), i.mean()) for i in [polar_arr]])
if not isinstance(fixcb, type(None)):
poly = m.pcolor(x, y, polar_arr, cmap=cmap, alpha=alpha,
vmin=fixcb[0], vmax=fixcb[1])
else:
poly = m.pcolor(x, y, polar_arr, cmap=cmap, alpha=alpha)
# ----------- Log plots ---------------------------------------------
# ---------------- Colorbars ----------------
if not no_cb:
if isinstance(cb, type(None)):
cb = plt.colorbar(poly, ax=m.ax, shrink=shrink, alpha=alpha,
format=format, ticks=np.linspace(tickmin, tickmax,
nticks))
for t in cb.ax.get_yticklabels():
t.set_fontsize(f_size)
        if units is not None:
cb.ax.set_ylabel(units, rotation=rotatecbunits, labelpad=f_size)
# Set number of ticks
if (not no_cb):
if set_cb_ticks:
            tick_locator = mpl.ticker.MaxNLocator(nbins=nticks)
cb.locator = tick_locator
cb.update_ticks()
if not isinstance(title, type(None)):
plt.title(title, fontsize=f_size*1.5)
if (return_m):
return plt, cb, m
elif no_cb:
return plt
else:
return plt, cb
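# --------
# X.XX - Minimal usage sketch for the polar surface plotters
# --------
def example_south_pole_surface_plot():
    """
    Minimal sketch of calling south_pole_surface_plot on dummy data.
    NOTES:
    - Illustrative only: assumes a GEOS-Chem '4x5' grid of shape
    (72 lon, 46 lat) and the Basemap-era dependencies this module targets.
    Swap in a real (lon, lat) surface array for actual use.
    """
    dummy_arr = np.random.rand(72, 46)
    south_pole_surface_plot(dummy_arr, res='4x5', units='ppbv',
                            title='Example surface field', boundinglat=-40)
    plt.show()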
def plot_specs_surface_change_monthly2pdf(arr, res='4x5', dpi=160,
no_dstr=True, f_size=20, pcent=True, specs=None,
dlist=None,
savetitle='', diff=False, extend='neither',
column=False,
scale=1, units=None, set_window=False,
lat_0=None, lat_1=None,
mask_invalids=False, debug=False):
"""
Create multipage PDF with each page containing a 2D (lon,lat) slice
plot for given species in list of "specs"
Takes 5D array ( species ,lon , lat, alt, time)
"""
logging.info('plot_specs_surface_change_monthly2pdf called')
# setup pdfs + titles
if column:
savetitle = 'Column_by_spec'+savetitle
else:
savetitle = 'Surface_by_spec'+savetitle
pdff = plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=no_dstr)
left = 0.05
right = 0.9
bottom = 0.05
top = 0.875
hspace = 0.315
wspace = 0.1
# Loop species
for n, spec in enumerate(specs):
if debug:
print((n, spec))
        # Get units/scale for species + setup fig
if isinstance(units, type(None)):
if column and (not pcent):
units, scale = 'DU', 1
elif pcent:
units, scale = '%', 1
else:
units, scale = tra_unit(spec, scale=True, global_unit=True)
# setup masking...
cbarr = arr[n, :, :, 0, :].copy() * scale
if pcent:
mask_invalids = True
# mask for changes greater than 500%
if len(cbarr[cbarr > 500]) > 0:
cbarr = np.ma.masked_where(cbarr > 500, cbarr)
extend = 'max'
elif len(cbarr[cbarr < -500]) > 0:
cbarr = np.ma.masked_where(cbarr < -500, cbarr)
if extend == 'max':
extend = 'both'
else:
extend = 'min'
else:
extend = 'neither'
else:
extend = 'neither'
cbarr = cbarr
# Set the correct title
ptitle = '{}'.format(latex_spec_name(spec))
if column:
ptitle += ' column'
else:
ptitle += ' surface'
if diff:
ptitle += ' $\Delta$ concentration'
else:
ptitle += ' concentration'
ptitle += ' ({})'.format(units)
fig = plt.figure(figsize=(22, 14), dpi=dpi,
facecolor='w', edgecolor='w')
        # set cb ranges for whole data period
fixcb = [(i.min(), i.max()) for i in [cbarr]][0]
# Kludge, force max cap at .2
# if units == 'ratio':
# fixcb = [ fixcb[0], 0.2 ]
# extend = 'max'
if (units == 'pmol mol$^{-1}$ m$^{-3}$') and (spec == 'AERI'):
fixcb = [fixcb[0], 500]
extend = 'max'
cmap = get_colormap(fixcb)
        # Loop through months
        for m, month in enumerate(dlist):
            fig.add_subplot(4, 3, m+1)
            # plot up spatial surface change
            map_plot(arr[n, :, :, 0, m].T*scale, cmap=cmap, case=9, res=res,
                     no_cb=True, f_size=f_size, fixcb=fixcb, window=True,
                     set_window=set_window, lat_0=lat_0, lat_1=lat_1,
                     mask_invalids=mask_invalids, debug=debug)
plt.title(month.strftime("%b"), fontsize=f_size*2)
# Add single colorbar
mk_cb(fig, units=units, left=0.9, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], f_size=f_size, extend=extend)
        # sort out aesthetics - adjust plots and add title
fig.subplots_adjust(bottom=bottom, top=top, left=left,
right=right, hspace=hspace, wspace=wspace)
fig.suptitle(ptitle, fontsize=f_size*2, x=.55, y=.95)
# save out figure
plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=no_dstr)
# close fig
plt.clf()
plt.close()
del fig
# save entire pdf
plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=no_dstr)
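# --------
# X.XX - Minimal usage sketch for the monthly surface-change PDF
# --------
def example_plot_specs_surface_change_monthly2pdf():
    """
    Minimal sketch of driving plot_specs_surface_change_monthly2pdf with
    dummy data.
    NOTES:
    - Illustrative only: assumes a '4x5' grid (72 lon x 46 lat), 12 monthly
    time slices, a single surface level and that 'O3' is a known species
    name. Replace the random array with real model output for actual use.
    """
    from datetime import datetime
    specs = ['O3']
    dlist = [datetime(2017, month, 1) for month in range(1, 13)]
    arr = np.random.rand(len(specs), 72, 46, 1, 12)
    plot_specs_surface_change_monthly2pdf(arr, specs=specs, dlist=dlist,
                                          res='4x5', pcent=False,
                                          units='ppbv', savetitle='_example')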
def plot_specs_zonal_change_monthly2pdf(Vars, res='4x5', dpi=160,
no_dstr=True, f_size=20, pcent=False, specs=None,
dlist=None,
t_ps=None, savetitle='', diff=False,
extend='neither',
set_window=False, lat_0=None, lat_1=None,
mask_invalids=False,
set_lon=None, units=None, debug=False):
"""
    Create multipage PDF with each page containing a zonal plot for given species in
list of "specs"
NOTES:
- Takes 5D array ( species ,lon , lat, alt, time)
- Needs descriptions update.
"""
savetitle = 'Zonal_by_spec'+savetitle
pdff = plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=no_dstr)
left = 0.05
right = 0.9
bottom = 0.05
top = 0.875
hspace = 0.315
wspace = 0.2
# Loop species
for n, spec in enumerate(specs):
if debug:
print((n, spec, Vars.shape))
# Get units/scale for species + setup fig
scale = 1
if isinstance(units, type(None)):
units, scale = tra_unit(spec, scale=True, global_unit=True)
if pcent:
units, scale = '%', 1
mask_invalids = True
# Set the correct title
ptitle = '{}'.format(latex_spec_name(spec))
if diff:
ptitle += ' zonal $\Delta$ concentration'
else:
ptitle += ' zonal concentration'
ptitle += ' ({})'.format(units)
fig = plt.figure(figsize=(22, 14), dpi=dpi,
facecolor='w', edgecolor='w')
cbVars = Vars[n, :, :, :, :].copy()*scale
        # set ranges for whole data period
if pcent:
if len(cbVars[cbVars > 500]) > 0:
cbVars = np.ma.masked_where(cbVars > 500, cbVars)
extend = 'max'
elif len(cbVars[cbVars < -500]) > 0:
cbVars = np.ma.masked_where(cbVars < -500, cbVars)
if extend == 'max':
extend = 'both'
else:
extend = 'min'
else:
extend = 'neither'
else:
extend = 'neither'
if set_lon:
set_lon = get_gc_lon(set_lon, res=res)
fixcb = [(i.min(), i.max()) for i in [cbVars[set_lon, ...]]][0]
print(('SETTING LON to GC index: ', set_lon))
else:
cbVars = cbVars.mean(axis=0)
if set_window:
gclat_0, gclat_1 = [get_gc_lat(i, res=res) for i in (lat_0, lat_1)]
cbVars = cbVars[..., gclat_0:gclat_1, :]
fixcb = [(i.min(), i.max()) for i in [cbVars]][0]
# Kludge, force max cap at .2
if units == 'ratio':
fixcb = [fixcb[0], 0.2]
extend = 'max'
cmap = get_colormap(fixcb)
        # Loop through months
for m, month in enumerate(dlist):
axn = [3, 4, m+1]
ax = fig.add_subplot(*axn)
# if pcent:
# print [ (np.min(i), np.max(i)) \
# for i in [ Vars[n,:,:,:,m].mean(axis=0)*scale ] ]
# print [ (np.min(i), np.max(i)) \
# for i in [ Vars[n,:,:,:,m].median(axis=0)*scale ] ]
if set_lon:
arr = Vars[n, set_lon, :, :, m]*scale
else:
arr = Vars[n, :, :, :, m].mean(axis=0)*scale
if set_window:
arr = arr[..., get_gc_lat(
lat_0, res=res): get_gc_lat(lat_1, res=res), :]
# plot up spatial surface change
zonal_plot(fig, ax, arr, title=month.strftime("%b"), debug=debug,
tropics=False, units=units, f_size=f_size, c_off=37, no_cb=True,
lat_0=lat_0, lat_1=lat_1, set_window=set_window, fixcb=fixcb,
extend=extend, window=True, lower_limited=True, res=res,
mask_invalids=mask_invalids, cmap=cmap)
# only show troposphere
greyoutstrat(fig, t_ps.mean(axis=0)[:, :, m], axn=axn, res=res)
# Add single colorbar
mk_cb(fig, units=units, left=0.915, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], f_size=f_size, extend=extend)
        # sort out aesthetics - adjust plots and add title
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right,
hspace=hspace, wspace=wspace)
fig.suptitle(ptitle, fontsize=f_size*2, x=.55, y=.95)
# save out figure
plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=no_dstr)
# close fig
plt.clf()
plt.close()
del fig
# save entire pdf
plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=no_dstr)
def plot_specs_poles_change_monthly2pdf(specs=None, arr=None, res='4x5',
dpi=160, no_dstr=True, f_size=20, pcent=False,
diff=False,
dlist=None, savetitle='', units=None,
perspective='north',
format=None, extend='neither', boundinglat=50,
verbose=True, debug=False):
"""
Takes a 5D np.array ( species, lon, lat, alt, time ) and plots up the
    output by species by month, and saves this as a multipage pdf
Parameters
-------
arr (array): 5D np.array ( species, lon, lat, alt, time )
res (str): the resolution if wd not given (e.g. '4x5' )
dpi (int): dots per inch of saved output PDF...
boundinglat (int):
    format (str): format of axis labels
dlist (list): list of dates (datetimes)
no_dstr (bool): date string in output filename ("no date string")
    f_size (float): fontsize
savetitle (str): string to add to filename of PDF
units (str): units label for colorbar
pcent (bool): setup the plot as if the input values were %
diff (bool): setup the plot as if the input values were a difference
boundinglat (int): latitude to show poles until.
perspective (str): looking at north or south pole?
    extend (str): colorbar extension setting ( 'neither', 'min', 'max', 'both' )
Returns
-------
(None)
Notes
-----
- Takes 5D array ( species ,lon , lat, alt, time)
    - needs update to description/aesthetics
"""
debug_ptr = (arr, no_dstr, f_size, pcent, res,
dpi, specs, dlist, savetitle)
if debug:
print(debug_ptr)
# Setup PDF filename for saving file
if perspective == 'north':
savetitle = 'North_'+savetitle
if perspective == 'south':
savetitle = 'South_'+savetitle
# Initialise PDF
pdff = plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=no_dstr)
    # Set aesthetics
left = 0.01
right = 0.925
bottom = 0.025
top = 0.95
hspace = 0.15
wspace = -0.1
# Loop species
for n, spec in enumerate(specs):
# Debug print statement?
if verbose:
print((n, spec, arr.shape, units, perspective, '<'))
# Get units/scale for species + setup fig
scale = 1
if pcent: # and (not units == 'DU') :
units = '%'
if isinstance(units, type(None)):
units, scale = tra_unit(spec, scale=True, global_unit=True)
parr = arr[n, :, :, 0, :]*scale
debug_ptr = [parr.shape, n, spec, units, scale]
debug_ptr += [(i.min(), i.max(), i.mean()) for i in [parr, arr]]
if debug:
print(debug_ptr)
# Set the correct title
ptitle = '{}'.format(latex_spec_name(spec))
# Create new figure
fig = plt.figure(figsize=(22, 14), dpi=dpi,
facecolor='w', edgecolor='w')
# Select north or south polar areas specified to define cb
if perspective == 'north':
cbarr = parr[:, get_gc_lat(boundinglat, res=res):, :].copy()
if perspective == 'south':
cbarr = parr[:, :get_gc_lat(-boundinglat, res=res), :].copy()
# Mask above and below 500/-500 % if values in array
if pcent:
if len(cbarr[cbarr > 500]) > 0:
cbarr = np.ma.masked_where(cbarr > 500, cbarr)
extend = 'max'
elif len(cbarr[cbarr < -500]) > 0:
cbarr = np.ma.masked_where(cbarr < -500, cbarr)
if extend == 'max':
extend = 'both'
else:
extend = 'min'
else:
extend = 'neither'
else:
extend = 'neither'
# Setup colormap
fixcb = np.array([(i.min(), i.max()) for i in [cbarr]][0])
if verbose:
print(('fixcb testing ', fixcb, parr.shape))
# Kludge, force max cap at .2
# if units == 'ratio':
# fixcb = [ fixcb[0], 0.2 ]
# extend = 'max'
cmap = get_colormap(fixcb)
        # Loop through months
for m, month in enumerate(dlist):
ax = fig.add_subplot(3, 4, m+1)
# Plot up spatial surface change
if perspective == 'north':
north_pole_surface_plot(parr[:, :, m], no_cb=True,
fixcb=fixcb, diff=diff, pcent=pcent, res=res,
f_size=f_size*2, cmap=cmap,
boundinglat=boundinglat)
if perspective == 'south':
south_pole_surface_plot(parr[:, :, m], no_cb=True,
fixcb=fixcb, diff=diff, pcent=pcent, res=res,
f_size=f_size*2, cmap=cmap,
boundinglat=-boundinglat)
plt.text(1.1, -0.01, month.strftime("%b"), fontsize=f_size*2,
transform=ax.transAxes, ha='right', va='bottom')
# Add single colorbar
mk_cb(fig, units=units, left=0.915, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], f_size=f_size*.75, extend=extend, format=format)
        # Sort out aesthetics - adjust plots and add title
fig.subplots_adjust(bottom=bottom, top=top, left=left, right=right,
hspace=hspace, wspace=wspace)
fig.suptitle(ptitle, fontsize=f_size*2, x=.475, y=.975)
# Save out figure
plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=no_dstr)
# Close fig
plt.clf()
plt.close()
del parr
# Save entire pdf
plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=no_dstr)
def X_Y_scatter(x, y, z=None, fig=None, ax=None, vmin=None, vmax=None,
left=0.1, width=0.60, bottom=0.1, height=0.60, widthII=0.2,
lim2std=10, trendline=True, f_size=20, lw=10, title=None,
line121=True, X_title=None, Y_title=None):
"""
Plot up a X Y scatter plot of x vs. y
"""
# rect_scatter = [left, bottom, width, height]
if isinstance(fig, type(None)):
fig = plt.figure(1, figsize=(8, 8))
if isinstance(ax, type(None)):
ax = plt.axes() # rect_scatter)
# Plot up - normalising colors against z if given
if isinstance(z, type(None)):
plt.scatter(x, y)
else:
        # Scale colors against the provided z values
        if isinstance(vmin, type(None)):
            vmin = float(np.ma.min(z))
        if isinstance(vmax, type(None)):
            vmax = float(np.ma.max(z))
        pts = ax.scatter(x, y, c=z, vmin=vmin, vmax=vmax)
if lim2std != False:
stds = [np.std(i) for i in (x, y)]
means = [np.mean(i) for i in (x, y)]
print((stds, means))
mins = [means[0]-(stds[0]*lim2std), means[1]-(stds[1]*lim2std)]
maxs = [means[0]+(stds[0]*lim2std), means[1]+(stds[1]*lim2std)]
        # do not let minimums be less than zero.
ind = [n for n, i in enumerate(mins) if (i < 0)]
if len(ind) > 0:
for n in ind:
mins[n] = 0
plt.xlim(mins[0], maxs[0])
plt.ylim(mins[1], maxs[1])
else:
min_, max_ = [[i.min(), i.max()] for i in [np.array([x, y])]][0]
plt.xlim(min_, max_)
plt.ylim(min_, max_)
if line121:
xmin, xmax = np.min(x), np.max(x)
line121 = np.arange(xmin/2, xmax*2)
plt.plot(line121, line121, color='red', ls='--', lw=lw)
# add trendline
if trendline:
Trendline(ax, x, y, order=1, intervals=700, f_size=f_size, lw=lw,
color='green')
if not isinstance(X_title, type(None)):
plt.xlabel(X_title, fontsize=f_size)
if not isinstance(Y_title, type(None)):
plt.ylabel(Y_title, fontsize=f_size)
if not isinstance(title, type(None)):
plt.title(title, fontsize=f_size)
plt.xticks(fontsize=f_size)
plt.yticks(fontsize=f_size)
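# --------
# X.XX - Minimal usage sketch for X_Y_scatter
# --------
def example_X_Y_scatter():
    """
    Minimal sketch of an X vs. Y scatter on dummy, correlated data.
    NOTES:
    - Illustrative only: the default settings also draw a 1:1 line and a fit
    line via this module's Trendline helper.
    """
    x = np.random.rand(100) * 10
    y = x + np.random.rand(100)
    X_Y_scatter(x, y, X_title='x (dummy)', Y_title='y (dummy)',
                title='Example scatter', lw=2)
    plt.show()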
def timeseries_plot(ax, dates, data, f_size=20, pos=0, posn=1,
title=None, legend=False, everyother=7*24, x_nticks=12,
window=False, label=None, ylabel=None, loc='upper left',
lw=1, ls='-', color=None, start_date=None, end_date=None,
boxplot=True, showmeans=False, alt_text=None, r_plt=False,
unitrotation=45, color_by_z=False, fig=None, xlabel=True,
positive=None, plt_median=False, add_Q1_Q3=False, pcent1=25,
pcent2=75,
debug=False):
"""
Plot up timeseries of values.
NOTES:
- Requires data, and dates in numpy array form.
- Dates must be as datetime.datetime objects.
"""
# Process data - reduce resolution to daily, and get std
df = DataFrame(data, index=dates)
# Take start and end dates from "dates" if not set in arguments.
if isinstance(start_date, type(None)):
start_date = dates[0]
if isinstance(end_date, type(None)):
end_date = dates[-1]
df = df[start_date:end_date]
# label once per week ( set by "everyother" )
    days = [i.to_pydatetime() for i in df.index]
labels = [i.strftime("%-d %b") for i in days][::everyother]
    # Color the line by another provided variable (z)
if color_by_z:
if debug:
print('Coloring line by normalised z values')
if debug:
print((df.columns))
x = df.index
y, z = [df[df.columns[i]] for i in range(2)]
cmap = get_colormap(z.copy(), positive=positive)
print([(i.min(), i.max()) for i in (x, y, z)])
colorline(x, y, z, cmap=cmap, linewidth=lw, ax=ax,
norm=plt.Normalize(0, 360), fig=fig) # np.min(z), 1500))
# colorline(x, y, linewidth=lw, ax=ax)
else:
if plt_median: # Plot average
pass
# ln = plt.plot( days, np.nanpercentile( df.values, 50, ),
# color=color, ls=ls, lw=lw, label=None )
else: # Plot all
plt.plot(days, df.values, label=label, color=color, ls=ls, lw=lw)
# Plot quartiles as shaded area?
        if add_Q1_Q3:
pass
# low =np.nanpercentile( df.values, pcent1, )
# high = np.nanpercentile( df.values, pcent2, )
# ax.fill_between( bins_used, low, high, alpha=0.2, color=color )
# Setup X axis
if xlabel:
plt.xticks(days[::everyother], labels, rotation=unitrotation,
fontsize=f_size)
else:
plt.tick_params(axis='x', which='both', labelbottom='off')
# Beautify plot
if not isinstance(title, type(None)):
plt.title(title + ' for {}-{}'.format(start_date.strftime(
'%d/%m/%y'), end_date.strftime('%d/%m/%y')),
fontsize=f_size)
# Alt text annotate as fig text?
if not isinstance(alt_text, type(None)):
plt.figtext(x=0.05, y=0.85, s=alt_text, fontsize=f_size*.75)
# Setup y axis
plt.yticks(fontsize=f_size)
if not isinstance(ylabel, type(None)):
plt.ylabel(ylabel, fontsize=f_size)
# Legend?
if legend:
plt.legend(fontsize=f_size*.75, loc=loc)
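# --------
# X.XX - Minimal usage sketch for timeseries_plot
# --------
def example_timeseries_plot():
    """
    Minimal sketch of plotting one week of dummy hourly data.
    NOTES:
    - Illustrative only: dates must be datetime.datetime objects; with hourly
    data "everyother=24" gives one x-axis label per day.
    """
    from datetime import datetime, timedelta
    dates = np.array([datetime(2017, 1, 1) + timedelta(hours=i)
                      for i in range(7*24)])
    data = np.sin(np.arange(7*24) / 12.) + np.random.rand(7*24)
    fig, ax = plt.subplots()
    timeseries_plot(ax, dates, data, everyother=24, label='dummy data',
                    ylabel='value', legend=True)
    plt.show()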
def plt_4Darray_surface_by_month(arr, res='4x5', dpi=160,
no_dstr=True, f_size=10, dlist=None, fixcb=None,
format=None,
savetitle='', extend='neither', wd=None, ax=None,
fig=None,
cb_sigfig=3, nticks=7, discrete_cmap=False,
units=None, set_window=False, lat_0=None, lat_1=None,
return_m=False, log=False, window=True, interval=3,
ylabel=True,
norm=None, fig_title=False, pdftitle='',
pdf=False, show=False, verbose=False, debug=False):
"""
    Create a window plot of surface map plots from a 4D array
"""
# Setup local variables + figure
# left=0.015; right=0.9; bottom=0.05; top=0.95; hspace=0.225; wspace=-0.01
left = 0.015
right = 0.87
bottom = 0.05
top = 0.95
hspace = 0.225
wspace = -0.01
    # Create figure if not provided
    if isinstance(fig, type(None)):
        fig = plt.figure(figsize=(14, 10), dpi=dpi, facecolor='w',
                         edgecolor='w')
    # Get datetime
    if isinstance(dlist, type(None)):
        dlist = get_gc_datetime(wd=wd)
    # set cb ranges for whole data period
    if isinstance(fixcb, type(None)):
        fixcb = [(i.min(), i.max()) for i in [arr]][0]
# Set readable levels for cb, then use these to dictate cmap
lvls = get_human_readable_gradations(vmax=fixcb[1],
vmin=fixcb[0], nticks=nticks,
cb_sigfig=cb_sigfig)
# Setup Colormap
cmap, fixcb_buffered = get_colormap(np.array(fixcb),
nticks=nticks, fixcb=fixcb,
buffer_cmap_upper=True)
if discrete_cmap:
cmap, norm = mk_discrete_cmap(nticks=nticks,
vmin=fixcb[0], vmax=fixcb[1], cmap=cmap)
debug_list = [(i.min(), i.max(), i.mean()) for i in [arr.mean(axis=0)]]
if debug:
print(debug_list)
    # Loop through months
for m, month in enumerate(dlist):
# add axis
axn = [4, 3, m+1]
ax = fig.add_subplot(*axn)
if verbose:
print((arr[..., m].mean(axis=0).shape, arr.shape))
# Only show x/y axis on edge plots
ylabel = False
xlabel = False
num_cols = 3
num_rows = 4
if (m in range(len(dlist))[-num_cols:]):
xlabel = True
if any([axn[-1] == i for i in range(1, len(dlist)+1)[::num_cols]]):
ylabel = True
# Plot up
map_plot(arr[..., 0, m].T, format=format, cmap=cmap, ax=ax,
fixcb=fixcb, return_m=return_m, log=log, window=window,
no_cb=True, norm=norm, f_size=f_size*.75, res=res,
fixcb_buffered=fixcb_buffered, interval=interval,
ylabel=ylabel, xlabel=xlabel, verbose=verbose, debug=debug)
# add month
plt.title(month.strftime("%b"), fontsize=f_size*1.5)
# Add single colorbar
# mk_cb(fig, units=units, left=0.9, cmap=cmap, vmin=fixcb[0], format=format,\
mk_cb(fig, units=units, left=0.87, cmap=cmap, vmin=fixcb[0], format=format,
vmax=fixcb[1], nticks=nticks, f_size=f_size, extend=extend)
if verbose:
print((nticks, fixcb, lvls))
    # Sort out aesthetics - adjust plots and add title
if fig_title:
fig.suptitle('{}'.format(latex_spec_name(spec)), fontsize=f_size*2,
x=.55, y=.95)
top = 0.9 # allow space for figure title
fig.subplots_adjust(bottom=bottom, top=top, left=left,
right=right, hspace=hspace, wspace=wspace)
# save as pdf ?
if pdf:
plot2pdf(title=pdftitle)
if show:
plt.show()
def plt_4Darray_zonal_by_month(arr, res='4x5', dpi=160,
no_dstr=True, f_size=15, dlist=None, fixcb=None,
savetitle='', extend='neither', wd=None, ax=None,
fig=None,
cb_sigfig=3, nticks=7, discrete_cmap=False,
units=None, set_window=False, lat_0=None, lat_1=None,
return_m=False, log=False, window=True, interval=3,
ylabel=True,
norm=None, fig_title=False, pdftitle='', t_ps=None,
xlabel=True,
format=None, orientation='vertical', trop_limit=True,
region='All',
pdf=False, show=False, verbose=False, debug=False):
"""
Create a window plot of zonal plots from a 4D array
"""
# Average over lon
arr = arr.mean(axis=0)
# Setup local variables + figure
left = 0.075
right = 0.875
bottom = 0.085
top = 0.955
hspace = 0.325
wspace = 0.1
    # Create figure if not provided
    if isinstance(fig, type(None)):
        fig = plt.figure(figsize=(7, 7), dpi=dpi, facecolor='w',
                         edgecolor='w')
    # Get datetime
    if isinstance(dlist, type(None)):
        dlist = get_gc_datetime(wd=wd)
    # set cb ranges for whole data period
    if isinstance(fixcb, type(None)):
        fixcb = [(i.min(), i.max()) for i in [arr]][0]
# Set readable levels for cb, then use these to dictate cmap
lvls = get_human_readable_gradations(vmax=fixcb[1],
vmin=fixcb[0], nticks=nticks,
cb_sigfig=cb_sigfig)
# Setup Colormap
cmap, fixcb_buffered = get_colormap(np.array(fixcb),
nticks=nticks, fixcb=fixcb,
buffer_cmap_upper=True)
if discrete_cmap:
cmap, norm = mk_discrete_cmap(nticks=nticks,
vmin=fixcb[0], vmax=fixcb[1], cmap=cmap)
if debug:
print([(i.min(), i.max(), i.mean()) for i in [arr]])
# Get time in the troposphere diagnostic if not provide as agrument
if isinstance(t_ps, type(None)):
t_ps = get_GC_output(wd, vars=['TIME_TPS__TIMETROP'], trop_limit=True)
    # Loop through months
for m, month in enumerate(dlist):
# add axis
axn = [4, 3, m+1]
ax = fig.add_subplot(*axn)
# set when to use y and x labels
xlabel = False
if m in range(12)[-3:]:
xlabel = True
ylabel = False
if m in range(12)[::3]:
ylabel = True
# Plot zonally
zonal_plot(arr[..., m], fig, ax=ax, set_window=set_window, log=log,
format=format, cmap=cmap, lat_0=lat_0, lat_1=lat_1,
fixcb=fixcb, f_size=f_size*.75, res=res, norm=norm,
fixcb_buffered=fixcb_buffered, no_cb=True, trop_limit=True,
window=window, interval=interval, xlabel=xlabel, ylabel=ylabel,
verbose=verbose, debug=debug)
# Only show troposphere
greyoutstrat(fig, t_ps.mean(axis=0).mean(axis=-1), axn=axn, res=res)
# add month
plt.title(month.strftime("%b"), fontsize=f_size*1.5)
# Add single colorbar
mk_cb(fig, units=units, left=0.895, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], nticks=nticks, f_size=f_size*1.25, extend=extend,
width=0.015*1.5, height=.95, bottom=0.11)
if debug:
print((nticks, fixcb, lvls))
    # sort out aesthetics - adjust plots and add title
fig.subplots_adjust(bottom=bottom, top=top, left=left,
right=right, hspace=hspace, wspace=wspace)
if fig_title:
fig.suptitle('{}'.format(latex_spec_name(spec)), fontsize=f_size*2,
x=.55, y=.95)
# save as pdf ?
if pdf:
plot2pdf(title=pdftitle)
if show:
plt.show()
def get_seasonal_plot(arr, fixcb=None, fig=None, f_size=15,
case='linear', format=None, extend='neither', units=None,
right=0.9, left=0.05, bottom=0.05, top=0.85, hspace=0.1,
wspace=0.1, log=False, title=None, dpi=80, debug=False):
"""
Takes any 4D array and plot a 4 subplot window plot by season
"""
    # Split by quarter (DJF, MAM, JJA, SON)
ars, seasons = split_4D_array_into_seasons(arr, annual_plus_seasons=False)
# create figure
if isinstance(fig, type(None)):
fig = plt.figure(figsize=(22, 14), dpi=dpi, facecolor='w',
edgecolor='w')
# fix color mapping
if isinstance(fixcb, type(None)):
fixcb = [(i.min(), i.max()) for i in [arr]][0]
cmap = get_colormap(fixcb)
# loop seasons
for n, arr in enumerate(ars):
# Plot up on new axis
ax = fig.add_subplot(2, 2, n+1)
map_plot(arr.T, title=seasons[n], units=None, window=True,
case=case, f_size=f_size, rotatecbunits='vertical',
no_cb=True, cmap=cmap, fixcb=fixcb, fig=fig)
# make color bar
mk_cb(fig, units=units, left=0.925, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], log=log, f_size=f_size*.5, extend=extend,
format=format, debug=debug)
# add title if provided
if not isinstance(title, type(None)):
fig.suptitle(title, fontsize=f_size*2, x=.55, y=.95)
# Adjust figure
fig.subplots_adjust(bottom=bottom, top=top, left=left,
right=right, hspace=hspace, wspace=wspace)
def plot_specs_surface_change_annual2pdf(arr, res='4x5', dpi=160,
no_dstr=True, f_size=20, pcent=True, specs=None,
dlist=None,
savetitle='', diff=False, extend='neither',
column=False,
scale=1, units=None, set_window=False,
lat_0=None, lat_1=None,
mask_invalids=False, debug=False):
"""
Create multipage PDF with each page containing a 2D (lon,lat) slice
    plot for given species in list of "specs". Takes a 5D array
    (species, lon, lat, alt, time).
"""
    logging.info('plot_specs_surface_change_annual2pdf called')
# setup pdfs + titles
if column:
savetitle = 'Column_by_spec'+savetitle
else:
savetitle = 'Surface_by_spec'+savetitle
pdff = plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=no_dstr)
left = 0.05
right = 0.9
bottom = 0.05
top = 0.875
hspace = 0.315
wspace = 0.1
# Loop species
for n, spec in enumerate(specs):
if debug:
print((n, spec))
        # Get units/scale for species + setup fig
if isinstance(units, type(None)):
if column and (not pcent):
units, scale = 'DU', 1
elif pcent:
units, scale = '%', 1
else:
units, scale = tra_unit(spec, scale=True, global_unit=True)
# setup masking...
cbarr = arr[n, :, :, 0, :].mean(axis=-1).copy() * scale
if pcent:
mask_invalids = True
# mask for changes greater than 500%
if len(cbarr[cbarr > 500]) > 0:
cbarr = np.ma.masked_where(cbarr > 500, cbarr)
extend = 'max'
elif len(cbarr[cbarr < -500]) > 0:
cbarr = np.ma.masked_where(cbarr < -500, cbarr)
if extend == 'max':
extend = 'both'
else:
extend = 'min'
else:
extend = 'neither'
else:
extend = 'neither'
cbarr = cbarr
# Set the correct title
ptitle = 'Annual Avg. {}'.format(latex_spec_name(spec))
if column:
ptitle += ' column'
else:
ptitle += ' surface'
if diff:
ptitle += ' $\Delta$ concentration'
else:
ptitle += ' concentration'
ptitle += ' ({})'.format(units)
fig = plt.figure(figsize=(22, 14), dpi=dpi,
facecolor='w', edgecolor='w')
        # set cb ranges for whole data period
fixcb = [(i.min(), i.max()) for i in [cbarr]][0]
# Kludge, force max cap at .2
# if units == 'ratio':
# fixcb = [ fixcb[0], 0.2 ]
# extend = 'max'
if (units == 'pmol mol$^{-1}$ m$^{-3}$') and (spec == 'AERI'):
fixcb = [fixcb[0], 500]
extend = 'max'
cmap = get_colormap(fixcb)
        # Plot the annual mean (no month loop needed here)
# for m, month in enumerate(dlist):
fig.add_subplot(111)
# plot up spatial surface change
map_plot(arr[n, :, :, 0, :].mean(axis=-1).T*scale, cmap=cmap, case=9,
res=res, no_cb=True, f_size=f_size, fixcb=fixcb, window=True,
set_window=set_window, lat_0=lat_0, lat_1=lat_1,
mask_invalids=mask_invalids, debug=debug)
# Add single colorbar
mk_cb(fig, units=units, left=0.905, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], f_size=f_size, extend=extend)
        # sort out aesthetics - adjust plots and add title
fig.suptitle(ptitle, fontsize=f_size*2, x=.55, y=.95)
# save out figure
plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=no_dstr)
# close fig
plt.clf()
plt.close()
del fig
# save entire pdf
plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=no_dstr)
def plot_specs_zonal_change_annual2pdf(Vars, res='4x5', dpi=160,
no_dstr=True, f_size=20, pcent=False, specs=None,
dlist=None,
t_ps=None, savetitle='', diff=False,
extend='neither',
set_window=False, lat_0=None, lat_1=None,
mask_invalids=False,
set_lon=None, units=None, debug=False):
"""
Create multipage PDF with each page containing a zonal plot for given species in list
of "specs"
NOTES:
- Takes 5D array ( species ,lon , lat, alt, time)
"""
savetitle = 'Annual_Zonal_by_spec'+savetitle
pdff = plot2pdfmulti(title=savetitle, open=True, dpi=dpi, no_dstr=no_dstr)
left = 0.05
right = 0.9
bottom = 0.05
top = 0.875
hspace = 0.315
wspace = 0.2
# Loop species
for n, spec in enumerate(specs):
if debug:
print((n, spec, Vars.shape))
# Get units/scale for species + setup fig
        scale = 1
if isinstance(units, type(None)):
units, scale = tra_unit(spec, scale=True, global_unit=True)
if pcent:
units, scale = '%', 1
mask_invalids = True
# Set the correct title
ptitle = 'Annual avg. {}'.format(latex_spec_name(spec))
if diff:
ptitle += ' zonal $\Delta$ concentration'
else:
ptitle += ' zonal concentration'
ptitle += ' ({})'.format(units)
fig = plt.figure(figsize=(22, 14), dpi=dpi,
facecolor='w', edgecolor='w')
cbVars = Vars[n, :, :, :, :].mean(axis=-1).copy()*scale
        # set ranges for whole data period
if pcent:
if len(cbVars[cbVars > 500]) > 0:
cbVars = np.ma.masked_where(cbVars > 500, cbVars)
extend = 'max'
elif len(cbVars[cbVars < -500]) > 0:
cbVars = np.ma.masked_where(cbVars < -500, cbVars)
if extend == 'max':
extend = 'both'
else:
extend = 'min'
else:
extend = 'neither'
else:
extend = 'neither'
if set_lon:
set_lon = get_gc_lon(set_lon, res=res)
fixcb = [(i.min(), i.max()) for i in [cbVars[set_lon, ...]]][0]
print(('SETTING LON to GC index: ', set_lon))
else:
cbVars = cbVars.mean(axis=0)
if set_window:
gclat_0, gclat_1 = [get_gc_lat(i, res=res) for i in (lat_0, lat_1)]
cbVars = cbVars[..., gclat_0:gclat_1, :]
fixcb = [(i.min(), i.max()) for i in [cbVars]][0]
# Kludge, force max cap at .2
if units == 'ratio':
fixcb = [fixcb[0], 0.2]
extend = 'max'
cmap = get_colormap(fixcb)
axn = [111]
ax = fig.add_subplot(*axn)
# if pcent:
# print [ (np.min(i), np.max(i)) \
# for i in [ Vars[n,:,:,:,m].mean(axis=0)*scale ] ]
# print [ (np.min(i), np.max(i)) \
# for i in [ Vars[n,:,:,:,m].median(axis=0)*scale ] ]
if set_lon:
arr = Vars[n, set_lon, ...].mean(axis=-1)*scale
else:
arr = Vars[n, ...].mean(axis=0).mean(axis=-1)*scale
if set_window:
arr = arr[..., get_gc_lat(lat_0, res=res): get_gc_lat(lat_1, res=res), :]
# plot up spatial surface change
zonal_plot(arr, fig, ax=ax, title=None, debug=debug, tropics=False,
units=units, f_size=f_size, c_off=37, no_cb=True, lat_0=lat_0,
lat_1=lat_1,
set_window=set_window, fixcb=fixcb, extend=extend, window=True,
lower_limited=True, res=res, mask_invalids=mask_invalids, cmap=cmap)
# only show troposphere
greyoutstrat(fig, t_ps.mean(axis=0).mean(axis=-1), axn=axn, res=res)
# Add single colorbar
mk_cb(fig, units=units, left=0.915, cmap=cmap, vmin=fixcb[0],
vmax=fixcb[1], f_size=f_size, extend=extend)
        # sort out aesthetics - adjust plots and add title
# fig.subplots_adjust( bottom=bottom, top=top, left=left, \
# right=right,hspace=hspace, wspace=wspace)
fig.suptitle(ptitle, fontsize=f_size*2, x=.55, y=.95)
# save out figure
plot2pdfmulti(pdff, savetitle, dpi=dpi, no_dstr=no_dstr)
# close fig
plt.clf()
plt.close()
del fig
# save entire pdf
plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi, no_dstr=no_dstr)
| 37.584922
| 173
| 0.538759
|
c1f044e55f4aea100484fb50028a17cd82ff80e2
| 4,720
|
py
|
Python
|
lib/config/default.py
|
jie311/YOLOP
|
75a281c86f6f24a82f15c4aa6a8c27219e924848
|
[
"MIT"
] | 811
|
2021-08-25T07:38:30.000Z
|
2022-03-31T10:30:26.000Z
|
lib/config/default.py
|
jie311/YOLOP
|
75a281c86f6f24a82f15c4aa6a8c27219e924848
|
[
"MIT"
] | 99
|
2021-08-28T05:57:12.000Z
|
2022-03-31T09:27:06.000Z
|
lib/config/default.py
|
jie311/YOLOP
|
75a281c86f6f24a82f15c4aa6a8c27219e924848
|
[
"MIT"
] | 189
|
2021-08-28T01:15:44.000Z
|
2022-03-30T17:40:13.000Z
|
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.LOG_DIR = 'runs/'
_C.GPUS = (0,1)
_C.WORKERS = 8
_C.PIN_MEMORY = False
_C.PRINT_FREQ = 20
_C.AUTO_RESUME = False           # Resume from the last training interrupt
_C.NEED_AUTOANCHOR = False     # Re-select the prior anchors (k-means) when training from scratch (epoch=0); set it to be true!
_C.DEBUG = False
_C.num_seg_class = 2
# Cudnn related params
_C.CUDNN = CN()
_C.CUDNN.BENCHMARK = True
_C.CUDNN.DETERMINISTIC = False
_C.CUDNN.ENABLED = True
# common params for NETWORK
_C.MODEL = CN(new_allowed=True)
_C.MODEL.NAME = ''
_C.MODEL.STRU_WITHSHARE = False #add share_block to segbranch
_C.MODEL.HEADS_NAME = ['']
_C.MODEL.PRETRAINED = ""
_C.MODEL.PRETRAINED_DET = ""
_C.MODEL.IMAGE_SIZE = [640, 640] # width * height, ex: 192 * 256
_C.MODEL.EXTRA = CN(new_allowed=True)
# loss params
_C.LOSS = CN(new_allowed=True)
_C.LOSS.LOSS_NAME = ''
_C.LOSS.MULTI_HEAD_LAMBDA = None
_C.LOSS.FL_GAMMA = 0.0 # focal loss gamma
_C.LOSS.CLS_POS_WEIGHT = 1.0 # classification loss positive weights
_C.LOSS.OBJ_POS_WEIGHT = 1.0 # object loss positive weights
_C.LOSS.SEG_POS_WEIGHT = 1.0 # segmentation loss positive weights
_C.LOSS.BOX_GAIN = 0.05 # box loss gain
_C.LOSS.CLS_GAIN = 0.5 # classification loss gain
_C.LOSS.OBJ_GAIN = 1.0 # object loss gain
_C.LOSS.DA_SEG_GAIN = 0.2 # driving area segmentation loss gain
_C.LOSS.LL_SEG_GAIN = 0.2 # lane line segmentation loss gain
_C.LOSS.LL_IOU_GAIN = 0.2 # lane line iou loss gain
# DATASET related params
_C.DATASET = CN(new_allowed=True)
_C.DATASET.DATAROOT = '/home/zwt/bdd/bdd100k/images/100k' # the path of images folder
_C.DATASET.LABELROOT = '/home/zwt/bdd/bdd100k/labels/100k' # the path of det_annotations folder
_C.DATASET.MASKROOT = '/home/zwt/bdd/bdd_seg_gt' # the path of da_seg_annotations folder
_C.DATASET.LANEROOT = '/home/zwt/bdd/bdd_lane_gt' # the path of ll_seg_annotations folder
_C.DATASET.DATASET = 'BddDataset'
_C.DATASET.TRAIN_SET = 'train'
_C.DATASET.TEST_SET = 'val'
_C.DATASET.DATA_FORMAT = 'jpg'
_C.DATASET.SELECT_DATA = False
_C.DATASET.ORG_IMG_SIZE = [720, 1280]
# training data augmentation
_C.DATASET.FLIP = True
_C.DATASET.SCALE_FACTOR = 0.25
_C.DATASET.ROT_FACTOR = 10
_C.DATASET.TRANSLATE = 0.1
_C.DATASET.SHEAR = 0.0
_C.DATASET.COLOR_RGB = False
_C.DATASET.HSV_H = 0.015 # image HSV-Hue augmentation (fraction)
_C.DATASET.HSV_S = 0.7 # image HSV-Saturation augmentation (fraction)
_C.DATASET.HSV_V = 0.4 # image HSV-Value augmentation (fraction)
# TODO: more augmet params to add
# train
_C.TRAIN = CN(new_allowed=True)
_C.TRAIN.LR0 = 0.001 # initial learning rate (SGD=1E-2, Adam=1E-3)
_C.TRAIN.LRF = 0.2 # final OneCycleLR learning rate (lr0 * lrf)
_C.TRAIN.WARMUP_EPOCHS = 3.0
_C.TRAIN.WARMUP_BIASE_LR = 0.1
_C.TRAIN.WARMUP_MOMENTUM = 0.8
_C.TRAIN.OPTIMIZER = 'adam'
_C.TRAIN.MOMENTUM = 0.937
_C.TRAIN.WD = 0.0005
_C.TRAIN.NESTEROV = True
_C.TRAIN.GAMMA1 = 0.99
_C.TRAIN.GAMMA2 = 0.0
_C.TRAIN.BEGIN_EPOCH = 0
_C.TRAIN.END_EPOCH = 240
_C.TRAIN.VAL_FREQ = 1
_C.TRAIN.BATCH_SIZE_PER_GPU = 24
_C.TRAIN.SHUFFLE = True
_C.TRAIN.IOU_THRESHOLD = 0.2
_C.TRAIN.ANCHOR_THRESHOLD = 4.0
# if training 3 tasks end-to-end, set all parameters as True
# Alternating optimization
_C.TRAIN.SEG_ONLY = False # Only train two segmentation branchs
_C.TRAIN.DET_ONLY = False # Only train detection branch
_C.TRAIN.ENC_SEG_ONLY = False # Only train encoder and two segmentation branchs
_C.TRAIN.ENC_DET_ONLY = False # Only train encoder and detection branch
# Single task
_C.TRAIN.DRIVABLE_ONLY = False # Only train da_segmentation task
_C.TRAIN.LANE_ONLY = False # Only train ll_segmentation task
_C.TRAIN.DET_ONLY = False # Only train detection task
_C.TRAIN.PLOT = True #
# testing
_C.TEST = CN(new_allowed=True)
_C.TEST.BATCH_SIZE_PER_GPU = 24
_C.TEST.MODEL_FILE = ''
_C.TEST.SAVE_JSON = False
_C.TEST.SAVE_TXT = False
_C.TEST.PLOTS = True
_C.TEST.NMS_CONF_THRESHOLD = 0.001
_C.TEST.NMS_IOU_THRESHOLD = 0.6
def update_config(cfg, args):
cfg.defrost()
# cfg.merge_from_file(args.cfg)
if args.modelDir:
cfg.OUTPUT_DIR = args.modelDir
if args.logDir:
cfg.LOG_DIR = args.logDir
# if args.conf_thres:
# cfg.TEST.NMS_CONF_THRESHOLD = args.conf_thres
# if args.iou_thres:
# cfg.TEST.NMS_IOU_THRESHOLD = args.iou_thres
# cfg.MODEL.PRETRAINED = os.path.join(
# cfg.DATA_DIR, cfg.MODEL.PRETRAINED
# )
#
# if cfg.TEST.MODEL_FILE:
# cfg.TEST.MODEL_FILE = os.path.join(
# cfg.DATA_DIR, cfg.TEST.MODEL_FILE
# )
cfg.freeze()
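if __name__ == '__main__':
    # Minimal usage sketch: update_config() above only reads args.modelDir and
    # args.logDir, so a small argparse namespace is enough to drive it here.
    # (The argument names below are illustrative, not a fixed CLI.)
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelDir', type=str, default='')
    parser.add_argument('--logDir', type=str, default='')
    args = parser.parse_args()
    update_config(_C, args)
    print(_C)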
| 29.873418
| 129
| 0.70678
|
d78d57336c4aee2ab077bfa01971cb295374c891
| 6,986
|
py
|
Python
|
backend/db.py
|
VrtanoskiAndrej/quack-hacks2020
|
235e402e8c43d8f04e0b3b116c6b0e924c843f69
|
[
"Apache-2.0"
] | null | null | null |
backend/db.py
|
VrtanoskiAndrej/quack-hacks2020
|
235e402e8c43d8f04e0b3b116c6b0e924c843f69
|
[
"Apache-2.0"
] | null | null | null |
backend/db.py
|
VrtanoskiAndrej/quack-hacks2020
|
235e402e8c43d8f04e0b3b116c6b0e924c843f69
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, abort, jsonify, request
from flask_sqlalchemy import SQLAlchemy
import numpy as np
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///users.db'
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
courses = db.Table(
'courses',
db.Column('course_id', db.Integer, db.ForeignKey('course.course_id'), primary_key=True),
    db.Column('user_id', db.Integer, db.ForeignKey('user.user_id'), primary_key=True)
)
interests = db.Table(
'interests',
    db.Column('interest_id', db.Integer, db.ForeignKey('interest.interest_id'), primary_key=True),
db.Column('weight', db.Integer),
    db.Column('user_id', db.Integer, db.ForeignKey('user.user_id'), primary_key=True)
)
class User(db.Model):
user_id = db.Column(db.Integer, primary_key = True, autoincrement=True)
firstName = db.Column(db.String(100))
lastName = db.Column(db.String(100))
username = db.Column(db.String(100))
email = db.Column(db.String(100))
phoneNumber = db.Column(db.Integer)
courses = db.relationship('Course', secondary=courses, lazy='subquery',backref=db.backref('users',lazy='dynamic'))
interests = db.relationship('Interest', secondary=interests, lazy='subquery', backref=db.backref('users',lazy='dynamic'))
def __init__(self,firstName, lastName, username, email, phoneNumber):
self.firstName = firstName
self.lastName = lastName
self.username = username
self.email = email
self.phoneNumber = phoneNumber
class Course(db.Model):
course_id = db.Column(db.Integer, primary_key = True, autoincrement=True)
course_name = db.Column(db.String(100))
# _users = db.relationship('User', secondary=courses, backref=db.backref('courses_backref',lazy='dynamic'))
def __init__(self,course_name):
self.course_name = course_name
class Interest(db.Model):
interest_id = db.Column(db.Integer, primary_key = True, autoincrement=True)
interest = db.Column(db.String(100))
def __init__(self,interest):
self.interest = interest
all_Courses = [
'CS 1101','CS 1301',
'CS 1331', 'ENGL 1101',
'ENGL 1102', 'APPH 1040',
'APPH 1050', 'PSYCH 1101',
'MATH 1551', 'MATH 1552',
'MATH 1553', 'COE 201', 'HIST 2111']
for c in all_Courses:
course = Course(c)
db.session.add(course)
db.session.commit()
all_Interests = [
'Reading', 'Gaming',
'Rocket Building', 'Watching TV',
'Family Time', 'Movies',
'Fishing', 'Computer',
'Gardening', 'Renting Movies']
for i in all_Interests:
interest = Interest(i)
db.session.add(interest)
db.session.commit()
# usr = User("hailey", "nho38")
# db.session.add(usr)
# # User.query.filter_by(username='hailey').delete()
# cs1371 = Course("CS1371")
# db.session.commit()
# cs1371.users.append(usr)
# db.session.commit()
def courseDB():
    # Build a binary (n_users x n_courses) enrolment matrix from the join table
    query = db.session.query(Course, User).join(Course.users).all()
    n_courses = len(db.session.query(Course).all())
    n_users = len(db.session.query(User).all())
    mat = np.zeros((n_users, n_courses))
    for course, user in query:
        mat[user.user_id - 1][course.course_id - 1] = 1
    return mat
def interestDB():
    # Build a binary (n_users x n_interests) matrix from the join table
    query = db.session.query(Interest, User).join(Interest.users).all()
    n_interests = len(db.session.query(Interest).all())
    n_users = len(db.session.query(User).all())
    mat = np.zeros((n_users, n_interests))
    for interest, user in query:
        mat[user.user_id - 1][interest.interest_id - 1] = 1
    return mat
def findMaxIndex(array):
maximum = array[0]
maxIndex = 0
for index in range(0, len(array)):
if(array[index] > maximum):
maximum = array[index]
maxIndex = index
return maxIndex
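# A quick sanity check of the scoring idea used below (a sketch on dummy
# matrices rather than the live database; assumes 3 users x 2 courses):
def example_score_users():
    course_mat = np.array([[1, 0], [1, 1], [0, 1]])  # rows: users, cols: courses
    user_course = course_mat[0]                      # first user's course row
    scores = np.matmul(course_mat, user_course)      # shared courses per user
    return findMaxIndex(scores)                      # same result as np.argmax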
# --------------------------------------
# register user
@app.route('/app/api/create', methods = ['POST'])
def create_user():
#Check for the data
if not request.json: #can add other conditions later to make sure it has certain data with: or not 'data' in request.json. Will need to check formats too
abort(400)
firstName = request.form.get('firstName')
lastName = request.form.get('lastName')
username = request.form.get('username')
email = request.form.get('email')
phoneNumber = request.form.get('phoneNumber')
usr = User(firstName, lastName, username, email, phoneNumber)
db.session.add(usr)
db.session.commit()
courses = request.form.get('courses')
interests = request.form.get('hobbies')
course_idx = [course["id"] for course in courses]
interest_idx = [interest["id"] for interest in interests]
for c in course_idx:
        course = Course.query.filter_by(course_id=c).all()
course[0].users.append(usr)
db.session.commit()
for i in interest_idx:
        interest = Interest.query.filter_by(interest_id=i).all()
interest[0].users.append(usr)
db.session.commit()
return jsonify({'created': True}), 201
# matching
@app.route('/app/api/recommend/<int:userid>', methods = ['GET'])
def recommend_matches(userid):
user = [{}] #get the user's information and store it in a list
#abort if this user has no information --- not sure if this is totally Needed
if len(user) == 0:
abort(404)
#define matrices containing the other user's course and interests as well as single row matrices representing the user's
    course_mat = np.array(courseDB())
    interest_mat = np.array(interestDB())
    user_course = np.array(course_mat[userid - 1])
    user_interest = np.array(interest_mat[userid - 1])
    # multiply the corresponding matrices to get per-user overlap scores for each category
    courseScores = np.matmul(course_mat, user_course)
    interestScores = np.matmul(interest_mat, user_interest)
finalScores = np.add(courseScores, interestScores)
maxScore = np.amax(finalScores)
maxID = findMaxIndex(finalScores)
    match_User = User.query.filter_by(user_id=maxID + 1).all()
firstName = match_User[0].firstName
lastName = match_User[0].lastName
email = match_User[0].email
phoneNumber = match_User[0].phoneNumber
return jsonify({'firstName' : firstName,'lastName' : lastName, 'email': email, 'phoneNumber' : phoneNumber})
# @app.teardown_appcontext
# def shutdown_session(exception=None):
# db.session.remove()
if __name__ == "__main__":
db.create_all()
app.run()
# result = [r for r in Course.query.filter(Course.users.any(id=0)).all()]
# result = [r for r in db.session.query(User).all()]
# result = db.session.query(Course).join(Course.users).filter(User.user_id==1).all()
# result = db.session.query(Course,User).join(Course.users).filter(User.user_id==1).all()
# result = [r for r in Course.query.join(Course.users).filter_by(user_id=1).all()]
# courseDB = courseDB()
# print(courseDB)
# from pdb import set_trace; set_trace()
| 33.912621
| 160
| 0.667764
|
798ad9f2710dcb47a192414e8ff15e3afeff3f2f
| 811
|
py
|
Python
|
haminfo/db/alembic_helpers.py
|
hemna/haminfo
|
86db93536075999afa086fda84f10c1911af0375
|
[
"Apache-2.0"
] | null | null | null |
haminfo/db/alembic_helpers.py
|
hemna/haminfo
|
86db93536075999afa086fda84f10c1911af0375
|
[
"Apache-2.0"
] | null | null | null |
haminfo/db/alembic_helpers.py
|
hemna/haminfo
|
86db93536075999afa086fda84f10c1911af0375
|
[
"Apache-2.0"
] | null | null | null |
from alembic import op
from sqlalchemy import engine_from_config
from sqlalchemy.engine import reflection
def table_does_not_exist(table, schema=None):
config = op.get_context().config
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix='sqlalchemy.')
insp = reflection.Inspector.from_engine(engine)
return insp.has_table(table, schema) is False
def table_has_column(table, column):
config = op.get_context().config
engine = engine_from_config(
config.get_section(config.config_ini_section), prefix='sqlalchemy.')
insp = reflection.Inspector.from_engine(engine)
has_column = False
for col in insp.get_columns(table):
if column not in col['name']:
continue
has_column = True
return has_column
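# Minimal sketch of using these helpers inside a migration's upgrade() step.
# The table and column names here are placeholders, not from a real migration.
def example_upgrade():
    import sqlalchemy as sa
    if table_does_not_exist('station'):
        op.create_table(
            'station',
            sa.Column('id', sa.Integer, primary_key=True),
        )
    if not table_has_column('station', 'callsign'):
        op.add_column('station', sa.Column('callsign', sa.String(32)))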
| 32.44
| 76
| 0.72873
|
a7cc9908ff406b9b31fb55c690cc626ef19d69b5
| 32,283
|
py
|
Python
|
test/functional/fundrawtransaction.py
|
peraktechnology/perakcoin
|
502fb07b9d11ff1528577c7d2afe0943f5f20be0
|
[
"MIT"
] | 3
|
2018-09-27T04:01:02.000Z
|
2021-04-20T07:51:37.000Z
|
test/functional/fundrawtransaction.py
|
peraktechnology/perakcoin
|
502fb07b9d11ff1528577c7d2afe0943f5f20be0
|
[
"MIT"
] | null | null | null |
test/functional/fundrawtransaction.py
|
peraktechnology/perakcoin
|
502fb07b9d11ff1528577c7d2afe0943f5f20be0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework, BITCOIND_PROC_WAIT_TIMEOUT
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_jsonrpc(-5, "changeAddress must be a valid perakcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# compare fee of a 4of5 multisig p2sh transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.stop_node(2)
self.stop_node(3)
self.nodes[1].encryptwallet("test")
self.bitcoind_processes[1].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
self.nodes = self.start_nodes(self.num_nodes, self.options.tmpdir)
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_jsonrpc(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
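# Editor's sketch (not part of the original test): the fee check used repeatedly in
# run_test above accepts a fundrawtransaction fee when it is no lower than, and no
# more than the configured tolerance above, the fee the wallet chose for an
# equivalent payment. The helper name and the numeric values are illustrative only.
def _fee_within_tolerance(funded_fee, signed_fee, tolerance):
    from decimal import Decimal
    delta = Decimal(str(funded_fee)) - Decimal(str(signed_fee))
    return delta >= 0 and delta <= Decimal(str(tolerance))

assert _fee_within_tolerance('0.00010000', '0.00009800', '0.00000500')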
if __name__ == '__main__':
RawTransactionsTest().main()
| avg_line_length: 44.528276 | max_line_length: 223 | alphanum_fraction: 0.570424 |

| hexsha: f6a8ae663e3df289f236496e09e3ee18acf8a078 | size: 6,758 | ext: py | lang: Python |
| max_stars_repo: terra_notebook_utils/workflows.py | mitchac/terra-notebook-utils | 4d3bd83f589b2219d028ce2b16027dd8da1b8749 | ["MIT"] | stars: null | events: null - null |
| max_issues_repo: terra_notebook_utils/workflows.py | mitchac/terra-notebook-utils | 4d3bd83f589b2219d028ce2b16027dd8da1b8749 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: terra_notebook_utils/workflows.py | mitchac/terra-notebook-utils | 4d3bd83f589b2219d028ce2b16027dd8da1b8749 | ["MIT"] | forks: null | events: null - null |
"""
Workflow information
"""
import json
import logging
from datetime import datetime
from functools import lru_cache
from typing import Dict, Generator, Optional, Tuple
from firecloud import fiss
from terra_notebook_utils import WORKSPACE_NAME, WORKSPACE_GOOGLE_PROJECT, costs
from terra_notebook_utils.utils import concurrent_recursion, js_get
logger = logging.getLogger(__name__)
date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
class TNUCostException(Exception):
pass
def list_submissions(workspace_name: Optional[str]=WORKSPACE_NAME,
workspace_namespace: Optional[str]=WORKSPACE_GOOGLE_PROJECT) -> Generator[dict, None, None]:
resp = fiss.fapi.list_submissions(workspace_namespace, workspace_name)
resp.raise_for_status()
for s in resp.json():
yield s
@lru_cache()
def get_submission(submission_id: str,
workspace_name: Optional[str]=WORKSPACE_NAME,
workspace_namespace: Optional[str]=WORKSPACE_GOOGLE_PROJECT) -> dict:
"""
Get information about a submission, including member workflows
"""
resp = fiss.fapi.get_submission(workspace_namespace, workspace_name, submission_id)
resp.raise_for_status()
return resp.json()
@lru_cache()
def get_workflow(submission_id: str,
workflow_id: str,
workspace_name: Optional[str]=WORKSPACE_NAME,
workspace_namespace: Optional[str]=WORKSPACE_GOOGLE_PROJECT) -> dict:
"""
Get information about a workflow
"""
resp = fiss.fapi.get_workflow_metadata(workspace_namespace, workspace_name, submission_id, workflow_id)
resp.raise_for_status()
return resp.json()
def get_all_workflows(submission_id: str,
workspace: Optional[str]=WORKSPACE_NAME,
workspace_namespace: Optional[str]=WORKSPACE_GOOGLE_PROJECT) -> Dict[str, dict]:
"""
Retrieve all workflows, and workflow metadata, for `submission_id`, including sub-workflows.
"""
workflows_metadata = dict()
def get_metadata_and_subworkflows(workflow_id: str):
wf_metadata = get_workflow(submission_id, workflow_id, workspace, workspace_namespace)
workflows_metadata[workflow_id] = wf_metadata
subworkflows = {call_metadata['subWorkflowId']
for call_metadata_list in wf_metadata['calls'].values()
for call_metadata in call_metadata_list
if "subWorkflowId" in call_metadata}
return subworkflows
submission = get_submission(submission_id, workspace, workspace_namespace)
initial_workflow_ids = {wf['workflowId'] for wf in submission['workflows']}
concurrent_recursion(get_metadata_and_subworkflows, initial_workflow_ids)
return workflows_metadata
def estimate_workflow_cost(workflow_id: str, workflow_metadata: dict) -> Generator[dict, None, None]:
for call_name, call_metadata_list in workflow_metadata['calls'].items():
for call_metadata in call_metadata_list:
if "subWorkflowId" in call_metadata:
# subworkflows need to be looked up and estimated separately
continue
try:
task_name = call_name.split(".")[1]
call_cached = bool(int(js_get("callCaching.hit", call_metadata, default=0)))
if call_cached:
cost, cpus, memory_gb, runtime, disk_size_gb = 0.0, 0, 0.0, 0.0, 0.0
else:
cpus, memory_gb = _parse_machine_type(js_get("jes.machineType", call_metadata))
# Assume that Google Lifesciences Pipelines API uses N1 custom machine types
completed = js_get("executionStatus", call_metadata)
if completed == "Done":
instance_start = datetime.strptime(js_get("executionEvents[?contains(description,'assigned')].startTime | [0]", call_metadata), date_format)
instance_end = datetime.strptime(js_get("executionEvents[?description == 'Worker released'].endTime | [0]", call_metadata), date_format)
runtime = (instance_end - instance_start).total_seconds()
elif completed == "RetryableFailure" or completed == "Failed":
instance_start = datetime.strptime(js_get("executionEvents[?description == 'RunningJob'].startTime | [0]", call_metadata), date_format)
instance_end = datetime.strptime(js_get("executionEvents[?description == 'RunningJob'].endTime | [0]", call_metadata), date_format)
runtime = (instance_end - instance_start).total_seconds()
else:
instance_start = 0
instance_end = 0
runtime = 0
preemptible = bool(int(js_get("runtimeAttributes.preemptible", call_metadata)))
disk_description = js_get("runtimeAttributes.disks", call_metadata, default="")
if disk_description.startswith("local-disk"):
_, size_gb, _ = disk_description.split()
disk_size_gb = float(size_gb)
else:
disk_size_gb = 1.0 # Guess 1GB when disk information is unavailable
cost = (costs.GCPCustomN1Cost.estimate(cpus, memory_gb, runtime, preemptible)
+ costs.PersistentDisk.estimate(disk_size_gb, runtime))
yield dict(task_name=task_name,
cost=cost,
number_of_cpus=cpus,
memory=memory_gb,
disk=disk_size_gb,
completed=completed,
instance_start=instance_start,
instance_end=instance_end,
duration=runtime,
call_cached=call_cached)
except (KeyError, TNUCostException) as exc:
logger.warning(f"Unable to estimate costs for workflow {workflow_id}: "
f"{exc.args[0]}")
def _parse_machine_type(machine_type: str) -> Tuple[int, float]:
parts = machine_type.split("-", 2)
if 3 != len(parts) or "custom" != parts[0]:
raise TNUCostException(f"Cannot estimate costs for machine type '{machine_type}'"
"Please contact terra-notebook-utils maintainers to add support")
try:
cpus, memory_gb = int(parts[1]), float(parts[2]) / 1024
return cpus, memory_gb
except ValueError as exc:
raise TNUCostException(f"Cannot parse cpus and memory from '{machine_type}'") from exc
| avg_line_length: 49.691176 | max_line_length: 164 | alphanum_fraction: 0.627701 |

| hexsha: ca0a5f94547bcf5fdce72fa4c967c2175b862d8c | size: 2,898 | ext: py | lang: Python |
| max_stars_repo: tests/test_repeat_plugin.py | jeroen-dhollander/python-paginator | 8c60ae6dd64a7440feda4561440117d9bebc3ae7 | ["CC0-1.0"] | stars: 3 | events: 2018-09-26T20:07:51.000Z - 2021-07-10T11:59:44.000Z |
| max_issues_repo: tests/test_repeat_plugin.py | jeroen-dhollander/python-paginator | 8c60ae6dd64a7440feda4561440117d9bebc3ae7 | ["CC0-1.0"] | issues: 1 | events: 2020-02-19T10:15:38.000Z - 2020-03-14T21:48:55.000Z |
| max_forks_repo: tests/test_repeat_plugin.py | jeroen-dhollander/python-more-or-less | 8c60ae6dd64a7440feda4561440117d9bebc3ae7 | ["CC0-1.0"] | forks: null | events: null - null |
from more_or_less import more_plugins
from more_or_less.input import Input
from more_or_less.more_plugin import MorePlugin
from more_or_less.output import Output
from more_or_less.page import Page
from more_or_less.search_plugin import SearchPage
from tests.test_more_page_builder import TestUtil
from unittest.mock import Mock, call
_UNREPEATABLE_PAGE_KEY = 'U'
class TestRepeatPlugin(TestUtil):
def setUp(self):
self.input = Mock(Input)
self.output = Mock(Output)
plugins = more_plugins.get() + [UnrepeatablePagePlugin()]
self.builder = self.get_more_page_builder(
input=self.input,
output=self.output,
plugins=plugins)
def fill_page(self, page):
while not page.is_full():
page.add_line('line \n')
def test_can_repeat_enter(self):
self.input.get_character.side_effect = ['5', '\n']
page = self.builder.build_next_page()
self.fill_page(page)
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfHeight(repeated_page, 5)
self.assertFalse(repeated_page.is_full())
def test_can_repeat_space(self):
self.input.get_character.side_effect = [' ']
page = self.builder.build_next_page()
self.fill_page(page)
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfHeight(repeated_page, page.height)
def test_can_repeat_search(self):
self.input.get_character.side_effect = ['5', '/']
self.input.prompt.return_value = 'the pattern'
self.builder.build_next_page()
self.input.get_character.side_effect = ['.']
repeated_page = self.builder.build_next_page()
self.assertIsPageOfType(repeated_page, SearchPage)
self.assertEqual('the pattern', repeated_page.pattern)
self.assertEqual(5, repeated_page.required_match_count)
def test_prints_warning_on_unrepeatable_command(self):
self.input.get_character.side_effect = [_UNREPEATABLE_PAGE_KEY]
self.builder.build_next_page()
self.input.get_character.side_effect = ['.', ' ', ' ']
self.builder.build_next_page()
self.input.assert_has_calls([
call.get_character('--More--'),
call.get_character('--Previous command can not be repeated--'),
])
class UnrepeatablePage(Page):
def is_full(self):
return False
def add_line(self, line):
pass
class UnrepeatablePagePlugin(MorePlugin):
'''
Plugin that returns a page of type 'UnrepeatablePage'
'''
def get_keys(self):
return [_UNREPEATABLE_PAGE_KEY]
def build_page(self, page_builder, key_pressed, arguments):
return UnrepeatablePage()
def get_help(self):
pass
| avg_line_length: 28.98 | max_line_length: 75 | alphanum_fraction: 0.676329 |

| hexsha: 33ab4f42fe0fc6aab55c05e4e991f56fbbc9753c | size: 3,574 | ext: py | lang: Python |
| max_stars_repo: JavaExtractor/extract.py | jjhenkel/code2seq | dd80fff02aa8cd9d05f6781a8309f8397e69f669 | ["MIT"] | stars: null | events: null - null |
| max_issues_repo: JavaExtractor/extract.py | jjhenkel/code2seq | dd80fff02aa8cd9d05f6781a8309f8397e69f669 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: JavaExtractor/extract.py | jjhenkel/code2seq | dd80fff02aa8cd9d05f6781a8309f8397e69f669 | ["MIT"] | forks: null | events: null - null |
#!/usr/bin/python
import itertools
import multiprocessing
import os
import shutil
import subprocess
import sys
from argparse import ArgumentParser
from threading import Timer
def get_immediate_subdirectories(a_dir):
return [(os.path.join(a_dir, name)) for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
TMP_DIR = ""
def ParallelExtractDir(args, dir):
ExtractFeaturesForDir(args, dir, "")
def ExtractFeaturesForDir(args, dir, prefix):
command = ['java', '-Xmx100g', '-Xss500M', '-XX:MaxNewSize=60g', '-cp', args.jar, 'JavaExtractor.App',
'--max_path_length', str(args.max_path_length), '--max_path_width', str(args.max_path_width),
'--dir', dir, '--num_threads', str(args.num_threads)]
# print command
# os.system(command)
kill = lambda process: process.kill()
outputFileName = TMP_DIR + prefix + dir.split('/')[-1]
failed = False
with open(outputFileName, 'a') as outputFile:
sleeper = subprocess.Popen(command, stdout=outputFile, stderr=subprocess.PIPE)
timer = Timer(60 * 60 * 600, kill, [sleeper])
try:
timer.start()
stdout, stderr = sleeper.communicate()
finally:
timer.cancel()
if sleeper.poll() == 0:
if len(stderr) > 0:
print(stderr.decode(), file=sys.stderr)
else:
print('dir: ' + str(dir) + ' was not completed in time', file=sys.stderr)
if len(stderr) > 0:
print(stderr.decode(), file=sys.stderr)
failed = True
subdirs = get_immediate_subdirectories(dir)
for subdir in subdirs:
ExtractFeaturesForDir(args, subdir, prefix + dir.split('/')[-1] + '_')
if failed:
if os.path.exists(outputFileName):
os.remove(outputFileName)
def ExtractFeaturesForDirsList(args, dirs):
global TMP_DIR
TMP_DIR = "./tmp/feature_extractor%d/" % (os.getpid())
if os.path.exists(TMP_DIR):
shutil.rmtree(TMP_DIR, ignore_errors=True)
os.makedirs(TMP_DIR)
try:
p = multiprocessing.Pool(6)
p.starmap(ParallelExtractDir, zip(itertools.repeat(args), dirs))
# for dir in dirs:
# ExtractFeaturesForDir(args, dir, '')
output_files = os.listdir(TMP_DIR)
for f in output_files:
os.system("cat %s/%s" % (TMP_DIR, f))
finally:
shutil.rmtree(TMP_DIR, ignore_errors=True)
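# Editor's sketch (not part of this script): the timeout pattern used in
# ExtractFeaturesForDir above, reduced to its essentials -- start a subprocess,
# arm a Timer that kills it, and always cancel the timer afterwards. The helper
# name is illustrative; `subprocess` and `Timer` are already imported above.
def run_with_timeout(command, timeout_seconds):
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    timer = Timer(timeout_seconds, process.kill)
    try:
        timer.start()
        stdout, stderr = process.communicate()
    finally:
        timer.cancel()
    return process.returncode, stdout, stderr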
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-maxlen", "--max_path_length", dest="max_path_length", required=False, default=8)
parser.add_argument("-maxwidth", "--max_path_width", dest="max_path_width", required=False, default=2)
parser.add_argument("-threads", "--num_threads", dest="num_threads", required=False, default=64)
parser.add_argument("-j", "--jar", dest="jar", required=True)
parser.add_argument("-dir", "--dir", dest="dir", required=False)
parser.add_argument("-file", "--file", dest="file", required=False)
args = parser.parse_args()
if args.file is not None:
command = 'java -cp ' + args.jar + ' JavaExtractor.App --max_path_length ' + \
str(args.max_path_length) + ' --max_path_width ' + str(args.max_path_width) + ' --file ' + args.file
os.system(command)
elif args.dir is not None:
# subdirs = get_immediate_subdirectories(args.dir)
# if len(subdirs) == 0:
# subdirs = [args.dir]
ExtractFeaturesForDirsList(args, [args.dir])
| avg_line_length: 36.469388 | max_line_length: 118 | alphanum_fraction: 0.62563 |

| hexsha: d0a4ad5c765853b97ec6d3f2739414919a4f3aa7 | size: 5,819 | ext: py | lang: Python |
| max_stars_repo: bayes_opt/bayesian_optimization.py | heartfelt-tech/BayesianOptimization | fb94a0239090e4d8de34c04d445b151ed9cb5dd4 | ["MIT"] | stars: null | events: null - null |
| max_issues_repo: bayes_opt/bayesian_optimization.py | heartfelt-tech/BayesianOptimization | fb94a0239090e4d8de34c04d445b151ed9cb5dd4 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: bayes_opt/bayesian_optimization.py | heartfelt-tech/BayesianOptimization | fb94a0239090e4d8de34c04d445b151ed9cb5dd4 | ["MIT"] | forks: null | events: null - null |
import warnings
import numpy as np
from .target_space import TargetSpace
from .event import Events, DEFAULT_EVENTS
from .logger import _get_default_logger
from .util import UtilityFunction, acq_max, ensure_rng
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor
class Queue:
def __init__(self):
self._queue = []
@property
def empty(self):
return len(self) == 0
def __len__(self):
return len(self._queue)
def __next__(self):
if self.empty:
raise StopIteration("Queue is empty, no more objects to retrieve.")
obj = self._queue[0]
self._queue = self._queue[1:]
return obj
def next(self):
return self.__next__()
def add(self, obj):
"""Add object to end of queue."""
self._queue.append(obj)
class Observable(object):
"""
Inspired/Taken from
https://www.protechtraining.com/blog/post/879#simple-observer
"""
def __init__(self, events):
# maps event names to subscribers
# str -> dict
self._events = {event: dict() for event in events}
def get_subscribers(self, event):
return self._events[event]
def subscribe(self, event, subscriber, callback=None):
if callback is None:
callback = getattr(subscriber, 'update')
self.get_subscribers(event)[subscriber] = callback
def unsubscribe(self, event, subscriber):
del self.get_subscribers(event)[subscriber]
def dispatch(self, event):
for _, callback in self.get_subscribers(event).items():
callback(event, self)
class BayesianOptimization(Observable):
def __init__(self, f, pbounds, ptypes=None, random_state=None, verbose=2):
""""""
self._random_state = ensure_rng(random_state)
# Data structure containing the function to be optimized, the bounds of
# its domain, and a record of the evaluations we have done so far
self._space = TargetSpace(f, pbounds, ptypes, random_state)
# queue
self._queue = Queue()
# Internal GP regressor
self._gp = GaussianProcessRegressor(
kernel=Matern(nu=2.5),
alpha=1e-6,
normalize_y=True,
n_restarts_optimizer=25,
random_state=self._random_state,
)
self._verbose = verbose
super(BayesianOptimization, self).__init__(events=DEFAULT_EVENTS)
@property
def space(self):
return self._space
@property
def max(self):
return self._space.max()
@property
def res(self):
return self._space.res()
def register(self, params, target):
"""Expect observation with known target"""
self._space.register(params, target)
self.dispatch(Events.OPTMIZATION_STEP)
def probe(self, params, lazy=True):
"""Probe target of x"""
if lazy:
self._queue.add(params)
else:
self._space.probe(params)
self.dispatch(Events.OPTMIZATION_STEP)
def suggest(self, utility_function):
"""Most promissing point to probe next"""
if len(self._space) == 0:
return self._space.array_to_params(self._space.random_sample())
# Sklearn's GP throws a large number of warnings at times, but
# we don't really need to see them here.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self._gp.fit(self._space.params, self._space.target)
# Finding argmax of the acquisition function.
suggestion = acq_max(
ac=utility_function.utility,
gp=self._gp,
y_max=self._space.target.max(),
bounds=self._space.bounds,
btypes=self._space.btypes,
random_state=self._random_state
)
return self._space.array_to_params(suggestion)
def _prime_queue(self, init_points):
"""Make sure there's something in the queue at the very beginning."""
if self._queue.empty and self._space.empty:
init_points = max(init_points, 1)
for _ in range(init_points):
self._queue.add(self._space.random_sample())
def _prime_subscriptions(self):
if not any([len(subs) for subs in self._events.values()]):
_logger = _get_default_logger(self._verbose)
self.subscribe(Events.OPTMIZATION_START, _logger)
self.subscribe(Events.OPTMIZATION_STEP, _logger)
self.subscribe(Events.OPTMIZATION_END, _logger)
def maximize(self,
init_points=5,
n_iter=25,
acq='ucb',
kappa=2.576,
xi=0.0,
**gp_params):
"""Mazimize your function"""
self._prime_subscriptions()
self.dispatch(Events.OPTMIZATION_START)
self._prime_queue(init_points)
self.set_gp_params(**gp_params)
util = UtilityFunction(kind=acq, kappa=kappa, xi=xi)
iteration = 0
while not self._queue.empty or iteration < n_iter:
try:
x_probe = next(self._queue)
except StopIteration:
x_probe = self.suggest(util)
iteration += 1
self.probe(x_probe, lazy=False)
self.dispatch(Events.OPTMIZATION_END)
def set_bounds(self, new_bounds):
"""
A method that allows changing the lower and upper searching bounds
Parameters
----------
new_bounds : dict
A dictionary with the parameter name and its new bounds
"""
self._space.set_bounds(new_bounds)
def set_gp_params(self, **params):
self._gp.set_params(**params)
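# Editor's sketch (not part of this module): minimal usage of the class above.
# The objective and bounds are made up, and it is assumed that this fork's
# TargetSpace accepts ptypes=None unchanged, as the default arguments suggest.
if __name__ == "__main__":
    def example_objective(x):
        return -(x - 2) ** 2  # maximum at x = 2

    example_optimizer = BayesianOptimization(f=example_objective,
                                             pbounds={"x": (-4.0, 4.0)},
                                             random_state=1)
    example_optimizer.maximize(init_points=2, n_iter=5)
    print(example_optimizer.max)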
| avg_line_length: 30.307292 | max_line_length: 79 | alphanum_fraction: 0.616085 |

| hexsha: fc2de1737f3c4598d0df7a96e2a609a95442911f | size: 10,078 | ext: py | lang: Python |
| max_stars_repo: other_experiments/spatial_sne_all_with_skip_connections(Copy over effect)/few_shot_segmentor_sne_position_all_type_spatial_skipconn_conditioner_baseline.py | a-parida12/few-shot-segmentation | 0f678845cec598e635b41ec6d8c2db1585c6ea55 | ["MIT"] | stars: 1 | events: 2021-12-29T07:30:21.000Z - 2021-12-29T07:30:21.000Z |
| max_issues_repo: other_experiments/spatial_sne_all_with_skip_connections(Copy over effect)/few_shot_segmentor_sne_position_all_type_spatial_skipconn_conditioner_baseline.py | a-parida12/few-shot-segmentation | 0f678845cec598e635b41ec6d8c2db1585c6ea55 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: other_experiments/spatial_sne_all_with_skip_connections(Copy over effect)/few_shot_segmentor_sne_position_all_type_spatial_skipconn_conditioner_baseline.py | a-parida12/few-shot-segmentation | 0f678845cec598e635b41ec6d8c2db1585c6ea55 | ["MIT"] | forks: null | events: null - null |
"""Few-Shot_learning Segmentation"""
import numpy as np
import torch
import torch.nn as nn
from nn_common_modules import modules as sm
from data_utils import split_batch
# import torch.nn.functional as F
from squeeze_and_excitation import squeeze_and_excitation as se
class SDnetConditioner(nn.Module):
"""
A conditional branch of few shot learning regressing the parameters for the segmentor
"""
def __init__(self, params):
super(SDnetConditioner, self).__init__()
se_block_type = se.SELayer.SSE
params['num_channels'] = 2
params['num_filters'] = 16
self.encode1 = sm.SDnetEncoderBlock(params)
self.squeeze_conv_e1 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
params['num_channels'] = 16
self.encode2 = sm.SDnetEncoderBlock(params)
self.squeeze_conv_e2 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.encode3 = sm.SDnetEncoderBlock(params)
self.squeeze_conv_e3 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.encode4 = sm.SDnetEncoderBlock(params)
self.squeeze_conv_e4 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.bottleneck = sm.GenericBlock(params)
self.squeeze_conv_bn = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
params['num_channels'] = 16+16
self.decode1 = sm.SDnetDecoderBlock(params)
self.squeeze_conv_d1 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.decode2 = sm.SDnetDecoderBlock(params)
self.squeeze_conv_d2 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.decode3 = sm.SDnetDecoderBlock(params)
self.squeeze_conv_d3 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
self.decode4 = sm.SDnetDecoderBlock(params)
self.squeeze_conv_d4 = nn.Conv2d(in_channels=params['num_filters'], out_channels=1,
kernel_size=(1, 1),
padding=(0, 0),
stride=1)
params['num_channels'] = 16
self.classifier = sm.ClassifierBlock(params)
self.sigmoid = nn.Sigmoid()
def forward(self, input):
e1, out1, ind1 = self.encode1(input)
e_w1 = self.sigmoid(self.squeeze_conv_e1(e1))
e2, out2, ind2 = self.encode2(e1)
e_w2 = self.sigmoid(self.squeeze_conv_e2(e2))
e3, out3, ind3 = self.encode3(e2)
e_w3 = self.sigmoid(self.squeeze_conv_e3(e3))
e4, out4, ind4 = self.encode4(e3)
e_w4 = self.sigmoid(self.squeeze_conv_e4(e4))
bn = self.bottleneck(e4)
bn_w4 = self.sigmoid(self.squeeze_conv_bn(bn))
d4 = self.decode4(bn, out4, ind4)
d_w4 = self.sigmoid(self.squeeze_conv_d4(d4))
d3 = self.decode3(d4, out3, ind3)
d_w3 = self.sigmoid(self.squeeze_conv_d3(d3))
d2 = self.decode2(d3, out2, ind2)
d_w2 = self.sigmoid(self.squeeze_conv_d2(d2))
d1 = self.decode1(d2, out1, ind1)
d_w1 = self.sigmoid(self.squeeze_conv_d1(d1))
space_weights = (e_w1, e_w2, e_w3, e_w4, bn_w4, d_w4, d_w3, d_w2, d_w1, None)
channel_weights = (None, None, None, None)
return space_weights, channel_weights
class SDnetSegmentor(nn.Module):
"""
Segmentor Code
param ={
'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes': 1,
'se_block': True,
'drop_out':0
}
"""
def __init__(self, params):
super(SDnetSegmentor, self).__init__()
se_block_type = se.SELayer.SSE
params['num_channels'] = 1
params['num_filters'] = 64
self.encode1 = sm.SDnetEncoderBlock(params)
params['num_channels'] = 64
self.encode2 = sm.SDnetEncoderBlock(params)
self.encode3 = sm.SDnetEncoderBlock(params)
self.encode4 = sm.SDnetEncoderBlock(params)
self.bottleneck = sm.GenericBlock(params)
params['num_channels'] = 64
self.decode1 = sm.SDnetDecoderBlock(params)
self.decode2 = sm.SDnetDecoderBlock(params)
self.decode3 = sm.SDnetDecoderBlock(params)
self.decode4 = sm.SDnetDecoderBlock(params)
params['num_channels'] = 64
self.classifier = sm.ClassifierBlock(params)
self.soft_max = nn.Softmax2d()
# self.sigmoid = nn.Sigmoid()
def forward(self, inpt, weights=None):
space_weights, channel_weights = weights
# e_w1, e_w2, e_w3, bn_w, d_w3, d_w2, d_w1, cls_w = weights if weights is not None else (
# None, None, None, None, None, None, None, None)
e_w1, e_w2, e_w3, e_w4, bn_w, d_w4, d_w3, d_w2, d_w1, cls_w = space_weights if space_weights is not None else (
None, None, None, None, None, None, None, None, None, None)
e_c1, e_c2, d_c1, d_c2 = channel_weights
# if weights is not None:
# bn_w, d_w4, d_w3, d_w2, d_w1, cls_w = bn_w * 50, d_w4 * 50, d_w3 * 50, d_w2 * 50, d_w1 * 50, cls_w * 50
e1, _, ind1 = self.encode1(inpt)
if e_w1 is not None:
e1 = torch.mul(e1, e_w1)
e2, _, ind2 = self.encode2(e1)
if e_w2 is not None:
e2 = torch.mul(e2, e_w2)
e3, _, ind3 = self.encode3(e2)
if e_w3 is not None:
e3 = torch.mul(e3, e_w3)
e4, _, ind4 = self.encode4(e3)
if e_w4 is not None:
e4 = torch.mul(e4, e_w4)
bn = self.bottleneck(e4)
if bn_w is not None:
bn = torch.mul(bn, bn_w)
d4 = self.decode4(bn, None, ind4)
if d_w4 is not None:
d4 = torch.mul(d4, d_w4)
d3 = self.decode3(d4, None, ind3)
if d_w3 is not None:
d3 = torch.mul(d3, d_w3)
d2 = self.decode2(d3, None, ind2)
if d_w2 is not None:
d2 = torch.mul(d2, d_w2)
d1 = self.decode1(d2, None, ind1)
if d_w1 is not None:
d1 = torch.mul(d1, d_w1)
# d1_1 = torch.cat((d1, inpt), dim=1)
logit = self.classifier.forward(d1)
if cls_w is not None:
logit = torch.mul(logit, cls_w)
logit = self.soft_max(logit)
return logit
class FewShotSegmentorDoubleSDnet(nn.Module):
'''
Class Combining Conditioner and Segmentor for few shot learning
'''
def __init__(self, params):
super(FewShotSegmentorDoubleSDnet, self).__init__()
self.conditioner = SDnetConditioner(params)
self.segmentor = SDnetSegmentor(params)
def forward(self, input1, input2):
weights = self.conditioner(input1)
segment = self.segmentor(input2, weights)
return segment
def enable_test_dropout(self):
attr_dict = self.__dict__['_modules']
for i in range(1, 5):
encode_block, decode_block = attr_dict['encode' + str(i)], attr_dict['decode' + str(i)]
encode_block.drop_out = encode_block.drop_out.apply(nn.Module.train)
decode_block.drop_out = decode_block.drop_out.apply(nn.Module.train)
@property
def is_cuda(self):
"""
Check if model parameters are allocated on the GPU.
"""
return next(self.parameters()).is_cuda
def save(self, path):
"""
Save model with its parameters to the given path. Conventionally the
path should end with "*.model".
Inputs:
- path: path string
"""
print('Saving model... %s' % path)
torch.save(self, path)
def predict(self, X, y, query_label, device=0, enable_dropout=False):
"""
Predicts the outout after the model is trained.
Inputs:
- X: Volume to be predicted
"""
self.eval()
input1, input2, y2 = split_batch(X, y, query_label)
input1, input2, y2 = to_cuda(input1, device), to_cuda(input2, device), to_cuda(y2, device)
if enable_dropout:
self.enable_test_dropout()
with torch.no_grad():
out = self.forward(input1, input2)
# max_val, idx = torch.max(out, 1)
idx = out > 0.5
idx = idx.data.cpu().numpy()
prediction = np.squeeze(idx)
del X, out, idx
return prediction
def to_cuda(X, device):
if type(X) is np.ndarray:
X = torch.tensor(X, requires_grad=False).type(torch.FloatTensor).cuda(device, non_blocking=True)
elif type(X) is torch.Tensor and not X.is_cuda:
X = X.type(torch.FloatTensor).cuda(device, non_blocking=True)
return X
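# Editor's sketch (not part of the model): the spatial squeeze-and-excite gating
# used throughout SDnetSegmentor.forward above. A single-channel sigmoid map from
# the conditioner rescales every channel of the segmentor feature map through
# broadcasting. Tensor sizes are illustrative.
if __name__ == '__main__':
    example_features = torch.randn(1, 64, 32, 32)                  # segmentor feature map
    example_weight_map = torch.sigmoid(torch.randn(1, 1, 32, 32))  # conditioner output
    gated = torch.mul(example_features, example_weight_map)        # broadcast over channels
    print(gated.shape)                                             # torch.Size([1, 64, 32, 32])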
| avg_line_length: 37.604478 | max_line_length: 119 | alphanum_fraction: 0.552788 |

| hexsha: c9bd9725db7eb31d548677647e4e1f137c6cfdfe | size: 6,031 | ext: py | lang: Python |
| max_stars_repo: venv/Lib/site-packages/pygments/style.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | ["BSD-3-Clause"] | stars: 9 | events: 2019-05-29T23:50:28.000Z - 2021-01-29T20:51:05.000Z |
| max_issues_repo: venv/Lib/site-packages/pygments/style.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | ["BSD-3-Clause"] | issues: 5 | events: 2021-02-27T21:31:47.000Z - 2021-04-05T21:49:38.000Z |
| max_forks_repo: marlinkpy/venv/Lib/site-packages/pygments/style.py | amilinovic/Test | 4cfdc12a0efeebd636c4982ef90dad65a63b842b | ["MIT"] | forks: 3 | events: 2020-05-25T02:38:08.000Z - 2021-01-20T06:23:06.000Z |
# -*- coding: utf-8 -*-
"""
pygments.style
~~~~~~~~~~~~~~
Basic style object.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.token import Token, STANDARD_TYPES
# Default mapping of ansixxx to RGB colors.
_ansimap = {
# dark
'ansiblack': '000000',
'ansired': '7f0000',
'ansigreen': '007f00',
'ansiyellow': '7f7fe0',
'ansiblue': '00007f',
'ansimagenta': '7f007f',
'ansicyan': '007f7f',
'ansigray': 'e5e5e5',
# normal
'ansibrightblack': '555555',
'ansibrightred': 'ff0000',
'ansibrightgreen': '00ff00',
'ansibrightyellow': 'ffff00',
'ansibrightblue': '0000ff',
'ansibrightmagenta': 'ff00ff',
'ansibrightcyan': '00ffff',
'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
# dark
'#ansiblack': 'ansiblack',
'#ansidarkred': 'ansired',
'#ansidarkgreen': 'ansigreen',
'#ansibrown': 'ansiyellow',
'#ansidarkblue': 'ansiblue',
'#ansipurple': 'ansimagenta',
'#ansiteal': 'ansicyan',
'#ansilightgray': 'ansigray',
# normal
'#ansidarkgray': 'ansibrightblack',
'#ansired': 'ansibrightred',
'#ansigreen': 'ansibrightgreen',
'#ansiyellow': 'ansibrightyellow',
'#ansiblue': 'ansibrightblue',
'#ansifuchsia': 'ansibrightmagenta',
'#ansiturquoise': 'ansibrightcyan',
'#ansiwhite': 'ansiwhite',
}
ansicolors = set(_ansimap)
class StyleMeta(type):
def __new__(mcs, name, bases, dct):
obj = type.__new__(mcs, name, bases, dct)
for token in STANDARD_TYPES:
if token not in obj.styles:
obj.styles[token] = ''
def colorformat(text):
if text in ansicolors:
return text
if text[0:1] == '#':
col = text[1:]
if len(col) == 6:
return col
elif len(col) == 3:
return col[0] * 2 + col[1] * 2 + col[2] * 2
elif text == '':
return ''
elif text.startswith('var') or text.startswith('calc'):
return text
assert False, "wrong color format %r" % text
_styles = obj._styles = {}
for ttype in obj.styles:
for token in ttype.split():
if token in _styles:
continue
ndef = _styles.get(token.parent, None)
styledefs = obj.styles.get(token, '').split()
if not ndef or token is None:
ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
elif 'noinherit' in styledefs and token is not Token:
ndef = _styles[Token][:]
else:
ndef = ndef[:]
_styles[token] = ndef
for styledef in obj.styles.get(token, '').split():
if styledef == 'noinherit':
pass
elif styledef == 'bold':
ndef[1] = 1
elif styledef == 'nobold':
ndef[1] = 0
elif styledef == 'italic':
ndef[2] = 1
elif styledef == 'noitalic':
ndef[2] = 0
elif styledef == 'underline':
ndef[3] = 1
elif styledef == 'nounderline':
ndef[3] = 0
elif styledef[:3] == 'bg:':
ndef[4] = colorformat(styledef[3:])
elif styledef[:7] == 'border:':
ndef[5] = colorformat(styledef[7:])
elif styledef == 'roman':
ndef[6] = 1
elif styledef == 'sans':
ndef[7] = 1
elif styledef == 'mono':
ndef[8] = 1
else:
ndef[0] = colorformat(styledef)
return obj
def style_for_token(cls, token):
t = cls._styles[token]
ansicolor = bgansicolor = None
color = t[0]
if color in _deprecated_ansicolors:
color = _deprecated_ansicolors[color]
if color in ansicolors:
ansicolor = color
color = _ansimap[color]
bgcolor = t[4]
if bgcolor in _deprecated_ansicolors:
bgcolor = _deprecated_ansicolors[bgcolor]
if bgcolor in ansicolors:
bgansicolor = bgcolor
bgcolor = _ansimap[bgcolor]
return {
'color': color or None,
'bold': bool(t[1]),
'italic': bool(t[2]),
'underline': bool(t[3]),
'bgcolor': bgcolor or None,
'border': t[5] or None,
'roman': bool(t[6]) or None,
'sans': bool(t[7]) or None,
'mono': bool(t[8]) or None,
'ansicolor': ansicolor,
'bgansicolor': bgansicolor,
}
def list_styles(cls):
return list(cls)
def styles_token(cls, ttype):
return ttype in cls._styles
def __iter__(cls):
for token in cls._styles:
yield token, cls.style_for_token(token)
def __len__(cls):
return len(cls._styles)
class Style(metaclass=StyleMeta):
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
#: highlight background color
highlight_color = '#ffffcc'
#: line number font color
line_number_color = '#000000'
#: line number background color
line_number_background_color = '#f0f0f0'
#: special line number font color
line_number_special_color = '#000000'
#: special line number background color
line_number_special_background_color = '#ffffc0'
#: Style definitions for individual token types.
styles = {}
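# Editor's sketch (not part of this module): defining a tiny style through the
# metaclass machinery above. Token choices and colors are made up.
if __name__ == '__main__':
    from pygments.token import Comment, Keyword

    class TinyStyle(Style):
        styles = {
            Comment: 'italic #888888',
            Keyword: 'bold #005588',
        }

    # e.g. {'color': '005588', 'bold': True, 'italic': False, ...}
    print(TinyStyle.style_for_token(Keyword))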
| avg_line_length: 31.248705 | max_line_length: 70 | alphanum_fraction: 0.502902 |

| hexsha: 715ce7b6160505789022f038fc0e9bb813ab5218 | size: 1,434 | ext: py | lang: Python |
| max_stars_repo: test/test_pacman.py | yuan-feng/pygmsh | 74671ed9f1dbf1762397504aae33fa27fdebe9e7 | ["MIT"] | stars: 1 | events: 2022-03-13T04:39:03.000Z - 2022-03-13T04:39:03.000Z |
| max_issues_repo: test/test_pacman.py | kurtsansom/pygmsh | 9c2cfe1c6c7f80943f61f3e695a453e343544619 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: test/test_pacman.py | kurtsansom/pygmsh | 9c2cfe1c6c7f80943f61f3e695a453e343544619 | ["MIT"] | forks: null | events: null - null |
from numpy import cos, pi, sin
import pygmsh
from helpers import compute_volume
def test(lcar=0.3):
geom = pygmsh.built_in.Geometry()
r = 1.25 * 3.4
p1 = geom.add_point([0.0, 0.0, 0.0], lcar)
# p2 = geom.add_point([+r, 0.0, 0.0], lcar)
p3 = geom.add_point([-r, 0.0, 0.0], lcar)
p4 = geom.add_point([0.0, +r, 0.0], lcar)
p5 = geom.add_point([0.0, -r, 0.0], lcar)
p6 = geom.add_point([r * cos(+pi / 12.0), r * sin(+pi / 12.0), 0.0], lcar)
p7 = geom.add_point([r * cos(-pi / 12.0), r * sin(-pi / 12.0), 0.0], lcar)
p8 = geom.add_point([0.5 * r, 0.0, 0.0], lcar)
c0 = geom.add_circle_arc(p6, p1, p4)
c1 = geom.add_circle_arc(p4, p1, p3)
c2 = geom.add_circle_arc(p3, p1, p5)
c3 = geom.add_circle_arc(p5, p1, p7)
l1 = geom.add_line(p7, p8)
l2 = geom.add_line(p8, p6)
ll = geom.add_line_loop([c0, c1, c2, c3, l1, l2])
# test adding raw code
geom.add_raw_code("// dummy")
geom.add_raw_code(["// dummy"])
pacman = geom.add_plane_surface(ll)
# test setting physical groups
geom.add_physical(p1, label="c")
geom.add_physical(c0, label="arc")
geom.add_physical(pacman)
geom.add_physical(pacman, label=77)
ref = 54.312974717523744
mesh = pygmsh.generate_mesh(geom)
assert abs(compute_volume(mesh) - ref) < 1.0e-2 * ref
return mesh
if __name__ == "__main__":
import meshio
meshio.write("pacman.vtu", test())
| avg_line_length: 28.68 | max_line_length: 78 | alphanum_fraction: 0.605997 |

| hexsha: 83f2b2b7d475e946942413ccf5da8dad416f868e | size: 4,969 | ext: py | lang: Python |
| max_stars_repo: cnn_architectures/mixup/resnet_cifar/eval_model_c100_leila.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | ["MIT"] | stars: 2 | events: 2019-10-29T03:26:20.000Z - 2021-03-07T10:02:39.000Z |
| max_issues_repo: cnn_architectures/mixup/resnet_cifar/eval_model_c100_leila.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: cnn_architectures/mixup/resnet_cifar/eval_model_c100_leila.py | leilayasmeen/MSc_Thesis | ee5e1782ab4a1d86c5dc0f5dc4111b4432ae204d | ["MIT"] | forks: null | events: null - null |
# Load model weights, evaluate calibration and error (ECE, MCE, classification error), and save the logits.
# ResNet model from https://github.com/BIGBALLON/cifar-10-cnn/blob/master/4_Residual_Network/ResNet_keras.py
import keras
import numpy as np
from keras.datasets import cifar10, cifar100
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from sklearn.model_selection import train_test_split
import pickle
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.evaluation import evaluate_model
stack_n = 18
num_classes10 = 10
num_classes100 = 100
img_rows, img_cols = 32, 32
img_channels = 3
batch_size = 128
epochs = 200
iterations = 45000 // batch_size
weight_decay = 0.0001
seed = 333
weights_file_100 = "resnet_110_45k_c100.h5"
def scheduler(epoch):
if epoch < 80:
return 0.1
if epoch < 150:
return 0.01
return 0.001
def residual_network(img_input,classes_num=10,stack_n=5):
def residual_block(intput,out_channel,increase=False):
if increase:
stride = (2,2)
else:
stride = (1,1)
pre_bn = BatchNormalization()(intput)
pre_relu = Activation('relu')(pre_bn)
conv_1 = Conv2D(out_channel,kernel_size=(3,3),strides=stride,padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
bn_1 = BatchNormalization()(conv_1)
relu1 = Activation('relu')(bn_1)
conv_2 = Conv2D(out_channel,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(relu1)
if increase:
projection = Conv2D(out_channel,
kernel_size=(1,1),
strides=(2,2),
padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(intput)
block = add([conv_2, projection])
else:
block = add([intput,conv_2])
return block
# build model
# total layers = stack_n * 3 * 2 + 2
# stack_n = 5 by default, total layers = 32
# input: 32x32x3 output: 32x32x16
x = Conv2D(filters=16,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(img_input)
# input: 32x32x16 output: 32x32x16
for _ in range(stack_n):
x = residual_block(x,16,False)
# input: 32x32x16 output: 16x16x32
x = residual_block(x,32,True)
for _ in range(1,stack_n):
x = residual_block(x,32,False)
# input: 16x16x32 output: 8x8x64
x = residual_block(x,64,True)
for _ in range(1,stack_n):
x = residual_block(x,64,False)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
# input: 64 output: 10
x = Dense(classes_num,activation='softmax',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(x)
return x
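# NOTE (editor): the color_preprocessing helper below expects per-channel `mean`
# and `std` values to exist at module level; they are not defined in this file, so
# calling it as-is would raise NameError. The __main__ block further down
# normalizes with img_mean/img_std computed from the training split instead, and
# never calls this function.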
def color_preprocessing(x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
for i in range(3):
x_train[:,:,:,i] = (x_train[:,:,:,i] - mean[i]) / std[i]
x_test[:,:,:,i] = (x_test[:,:,:,i] - mean[i]) / std[i]
return x_train, x_test
if __name__ == '__main__':
# CIFAR-100 =========== evaluation
print("Cifar-100 evaluation")
(x_train, y_train), (x_test, y_test) = cifar100.load_data()
y_test = keras.utils.to_categorical(y_test, num_classes100)
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
img_mean = x_train45.mean(axis=0) # per-pixel mean
img_std = x_train45.std(axis=0)
x_train45 = (x_train45-img_mean)/img_std
x_val = (x_val-img_mean)/img_std
x_test = (x_test-img_mean)/img_std
# build network and evaluate
img_input = Input(shape=(img_rows,img_cols,img_channels))
output = residual_network(img_input,num_classes100,stack_n)
model2 = Model(img_input, output)
evaluate_model(model2, weights_file_100, x_test, y_test, bins = 15, verbose = True,
pickle_file = "probs_resnet110_c100", x_val = x_val, y_val = y_val)
| avg_line_length: 37.08209 | max_line_length: 132 | alphanum_fraction: 0.638559 |

| hexsha: ccdce40462ccc1ff361031bf2de2f1263dbc3916 | size: 322 | ext: py | lang: Python |
| max_stars_repo: setup.py | garrett-m-smith/sosp_simple | 15b9f7db089009566f7b571c7a4cf8d56ad7ce97 | ["MIT"] | stars: null | events: null - null |
| max_issues_repo: setup.py | garrett-m-smith/sosp_simple | 15b9f7db089009566f7b571c7a4cf8d56ad7ce97 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: setup.py | garrett-m-smith/sosp_simple | 15b9f7db089009566f7b571c7a4cf8d56ad7ce97 | ["MIT"] | forks: null | events: null - null |
from setuptools import setup, find_packages
setup(name='sosp_simple', version='1.1',
description='A package for simple SOSP models',
url='https://github.com/garrett-m-smith/sosp',
author='Garrett Smith',
author_email='garrett.smith@uconn.edu',
license='MIT',
packages=find_packages(),
zip_safe=False)
| avg_line_length: 32.2 | max_line_length: 50 | alphanum_fraction: 0.723602 |

| hexsha: 295f9fe4dde33915a9b8147bce525639c4f01596 | size: 1,443 | ext: py | lang: Python |
| max_stars_repo: source/timeseries/multi_step/dense.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | ["MIT"] | stars: null | events: null - null |
| max_issues_repo: source/timeseries/multi_step/dense.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | ["MIT"] | issues: null | events: null - null |
| max_forks_repo: source/timeseries/multi_step/dense.py | supercoder3000/py_tensorflow_experiments | 01aebf681df25286cf4503661148203a03309b04 | ["MIT"] | forks: null | events: null - null |
import tensorflow as tf
from data_types.training_result import TrainingResult
from data_types.training_set import TrainingSet
from timeseries.build import compile_and_fit
from timeseries.constants import CONV_WIDTH
from timeseries.window_generator import WindowGenerator
def evaluate_multi_step_dense(
training_set: TrainingSet
) -> TrainingResult:
conv_window = WindowGenerator(
input_width=CONV_WIDTH,
label_width=1,
shift=1,
label_columns=['T (degC)'],
training_set=training_set
)
multi_step_dense = tf.keras.Sequential([
# Shape: (time, features) => (time*features)
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
# Add back the time dimension.
# Shape: (outputs) => (1, outputs)
tf.keras.layers.Reshape([1, -1]),
])
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', multi_step_dense(conv_window.example[0]).shape)
compile_and_fit(multi_step_dense, conv_window)
metric_index = multi_step_dense.metrics_names.index('mean_absolute_error')
return TrainingResult(
validation_performance=multi_step_dense.evaluate(conv_window.val)[metric_index],
performance=multi_step_dense.evaluate(conv_window.test, verbose=0)[metric_index]
)
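# Editor's sketch (not part of this module): the shape bookkeeping behind the
# Flatten -> Dense -> Reshape stack above. A (batch, CONV_WIDTH, features) window
# collapses to (batch, 1, 1) -- one prediction per window. The feature count of 19
# is an arbitrary illustrative value.
if __name__ == "__main__":
    example_window = tf.zeros((4, CONV_WIDTH, 19))
    example_model = tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(units=32, activation='relu'),
        tf.keras.layers.Dense(units=1),
        tf.keras.layers.Reshape([1, -1]),
    ])
    print(example_model(example_window).shape)  # (4, 1, 1)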
| avg_line_length: 34.357143 | max_line_length: 88 | alphanum_fraction: 0.711712 |

| hexsha: dee86b8ff68a8aa9642bf5930ec16467e51f55c7 | size: 775 | ext: py | lang: Python |
| max_stars_repo: standardsettings/environments/local.py | divio/django-standardsettings | 26f20b069c053692671e344e975e1e8b71e48692 | ["BSD-3-Clause"] | stars: null | events: null - null |
| max_issues_repo: standardsettings/environments/local.py | divio/django-standardsettings | 26f20b069c053692671e344e975e1e8b71e48692 | ["BSD-3-Clause"] | issues: null | events: null - null |
| max_forks_repo: standardsettings/environments/local.py | divio/django-standardsettings | 26f20b069c053692671e344e975e1e8b71e48692 | ["BSD-3-Clause"] | forks: 1 | events: 2018-11-18T01:19:18.000Z - 2018-11-18T01:19:18.000Z |
# -*- coding: utf-8 -*-
from getenv import env
import os
def apply_settings(settings):
s = settings
s.DATA_ROOT = os.path.abspath(env("DATA_ROOT", os.path.join(s.PROJECT_ROOT, 'tmp')))
s.MEDIA_ROOT = env("MEDIA_ROOT", os.path.join(s.DATA_ROOT, 'media'))
s.STATIC_ROOT = env("STATIC_ROOT", os.path.join(s.DATA_ROOT, 'static_collected'))
s.LOG_ROOT = env("LOG_ROOT", os.path.join(s.DATA_ROOT, 'logs'))
s.SOCKET_ROOT = env("SOCKET_ROOT", s.DATA_ROOT)
s.REDIS_SOCKET = env("REDIS_SOCKET", os.path.join(s.SOCKET_ROOT, 'redis.sock'))
s.ALLOWED_HOSTS = env("ALLOWED_HOSTS", ['127.0.0.1', 'localhost',])
import dj_database_url
s.DATABASES = {'default': dj_database_url.config(default='sqlite:///%s' % os.path.join(s.DATA_ROOT, 'db.sqlite3'))}
| avg_line_length: 51.666667 | max_line_length: 119 | alphanum_fraction: 0.682581 |

| hexsha: feb8cfc278b63aaedd753bed42a5574a80aa1782 | size: 27,042 | ext: py | lang: Python |
| max_stars_repo: dask_cudf/core.py | shwina/dask-cudf | c0991ce0b0e0a7154c7d80f70eb20790779e9e66 | ["Apache-2.0"] | stars: 95 | events: 2018-11-09T13:01:30.000Z - 2021-08-23T09:09:54.000Z |
| max_issues_repo: dask_cudf/core.py | shwina/dask-cudf | c0991ce0b0e0a7154c7d80f70eb20790779e9e66 | ["Apache-2.0"] | issues: 217 | events: 2018-11-07T00:14:03.000Z - 2019-07-11T21:36:15.000Z |
| max_forks_repo: dask_cudf/core.py | shwina/dask-cudf | c0991ce0b0e0a7154c7d80f70eb20790779e9e66 | ["Apache-2.0"] | forks: 38 | events: 2018-11-28T08:44:59.000Z - 2021-11-25T08:47:41.000Z |
# Copyright (c) 2018, NVIDIA CORPORATION.
import warnings
from collections import OrderedDict
import pandas as pd
import dask
import dask.dataframe as dd
import numpy as np
from dask import compute
from dask.base import normalize_token, tokenize
from dask.compatibility import apply
from dask.context import _globals
from dask.core import flatten
from dask.dataframe import from_delayed
from dask.dataframe.core import Scalar, handle_out, map_partitions
from dask.dataframe.utils import raise_on_meta_error
from dask.delayed import delayed
from dask.optimization import cull, fuse
from dask.utils import M, OperatorMethodMixin, funcname, derived_from
from toolz import partition_all
import cudf
import cudf.bindings.reduce as cpp_reduce
from dask_cudf import batcher_sortnet, join_impl
from dask_cudf.accessor import CachedAccessor, CategoricalAccessor, DatetimeAccessor
def optimize(dsk, keys, **kwargs):
flatkeys = list(flatten(keys)) if isinstance(keys, list) else [keys]
dsk, dependencies = cull(dsk, flatkeys)
dsk, dependencies = fuse(
dsk,
keys,
dependencies=dependencies,
ave_width=_globals.get("fuse_ave_width", 1),
)
dsk, _ = cull(dsk, keys)
return dsk
def finalize(results):
return cudf.concat(results)
class _Frame(dd.core._Frame, OperatorMethodMixin):
""" Superclass for DataFrame and Series
Parameters
----------
dsk : dict
The dask graph to compute this DataFrame
name : str
The key prefix that specifies which keys in the dask comprise this
particular DataFrame / Series
meta : cudf.DataFrame, cudf.Series, or cudf.Index
An empty cudf object with names, dtypes, and indices matching the
expected output.
divisions : tuple of index values
Values along which we partition our blocks on the index
"""
__dask_scheduler__ = staticmethod(dask.get)
__dask_optimize__ = staticmethod(optimize)
def __dask_postcompute__(self):
return finalize, ()
def __dask_postpersist__(self):
return type(self), (self._name, self._meta, self.divisions)
def __init__(self, dsk, name, meta, divisions):
self.dask = dsk
self._name = name
meta = dd.core.make_meta(meta)
if not isinstance(meta, self._partition_type):
raise TypeError(
"Expected meta to specify type {0}, got type "
"{1}".format(self._partition_type.__name__, type(meta).__name__)
)
self._meta = dd.core.make_meta(meta)
self.divisions = tuple(divisions)
def __getstate__(self):
return (self.dask, self._name, self._meta, self.divisions)
def __setstate__(self, state):
self.dask, self._name, self._meta, self.divisions = state
def __repr__(self):
s = "<dask_cudf.%s | %d tasks | %d npartitions>"
return s % (type(self).__name__, len(self.dask), self.npartitions)
def to_dask_dataframe(self):
"""Create a dask.dataframe object from a dask_cudf object"""
return self.map_partitions(M.to_pandas)
concat = dd.concat
normalize_token.register(_Frame, lambda a: a._name)
class DataFrame(_Frame, dd.core.DataFrame):
_partition_type = cudf.DataFrame
def _assign_column(self, k, v):
def assigner(df, k, v):
out = df.copy()
out[k] = v
return out
meta = assigner(self._meta, k, dd.core.make_meta(v))
return self.map_partitions(assigner, k, v, meta=meta)
def apply_rows(self, func, incols, outcols, kwargs={}, cache_key=None):
import uuid
if cache_key is None:
cache_key = uuid.uuid4()
def do_apply_rows(df, func, incols, outcols, kwargs):
return df.apply_rows(func, incols, outcols, kwargs, cache_key=cache_key)
meta = do_apply_rows(self._meta, func, incols, outcols, kwargs)
return self.map_partitions(
do_apply_rows, func, incols, outcols, kwargs, meta=meta
)
def merge(
self,
other,
on=None,
how="left",
left_index=False,
right_index=False,
suffixes=("_x", "_y"),
):
"""Merging two dataframes on the column(s) indicated in *on*.
"""
if (
left_index
or right_index
or not dask.is_dask_collection(other)
or self.npartitions == 1
and how in ("inner", "right")
or other.npartitions == 1
and how in ("inner", "left")
):
return dd.merge(
self,
other,
how=how,
suffixes=suffixes,
left_index=left_index,
right_index=right_index,
)
if not on and not left_index and not right_index:
on = [c for c in self.columns if c in other.columns]
if not on:
left_index = right_index = True
return join_impl.join_frames(
left=self,
right=other,
on=on,
how=how,
lsuffix=suffixes[0],
rsuffix=suffixes[1],
)
def join(self, other, how="left", lsuffix="", rsuffix=""):
"""Join two dataframes
*on* is not supported.
"""
if how == "right":
return other.join(other=self, how="left", lsuffix=rsuffix, rsuffix=lsuffix)
same_names = set(self.columns) & set(other.columns)
if same_names and not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
left, leftuniques = self._align_divisions()
right, rightuniques = other._align_to_indices(leftuniques)
leftparts = left.to_delayed()
rightparts = right.to_delayed()
@delayed
def part_join(left, right, how):
return left.join(
right, how=how, sort=True, lsuffix=lsuffix, rsuffix=rsuffix
)
def inner_selector():
pivot = 0
for i in range(len(leftparts)):
for j in range(pivot, len(rightparts)):
if leftuniques[i] & rightuniques[j]:
yield leftparts[i], rightparts[j]
pivot = j + 1
break
def left_selector():
pivot = 0
for i in range(len(leftparts)):
for j in range(pivot, len(rightparts)):
if leftuniques[i] & rightuniques[j]:
yield leftparts[i], rightparts[j]
pivot = j + 1
break
else:
yield leftparts[i], None
selector = {"left": left_selector, "inner": inner_selector}[how]
rhs_dtypes = [(k, other._meta.dtypes[k]) for k in other._meta.columns]
@delayed
def fix_column(lhs):
df = cudf.DataFrame()
for k in lhs.columns:
df[k + lsuffix] = lhs[k]
for k, dtype in rhs_dtypes:
data = np.zeros(len(lhs), dtype=dtype)
mask_size = cudf.utils.utils.calc_chunk_size(
data.size, cudf.utils.utils.mask_bitsize
)
mask = np.zeros(mask_size, dtype=cudf.utils.utils.mask_dtype)
sr = cudf.Series.from_masked_array(
data=data, mask=mask, null_count=data.size
)
df[k + rsuffix] = sr.set_index(df.index)
return df
joinedparts = [
(part_join(lhs, rhs, how=how) if rhs is not None else fix_column(lhs))
for lhs, rhs in selector()
]
meta = self._meta.join(other._meta, how=how, lsuffix=lsuffix, rsuffix=rsuffix)
return from_delayed(joinedparts, meta=meta)
def _align_divisions(self):
"""Align so that the values do not split across partitions
"""
parts = self.to_delayed()
uniques = self._get_unique_indices(parts=parts)
originals = list(map(frozenset, uniques))
changed = True
while changed:
changed = False
for i in range(len(uniques))[:-1]:
intersect = uniques[i] & uniques[i + 1]
if intersect:
smaller = min(uniques[i], uniques[i + 1], key=len)
bigger = max(uniques[i], uniques[i + 1], key=len)
smaller |= intersect
bigger -= intersect
changed = True
# Fix empty partitions
uniques = list(filter(bool, uniques))
return self._align_to_indices(uniques, originals=originals, parts=parts)
def _get_unique_indices(self, parts=None):
if parts is None:
parts = self.to_delayed()
@delayed
def unique(x):
return set(x.index.as_column().unique().to_array())
return compute(*map(unique, parts))  # honor the passed-in ``parts`` instead of recomputing it
def _align_to_indices(self, uniques, originals=None, parts=None):
uniques = list(map(set, uniques))
if parts is None:
parts = self.to_delayed()
if originals is None:
originals = self._get_unique_indices(parts=parts)
allindices = set()
for x in originals:
allindices |= x
for us in uniques:
us &= allindices
uniques = list(filter(bool, uniques))
extras = originals[-1] - uniques[-1]
extras = {x for x in extras if x > max(uniques[-1])}
if extras:
uniques.append(extras)
remap = OrderedDict()
for idxset in uniques:
remap[tuple(sorted(idxset))] = bins = []
for i, orig in enumerate(originals):
if idxset & orig:
bins.append(parts[i])
@delayed
def take(indices, depends):
first = min(indices)
last = max(indices)
others = []
for d in depends:
# TODO: this can be replaced with searchsorted
# Normalize to index data in range before selection.
firstindex = d.index[0]
lastindex = d.index[-1]
s = max(first, firstindex)
e = min(last, lastindex)
others.append(d.loc[s:e])
return cudf.concat(others)
newparts = []
for idx, depends in remap.items():
newparts.append(take(idx, depends))
divisions = list(map(min, uniques))
divisions.append(max(uniques[-1]))
newdd = from_delayed(newparts, meta=self._meta)
return newdd, uniques
def _compute_divisions(self):
if self.known_divisions:
return self
@delayed
def first_index(df):
return df.index[0]
@delayed
def last_index(df):
return df.index[-1]
parts = self.to_delayed()
divs = [first_index(p) for p in parts] + [last_index(parts[-1])]
divisions = compute(*divs)
return type(self)(self.dask, self._name, self._meta, divisions)
def set_index(self, index, drop=True, sorted=False):
"""Set new index.
Parameters
----------
index : str or Series
If a ``str`` is provided, it is used as the name of the
column to be made into the index.
If a ``Series`` is provided, it is used as the new index
drop : bool
Whether the first original index column is dropped.
sorted : bool
Whether the new index column is already sorted.
"""
if not drop:
raise NotImplementedError("drop=False not supported yet")
if isinstance(index, str):
tmpdf = self.sort_values(index)
return tmpdf._set_column_as_sorted_index(index, drop=drop)
elif isinstance(index, Series):
indexname = "__dask_cudf.index"
df = self.assign(**{indexname: index})
return df.set_index(indexname, drop=drop, sorted=sorted)
else:
raise TypeError("cannot set_index from {}".format(type(index)))
def _set_column_as_sorted_index(self, colname, drop):
def select_index(df, col):
return df.set_index(col)
return self.map_partitions(
select_index, col=colname, meta=self._meta.set_index(colname)
)
def _argsort(self, col, sorted=False):
"""
Returns
-------
shufidx : Series
Positional indices to be used with .take() to
put the dataframe in order w.r.t ``col``.
"""
# Get subset with just the index and positional value
subset = self[col].to_dask_dataframe()
subset = subset.reset_index(drop=False)
ordered = subset.set_index(0, sorted=sorted)
shufidx = from_dask_dataframe(ordered)["index"]
return shufidx
def _set_index_raw(self, indexname, drop, sorted):
shufidx = self._argsort(indexname, sorted=sorted)
# Shuffle the GPU data
shuffled = self.take(shufidx, npartitions=self.npartitions)
out = shuffled.map_partitions(lambda df: df.set_index(indexname))
return out
def reset_index(self, force=False, drop=False):
"""Reset index to range based
"""
if force:
dfs = self.to_delayed()
sizes = np.asarray(compute(*map(delayed(len), dfs)))
prefixes = np.zeros_like(sizes)
prefixes[1:] = np.cumsum(sizes[:-1])
@delayed
def fix_index(df, startpos):
stoppos = startpos + len(df)
return df.set_index(
cudf.dataframe.RangeIndex(start=startpos, stop=stoppos)
)
outdfs = [fix_index(df, startpos) for df, startpos in zip(dfs, prefixes)]
return from_delayed(outdfs, meta=self._meta.reset_index(drop=True))
else:
return self.map_partitions(M.reset_index, drop=drop)
def sort_values(self, by, ignore_index=False):
"""Sort by the given column
Parameters
----------
by : str
"""
parts = self.to_delayed()
sorted_parts = batcher_sortnet.sort_delayed_frame(parts, by)
return from_delayed(sorted_parts, meta=self._meta).reset_index(
force=not ignore_index
)
def sort_values_binned(self, by):
"""Sort by the given column and ensure that the same key
doesn't spread across multiple partitions.
"""
# Get sorted partitions
parts = self.sort_values(by=by).to_delayed()
# Get unique keys in each partition
@delayed
def get_unique(p):
return set(p[by].unique())
uniques = list(compute(*map(get_unique, parts)))
joiner = {}
for i in range(len(uniques)):
joiner[i] = to_join = {}
for j in range(i + 1, len(uniques)):
intersect = uniques[i] & uniques[j]
# If the keys intersect
if intersect:
# Remove keys
uniques[j] -= intersect
to_join[j] = frozenset(intersect)
else:
break
@delayed
def join(df, other, keys):
others = [other.query("{by}==@k".format(by=by)) for k in sorted(keys)]
return cudf.concat([df] + others)
@delayed
def drop(df, keep_keys):
locvars = locals()
for i, k in enumerate(keep_keys):
locvars["k{}".format(i)] = k
conds = ["{by}==@k{i}".format(by=by, i=i) for i in range(len(keep_keys))]
expr = " or ".join(conds)
return df.query(expr)
for i in range(len(parts)):
if uniques[i]:
parts[i] = drop(parts[i], uniques[i])
for joinee, intersect in joiner[i].items():
parts[i] = join(parts[i], parts[joinee], intersect)
results = [p for i, p in enumerate(parts) if uniques[i]]
return from_delayed(results, meta=self._meta).reset_index()
def _shuffle_sort_values(self, by):
"""Slow shuffle based sort by the given column
Parameters
----------
by : str
"""
shufidx = self._argsort(by)
return self.take(shufidx)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False,
dtype=None, out=None):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var'
result = map_partitions(var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
def sum_of_squares(x):
x = x.astype("f8")._column
outcol = cpp_reduce.apply_reduce("sum_of_squares", x)
return cudf.Series(outcol)
def var_aggregate(x2, x, n, ddof):
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
result = (x2 / n) - (x / n)**2
if ddof != 0:
result = result * n / (n - ddof)
return result
except ZeroDivisionError:
return np.float64(np.nan)
def nlargest_agg(x, **kwargs):
return cudf.concat(x).nlargest(**kwargs)
def nsmallest_agg(x, **kwargs):
return cudf.concat(x).nsmallest(**kwargs)
def unique_k_agg(x, **kwargs):
return cudf.concat(x).unique_k(**kwargs)
class Series(_Frame, dd.core.Series):
_partition_type = cudf.Series
def count(self, split_every=False):
return reduction(
self, chunk=M.count, aggregate=np.sum, split_every=split_every, meta="i8"
)
def mean(self, split_every=False):
sum = self.sum(split_every=split_every)
n = self.count(split_every=split_every)
return sum / n
def unique_k(self, k, split_every=None):
return reduction(
self,
chunk=M.unique_k,
aggregate=unique_k_agg,
meta=self._meta,
token="unique-k",
split_every=split_every,
k=k,
)
@derived_from(pd.DataFrame)
def var(self, axis=None, skipna=True, ddof=1, split_every=False, dtype=None, out=None):
axis = self._validate_axis(axis)
meta = self._meta_nonempty.var(axis=axis, skipna=skipna)
if axis == 1:
result = map_partitions(M.var, self, meta=meta,
token=self._token_prefix + 'var',
axis=axis, skipna=skipna, ddof=ddof)
return handle_out(out, result)
else:
num = self._get_numeric_data()
x = 1.0 * num.sum(skipna=skipna, split_every=split_every)
x2 = 1.0 * (num ** 2).sum(skipna=skipna, split_every=split_every)
n = num.count(split_every=split_every)
name = self._token_prefix + 'var'
result = map_partitions(var_aggregate, x2, x, n,
token=name, meta=meta, ddof=ddof)
if isinstance(self, DataFrame):
result.divisions = (min(self.columns), max(self.columns))
return handle_out(out, result)
# ----------------------------------------------------------------------
# Accessor Methods
# ----------------------------------------------------------------------
dt = CachedAccessor("dt", DatetimeAccessor)
cat = CachedAccessor("cat", CategoricalAccessor)
class Index(Series, dd.core.Index):
_partition_type = cudf.dataframe.index.Index
def splits_divisions_sorted_cudf(df, chunksize):
segments = list(df.index.find_segments().to_array())
segments.append(len(df) - 1)
splits = [0]
last = current_size = 0
for s in segments:
size = s - last
last = s
current_size += size
if current_size >= chunksize:
splits.append(s)
current_size = 0
# Ensure end is included
if splits[-1] != segments[-1]:
splits.append(segments[-1])
divisions = tuple(df.index.take(np.array(splits)).values)
splits[-1] += 1 # Offset to extract to end
return splits, divisions
def _extract_meta(x):
"""
Extract internal cache data (``_meta``) from dask_cudf objects
"""
if isinstance(x, (Scalar, _Frame)):
return x._meta
elif isinstance(x, list):
return [_extract_meta(_x) for _x in x]
elif isinstance(x, tuple):
return tuple([_extract_meta(_x) for _x in x])
elif isinstance(x, dict):
return {k: _extract_meta(v) for k, v in x.items()}
return x
def _emulate(func, *args, **kwargs):
"""
Apply a function using args / kwargs. If arguments contain dd.DataFrame /
dd.Series, using internal cache (``_meta``) for calculation
"""
with raise_on_meta_error(funcname(func)):
return func(*_extract_meta(args), **_extract_meta(kwargs))
def align_partitions(args):
"""Align partitions between dask_cudf objects.
Note that if all divisions are unknown, but have equal npartitions, then
they will be passed through unchanged."""
dfs = [df for df in args if isinstance(df, _Frame)]
if not dfs:
return args
divisions = dfs[0].divisions
if not all(df.divisions == divisions for df in dfs):
raise NotImplementedError("Aligning mismatched partitions")
return args
def reduction(
args,
chunk=None,
aggregate=None,
combine=None,
meta=None,
token=None,
chunk_kwargs=None,
aggregate_kwargs=None,
combine_kwargs=None,
split_every=None,
**kwargs
):
"""Generic tree reduction operation.
Parameters
----------
args :
Positional arguments for the `chunk` function. All `dask.dataframe`
objects should be partitioned and indexed equivalently.
chunk : function [block-per-arg] -> block
Function to operate on each block of data
aggregate : function list-of-blocks -> block
Function to operate on the list of results of chunk
combine : function list-of-blocks -> block, optional
Function to operate on intermediate lists of results of chunk
in a tree-reduction. If not provided, defaults to aggregate.
$META
token : str, optional
The name to use for the output keys.
chunk_kwargs : dict, optional
Keywords for the chunk function only.
aggregate_kwargs : dict, optional
Keywords for the aggregate function only.
combine_kwargs : dict, optional
Keywords for the combine function only.
split_every : int, optional
Group partitions into groups of this size while performing a
tree-reduction. If set to False, no tree-reduction will be used,
and all intermediates will be concatenated and passed to ``aggregate``.
Default is 8.
kwargs :
All remaining keywords will be passed to ``chunk``, ``aggregate``, and
``combine``.
"""
if chunk_kwargs is None:
chunk_kwargs = dict()
if aggregate_kwargs is None:
aggregate_kwargs = dict()
chunk_kwargs.update(kwargs)
aggregate_kwargs.update(kwargs)
if combine is None:
if combine_kwargs:
raise ValueError("`combine_kwargs` provided with no `combine`")
combine = aggregate
combine_kwargs = aggregate_kwargs
else:
if combine_kwargs is None:
combine_kwargs = dict()
combine_kwargs.update(kwargs)
if not isinstance(args, (tuple, list)):
args = [args]
npartitions = set(arg.npartitions for arg in args if isinstance(arg, _Frame))
if len(npartitions) > 1:
raise ValueError("All arguments must have same number of partitions")
npartitions = npartitions.pop()
if split_every is None:
split_every = 8
elif split_every is False:
split_every = npartitions
elif split_every < 2 or not isinstance(split_every, int):
raise ValueError("split_every must be an integer >= 2")
token_key = tokenize(
token or (chunk, aggregate),
meta,
args,
chunk_kwargs,
aggregate_kwargs,
combine_kwargs,
split_every,
)
# Chunk
a = "{0}-chunk-{1}".format(token or funcname(chunk), token_key)
if len(args) == 1 and isinstance(args[0], _Frame) and not chunk_kwargs:
dsk = {(a, 0, i): (chunk, key) for i, key in enumerate(args[0].__dask_keys__())}
else:
dsk = {
(a, 0, i): (
apply,
chunk,
[(x._name, i) if isinstance(x, _Frame) else x for x in args],
chunk_kwargs,
)
for i in range(args[0].npartitions)
}
# Combine
b = "{0}-combine-{1}".format(token or funcname(combine), token_key)
k = npartitions
depth = 0
while k > split_every:
for part_i, inds in enumerate(partition_all(split_every, range(k))):
conc = (list, [(a, depth, i) for i in inds])
dsk[(b, depth + 1, part_i)] = (
(apply, combine, [conc], combine_kwargs)
if combine_kwargs
else (combine, conc)
)
k = part_i + 1
a = b
depth += 1
# Aggregate
b = "{0}-agg-{1}".format(token or funcname(aggregate), token_key)
conc = (list, [(a, depth, i) for i in range(k)])
if aggregate_kwargs:
dsk[(b, 0)] = (apply, aggregate, [conc], aggregate_kwargs)
else:
dsk[(b, 0)] = (aggregate, conc)
if meta is None:
meta_chunk = _emulate(apply, chunk, args, chunk_kwargs)
meta = _emulate(apply, aggregate, [[meta_chunk]], aggregate_kwargs)
meta = dd.core.make_meta(meta)
for arg in args:
if isinstance(arg, _Frame):
dsk.update(arg.dask)
return dd.core.new_dd_object(dsk, b, meta, (None, None))
from_cudf = dd.from_pandas
def from_dask_dataframe(df):
return df.map_partitions(cudf.from_pandas)
| 32.738499 | 91 | 0.577805 |
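dask_cudf/core.py above adapts cudf objects to dask's collection protocol: graph optimization via cull/fuse, empty-frame metadata through _meta, and delayed-based joins and sorts. A minimal usage sketch follows; it assumes a cudf/dask_cudf installation of roughly the same vintage as this file and a visible GPU.

import cudf
import dask_cudf

# A small GPU dataframe split into two partitions.
gdf = cudf.DataFrame({"key": [3, 1, 2, 4], "value": [30.0, 10.0, 20.0, 40.0]})
ddf = dask_cudf.from_cudf(gdf, npartitions=2)

# Operations are lazy; compute() materialises a cudf object.
print(ddf.sort_values(by="key").compute())
print(ddf["value"].mean().compute())   # Series.mean above is sum / count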
58d44bdb35e0f517d5f2e37d7a97b500fdc1c9ec | 2,331 | py | Python | tests/test_mail.py | h4ck3rm1k3/scrapy | 59dcdbe84769c9d204f552a2b545b1e096a2d42c | ["BSD-3-Clause"] | 26 | 2015-02-07T17:35:26.000Z | 2020-04-27T21:11:00.000Z | tests/test_mail.py | h4ck3rm1k3/scrapy | 59dcdbe84769c9d204f552a2b545b1e096a2d42c | ["BSD-3-Clause"] | 2 | 2021-09-20T19:54:29.000Z | 2022-03-22T21:47:39.000Z | tests/test_mail.py | h4ck3rm1k3/scrapy | 59dcdbe84769c9d204f552a2b545b1e096a2d42c | ["BSD-3-Clause"] | 9 | 2015-09-21T08:17:20.000Z | 2021-02-07T02:31:36.000Z |
import unittest
from io import BytesIO
from scrapy.mail import MailSender
class MailSenderTest(unittest.TestCase):
def test_send(self):
mailsender = MailSender(debug=True)
mailsender.send(to=['test@scrapy.org'], subject='subject', body='body', _callback=self._catch_mail_sent)
assert self.catched_msg
self.assertEqual(self.catched_msg['to'], ['test@scrapy.org'])
self.assertEqual(self.catched_msg['subject'], 'subject')
self.assertEqual(self.catched_msg['body'], 'body')
msg = self.catched_msg['msg']
self.assertEqual(msg['to'], 'test@scrapy.org')
self.assertEqual(msg['subject'], 'subject')
self.assertEqual(msg.get_payload(), 'body')
self.assertEqual(msg.get('Content-Type'), 'text/plain')
def test_send_html(self):
mailsender = MailSender(debug=True)
mailsender.send(to=['test@scrapy.org'], subject='subject', body='<p>body</p>', mimetype='text/html', _callback=self._catch_mail_sent)
msg = self.catched_msg['msg']
self.assertEqual(msg.get_payload(), '<p>body</p>')
self.assertEqual(msg.get('Content-Type'), 'text/html')
def test_send_attach(self):
attach = BytesIO()
attach.write(b'content')
attach.seek(0)
attachs = [('attachment', 'text/plain', attach)]
mailsender = MailSender(debug=True)
mailsender.send(to=['test@scrapy.org'], subject='subject', body='body',
attachs=attachs, _callback=self._catch_mail_sent)
assert self.catched_msg
self.assertEqual(self.catched_msg['to'], ['test@scrapy.org'])
self.assertEqual(self.catched_msg['subject'], 'subject')
self.assertEqual(self.catched_msg['body'], 'body')
msg = self.catched_msg['msg']
self.assertEqual(msg['to'], 'test@scrapy.org')
self.assertEqual(msg['subject'], 'subject')
payload = msg.get_payload()
assert isinstance(payload, list)
self.assertEqual(len(payload), 2)
text, attach = payload
self.assertEqual(text.get_payload(decode=True), b'body')
self.assertEqual(attach.get_payload(decode=True), b'content')
def _catch_mail_sent(self, **kwargs):
self.catched_msg = dict(**kwargs)
if __name__ == "__main__":
unittest.main()
| 35.861538 | 141 | 0.642643 |
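These tests rely on MailSender's debug mode, in which send() skips SMTP entirely and hands the composed message to the private _callback argument. A stand-alone sketch of that pattern, assuming Scrapy is installed, is shown below; it prints the captured values instead of asserting on them.

from scrapy.mail import MailSender

captured = {}

def catch_mail_sent(**kwargs):
    # Debug mode routes the composed message here instead of an SMTP server.
    captured.update(kwargs)

sender = MailSender(debug=True)
sender.send(to=['someone@example.com'], subject='hello', body='hi there',
            _callback=catch_mail_sent)
print(captured['to'], captured['subject'])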
19ac70210983b3fc4810a8cecffadd0e0b77ad80 | 2,189 | py | Python | examples/naive_bayes_spmd.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null | examples/naive_bayes_spmd.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null | examples/naive_bayes_spmd.py | agorshk/daal4py | 58a9b2301c47cd2d5144a403a59c210e10b75f8f | ["Apache-2.0"] | null | null | null |
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# daal4py Naive Bayes Classification example for distributed memory systems; SPMD mode
# run like this:
# mpirun -n 4 python ./naive_bayes_spmd.py
import daal4py as d4p
from numpy import loadtxt, allclose
if __name__ == "__main__":
# Initialize SPMD mode
d4p.daalinit()
# Each process gets its own data
infile = "./data/batch/naivebayes_train_dense.csv"
# Configure a training object (20 classes)
talgo = d4p.multinomial_naive_bayes_training(20, distributed=True)
# Read data. Let's use 20 features per observation
data = loadtxt(infile, delimiter=',', usecols=range(20))
labels = loadtxt(infile, delimiter=',', usecols=range(20,21))
labels.shape = (labels.size, 1) # must be a 2d array
tresult = talgo.compute(data, labels)
# Now let's do some prediction
# It runs only on a single node
if d4p.my_procid() == 0:
palgo = d4p.multinomial_naive_bayes_prediction(20)
# read test data (with same #features)
pdata = loadtxt("./data/batch/naivebayes_test_dense.csv", delimiter=',', usecols=range(20))
# now predict using the model from the training above
presult = palgo.compute(pdata, tresult.model)
# Prediction result provides prediction
assert(presult.prediction.shape == (pdata.shape[0], 1))
print('All looks good!')
d4p.daalfini()
| 37.741379 | 99 | 0.656921 |
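The example above must be launched under MPI (mpirun -n 4 python ./naive_bayes_spmd.py) because of distributed=True and daalinit()/daalfini(). For a quick local check, roughly the same pipeline can run in plain batch mode; the sketch below assumes the same bundled CSV layout of 20 feature columns followed by one label column.

import daal4py as d4p
from numpy import loadtxt

infile = "./data/batch/naivebayes_train_dense.csv"
data = loadtxt(infile, delimiter=',', usecols=range(20))
labels = loadtxt(infile, delimiter=',', usecols=range(20, 21)).reshape(-1, 1)

# Batch (single-process) training and prediction with 20 classes.
train_result = d4p.multinomial_naive_bayes_training(20).compute(data, labels)
pdata = loadtxt("./data/batch/naivebayes_test_dense.csv", delimiter=',', usecols=range(20))
pred = d4p.multinomial_naive_bayes_prediction(20).compute(pdata, train_result.model)
print(pred.prediction[:5])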
fe677d5af4fc41f2107ead5333669c9ef5605c6a | 442 | py | Python | linepy/__init__.py | Kaneki711/line-py | e8a008d800b041952846b0055f8a67c981e99256 | ["BSD-3-Clause"] | null | null | null | linepy/__init__.py | Kaneki711/line-py | e8a008d800b041952846b0055f8a67c981e99256 | ["BSD-3-Clause"] | null | null | null | linepy/__init__.py | Kaneki711/line-py | e8a008d800b041952846b0055f8a67c981e99256 | ["BSD-3-Clause"] | null | null | null |
from .client import LINE
from .channel import Channel
from .oepoll import OEPoll
from akad.ttypes import OpType
__copyright__ = 'Copyright 2018 by Fadhiil Rachman'
__version__ = '3.0.6'
__license__ = 'BSD-3-Clause'
__author__ = 'Fadhiil Rachman'
__author_email__ = 'fadhiilrachman@gmail.com'
__url__ = 'http://github.com/fadhiilrachman/line-py'
__all__ = ['LINE', 'Channel', 'OEPoll', 'OpType']
| 34 | 64 | 0.680995 |
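The initializer above only re-exports the package's public classes. Assuming line-py and its generated akad thrift bindings are installed, a typical import looks like the sketch below.

import linepy

print(linepy.__version__)        # '3.0.6' in the snapshot above
print(linepy.__all__)            # ['LINE', 'Channel', 'OEPoll', 'OpType']

from linepy import LINE, OEPoll  # the names re-exported by __init__.py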
9e81ab194c61be14cb52b0a28e7f4925ef090dec | 2,560 | py | Python | app/tests/integration/test_create_roa.py | 18F/aocourt-api | 0fd8aab0c993b6e704e1e57fe696bf120c68af3d | ["CC0-1.0"] | null | null | null | app/tests/integration/test_create_roa.py | 18F/aocourt-api | 0fd8aab0c993b6e704e1e57fe696bf120c68af3d | ["CC0-1.0"] | 5 | 2021-07-23T00:20:40.000Z | 2021-08-03T21:11:15.000Z | app/tests/integration/test_create_roa.py | 18F/aocourts-api | 0fd8aab0c993b6e704e1e57fe696bf120c68af3d | ["CC0-1.0"] | null | null | null |
import json
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
headers = {"Content-Type": "application/json"}
def test_create_roa_mutation(client: TestClient, db_session: Session, simple_case) -> None:
'''It should return a createRecordOnAppeal object with only the requested fields'''
query = {
"query": "mutation{createRecordOnAppeal(caseId: %d) {title, originalCaseId}}" % simple_case.id
}
r = client.post("/graphql/", data=json.dumps(query), headers=headers)
assert r.status_code == 200
resp = r.json()
assert resp['data']['createRecordOnAppeal']['originalCaseId'] == simple_case.id
assert resp['data']['createRecordOnAppeal']['title'] == simple_case.title
def test_create_roa_mutation_persists(client: TestClient, db_session: Session, simple_case) -> None:
'''It should add a record of appeal to the system'''
mutation = {
"query": "mutation{createRecordOnAppeal(caseId: %d) {id, title, originalCaseId}}" % simple_case.id
}
r = client.post("/graphql/", data=json.dumps(mutation), headers=headers)
resp = r.json()
roa_id = resp['data']['createRecordOnAppeal']['id']
query = {
"query": "{recordOnAppeal(id: %s) {id, title, originalCaseId}}" % roa_id
}
r = client.post("/graphql/", data=json.dumps(query), headers=headers)
assert r.status_code == 200
resp = r.json()
assert resp['data'] == {
'recordOnAppeal': {
'id': roa_id,
'title': simple_case.title,
'originalCaseId': simple_case.id
}
}
def test_send_roa_persists(client: TestClient, db_session: Session, simple_case) -> None:
'''It should add a record of appeal to the system'''
create_mutation = {
"query": "mutation{createRecordOnAppeal(caseId: %d) {id, title, originalCaseId}}" % simple_case.id
}
r = client.post("/graphql/", data=json.dumps(create_mutation), headers=headers)
resp = r.json()
roa_id = resp['data']['createRecordOnAppeal']['id']
send_mutation = {
"query": '''
mutation{sendRecordOnAppeal(recordOnAppealId: %s, receivingCourtId: "ca9") {
id,
receivingCourt{
id
} }}''' % roa_id
}
r = client.post("/graphql/", data=json.dumps(send_mutation), headers=headers)
assert r.status_code == 200
resp = r.json()
assert resp['data'] == {
'sendRecordOnAppeal': {
'id': roa_id,
'receivingCourt': {'id': 'ca9'}
}
}
| 36.056338 | 106 | 0.624609 |
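Both tests build their GraphQL documents by %-formatting strings and posting them as a JSON body to /graphql/. The sketch below isolates that payload-building step; the case id 42 is a placeholder and the field names simply mirror the ones used in the tests.

import json

CREATE_ROA = """
mutation {
  createRecordOnAppeal(caseId: %d) {
    id
    title
    originalCaseId
  }
}
"""

def graphql_payload(query: str) -> str:
    # The tests above POST a body shaped like {"query": "..."}.
    return json.dumps({"query": query})

print(graphql_payload(CREATE_ROA % 42))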
be3bb3e3fc193179f58315c0cc22ca9f3f1be25d | 1,096 | py | Python | dongers/dongers.py | WreckRox/FlapJack-Cogs | e2950f5dc7916127c3b9519ba8bfea1f71fead40 | ["MIT"] | 42 | 2017-04-15T17:29:40.000Z | 2022-02-16T18:15:52.000Z | dongers/dongers.py | WreckRox/FlapJack-Cogs | e2950f5dc7916127c3b9519ba8bfea1f71fead40 | ["MIT"] | 89 | 2017-03-22T03:21:42.000Z | 2022-03-15T18:14:49.000Z | dongers/dongers.py | WreckRox/FlapJack-Cogs | e2950f5dc7916127c3b9519ba8bfea1f71fead40 | ["MIT"] | 72 | 2017-03-23T01:03:29.000Z | 2022-01-25T22:47:15.000Z |
import random
import aiohttp
from redbot.core import commands
from bs4 import BeautifulSoup
class Dongers(commands.Cog):
"""Cog that does dongers"""
def __init__(self, bot):
self.bot = bot
self.donger_pages = 40
self.session = aiohttp.ClientSession()
async def red_delete_data_for_user(self, **kwargs):
"""Nothing to delete."""
return
@commands.command()
async def donger(self, ctx):
"""Print a random donger in chat"""
# Access random donger page
url = "http://dongerlist.com/page/" + str(random.randint(1, self.donger_pages))
async with self.session.get(url) as response:
soup = BeautifulSoup(await response.text(), "html.parser")
try:
donger_list = soup.find_all("textarea", "donger")
await ctx.send(random.choice(donger_list).get_text())
except Exception:
await ctx.send("I couldn't find any dongers. ¯\\_(ツ)_/¯")
def cog_unload(self):
self.bot.loop.create_task(self.session.close())
| 27.4 | 88 | 0.60219 |
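The cog's scraping logic can be exercised outside Discord. The sketch below repeats the same dongerlist.com request and textarea lookup with a short-lived aiohttp session; it needs network access, and the page count of 40 mirrors the value hard-coded above.

import asyncio
import random

import aiohttp
from bs4 import BeautifulSoup

async def fetch_random_donger(pages: int = 40) -> str:
    # Pick a random listing page and return one random donger's text.
    url = "http://dongerlist.com/page/" + str(random.randint(1, pages))
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            soup = BeautifulSoup(await response.text(), "html.parser")
    dongers = soup.find_all("textarea", "donger")
    return random.choice(dongers).get_text() if dongers else "(no dongers found)"

if __name__ == "__main__":
    print(asyncio.run(fetch_random_donger()))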
adc31560449b9a4552c03d7cca78ab9a93fc071e | 1,498 | py | Python | deployment/tfserving/demo.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | ["Apache-2.0"] | 30 | 2021-01-29T13:57:47.000Z | 2022-02-09T13:17:57.000Z | deployment/tfserving/demo.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | ["Apache-2.0"] | 13 | 2021-04-16T06:30:27.000Z | 2022-03-16T18:42:23.000Z | deployment/tfserving/demo.py | jundeli/Scaled-YOLOv4-tensorflow2 | dd2ce523258f9a5b851bd6f391a6c07a4999662e | ["Apache-2.0"] | 16 | 2021-04-28T06:51:58.000Z | 2022-03-23T23:47:52.000Z |
import os
import argparse
import sys
import cv2
import numpy as np
import time
from tfservingclient.client import Client
def parse_args(args):
parser = argparse.ArgumentParser("test model")
parser.add_argument('--pic-dir',default="../../images/pothole_pictures")
parser.add_argument('--class-names',default="../../dataset/pothole.names")
return parser.parse_args(args)
def main(args):
if not os.path.exists(args.pic_dir):
raise ValueError("{} doesn't exist!".format(args.pic_dir))
if not os.path.exists(args.class_names):
raise ValueError("{} doesn't exist!".format(args.class_names))
with open(args.class_names) as f1:
class_names = f1.read().splitlines()
client = Client()
client.init(host='127.0.0.1',port=8500)
while True:
for img_name in os.listdir(args.pic_dir):
img = cv2.imread(os.path.join(args.pic_dir,img_name))
img = np.expand_dims(img, axis=0)
img = client.preprocess(img,(416,416))
boxes, scores, classes, valid_detections = client.predict(img,score_thr=0.1)
for index, num_det in enumerate(valid_detections):
show_img = client.draw_result(img[index], boxes[index][0:num_det], scores[index][0:num_det],
classes[index][0:num_det],class_names)
cv2.imshow('dd', show_img)
cv2.waitKey(0)
if __name__=='__main__':
args = parse_args(sys.argv[1:])
main(args)
| 39.421053 | 108 | 0.638852 |
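demo.py above imports a project-local tfservingclient package and expects a TensorFlow Serving instance on 127.0.0.1:8500, so it is shown here only with a hypothetical launch wrapper; the paths below are placeholders to adjust to your checkout.

import subprocess

# Equivalent to:  python demo.py --pic-dir <image dir> --class-names <names file>
subprocess.run(
    [
        "python", "deployment/tfserving/demo.py",
        "--pic-dir", "./images/pothole_pictures",      # placeholder path
        "--class-names", "./dataset/pothole.names",    # placeholder path
    ],
    check=True,
)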
90285c597141c7a1d2dce871659ee8c9303ddcd3 | 2,960 | py | Python | setup.py | tomchop/google-api-python-client | 2d076b87cff4029a243357dad19564767bbf8751 | ["Apache-2.0"] | null | null | null | setup.py | tomchop/google-api-python-client | 2d076b87cff4029a243357dad19564767bbf8751 | ["Apache-2.0"] | null | null | null | setup.py | tomchop/google-api-python-client | 2d076b87cff4029a243357dad19564767bbf8751 | ["Apache-2.0"] | null | null | null |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google API Python client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
from __future__ import print_function
import sys
if sys.version_info < (2, 7):
print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
sys.exit(1)
if (3, 1) <= sys.version_info < (3, 4):
print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
sys.exit(1)
import io
import os
from setuptools import setup
packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
install_requires = [
# NOTE: Apache Beam tests depend on this library and cannot
# currently upgrade their httplib2 version.
# Please see https://github.com/googleapis/google-api-python-client/pull/841
"httplib2>=0.9.2,<1dev",
"google-auth>=1.16.0",
"google-auth-httplib2>=0.0.3",
"google-api-core>=1.21.0,<2dev",
"six>=1.13.0,<2dev",
"uritemplate>=3.0.0,<4dev",
]
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.md")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
version = "1.12.2"
setup(
name="google-api-python-client",
version=version,
description="Google API Client Library for Python",
long_description=readme,
long_description_content_type='text/markdown',
author="Google LLC",
author_email="googleapis-packages@google.com",
url="https://github.com/googleapis/google-api-python-client/",
install_requires=install_requires,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
packages=packages,
package_data={},
license="Apache 2.0",
keywords="google api client",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
],
)
| 34.418605 | 87 | 0.685473 |
d8c30324f94663660b991f00a69032d120d22798 | 46,514 | py | Python | google/cloud/forseti/services/inventory/storage.py | BrunoReboul/forseti-security | 9d4a61b3e5a5d22a4330d15ddf61063fc9079071 | ["Apache-2.0"] | null | null | null | google/cloud/forseti/services/inventory/storage.py | BrunoReboul/forseti-security | 9d4a61b3e5a5d22a4330d15ddf61063fc9079071 | ["Apache-2.0"] | null | null | null | google/cloud/forseti/services/inventory/storage.py | BrunoReboul/forseti-security | 9d4a61b3e5a5d22a4330d15ddf61063fc9079071 | ["Apache-2.0"] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inventory storage implementation."""
# pylint: disable=too-many-lines
import json
import enum
from sqlalchemy import and_
from sqlalchemy import BigInteger
from sqlalchemy import case
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Enum
from sqlalchemy import exists
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import or_
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import mapper
from google.cloud.asset_v1beta1.proto import assets_pb2
from google.protobuf import json_format
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util.index_state import IndexState
# pylint: disable=line-too-long
from google.cloud.forseti.services.inventory.base.storage import Storage as BaseStorage
# pylint: enable=line-too-long
LOGGER = logger.get_logger(__name__)
BASE = declarative_base()
CURRENT_SCHEMA = 1
PER_YIELD = 1024
MAX_ALLOWED_PACKET = 32 * 1024 * 1024 # 32 Mb default mysql max packet size
class Categories(enum.Enum):
"""Inventory Categories."""
resource = 1
iam_policy = 2
gcs_policy = 3
dataset_policy = 4
billing_info = 5
enabled_apis = 6
kubernetes_service_config = 7
class ContentTypes(enum.Enum):
"""Cloud Asset Inventory Content Types."""
resource = 1
iam_policy = 2
SUPPORTED_CATEGORIES = frozenset(item.name for item in list(Categories))
SUPPORTED_CONTENT_TYPES = frozenset(item.name for item in list(ContentTypes))
class InventoryIndex(BASE):
"""Represents a GCP inventory."""
__tablename__ = 'inventory_index'
id = Column(BigInteger, primary_key=True)
created_at_datetime = Column(DateTime)
completed_at_datetime = Column(DateTime)
inventory_status = Column(Text)
schema_version = Column(Integer)
progress = Column(Text)
counter = Column(Integer)
inventory_index_warnings = Column(Text(16777215))
inventory_index_errors = Column(Text(16777215))
message = Column(Text(16777215))
def __repr__(self):
"""Object string representation.
Returns:
str: String representation of the object.
"""
return """<{}(id='{}', version='{}', timestamp='{}')>""".format(
self.__class__.__name__,
self.id,
self.schema_version,
self.created_at_datetime)
@classmethod
def create(cls):
"""Create a new inventory index row.
Returns:
object: InventoryIndex row object.
"""
utc_now = date_time.get_utc_now_datetime()
micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
return InventoryIndex(
id=micro_timestamp,
created_at_datetime=utc_now,
completed_at_datetime=None,
inventory_status=IndexState.CREATED,
schema_version=CURRENT_SCHEMA,
counter=0)
def complete(self, status=IndexState.SUCCESS):
"""Mark the inventory as completed with a final inventory_status.
Args:
status (str): Final inventory_status.
"""
self.completed_at_datetime = date_time.get_utc_now_datetime()
self.inventory_status = status
def add_warning(self, session, warning):
"""Add a warning to the inventory.
Args:
session (object): session object to work on.
warning (str): Warning message
"""
warning_message = '{}\n'.format(warning)
if not self.inventory_index_warnings:
self.inventory_index_warnings = warning_message
else:
self.inventory_index_warnings += warning_message
session.add(self)
session.flush()
def set_error(self, session, message):
"""Indicate a broken import.
Args:
session (object): session object to work on.
message (str): Error message to set.
"""
self.inventory_index_errors = message
session.add(self)
session.flush()
def get_lifecycle_state_details(self, session, resource_type_input):
"""Count of lifecycle states of the specified resources.
Generate/return the count of lifecycle states (ACTIVE, DELETE_PENDING)
of the specific resource type input (project, folder) for this inventory
index.
Args:
session (object) : session object to work on.
resource_type_input (str) : resource type to get lifecycle states.
Returns:
dict: a (lifecycle state -> count) dictionary
"""
resource_data = Inventory.resource_data
details = dict(
session.query(func.json_extract(resource_data, '$.lifecycleState'),
func.count())
.filter(Inventory.inventory_index_id == self.id)
.filter(Inventory.category == 'resource')
.filter(Inventory.resource_type == resource_type_input)
.group_by(func.json_extract(resource_data, '$.lifecycleState'))
.all())
for key in details.keys():
new_key = key.replace('\"', '').replace('_', ' ')
new_key = ' - '.join([resource_type_input, new_key])
details[new_key] = details.pop(key)
if len(details) == 1:
if 'ACTIVE' in details.keys()[0]:
added_key_str = 'DELETE PENDING'
elif 'DELETE PENDING' in details.keys()[0]:
added_key_str = 'ACTIVE'
added_key = ' - '.join([resource_type_input, added_key_str])
details[added_key] = 0
return details
def get_hidden_resource_details(self, session, resource_type):
"""Count of the hidden and shown specified resources.
Generate/return the count of hidden resources (e.g. dataset) for this
inventory index.
Args:
session (object) : session object to work on.
resource_type (str) : resource type to find details for.
Returns:
dict: a (hidden_resource -> count) dictionary
"""
details = {}
resource_id = Inventory.resource_id
field_label_hidden = resource_type + ' - HIDDEN'
field_label_shown = resource_type + ' - SHOWN'
hidden_label = (
func.count(case([(resource_id.contains('%:~_%', escape='~'), 1)])))
shown_label = (
func.count(case([(~resource_id.contains('%:~_%', escape='~'), 1)])))
details_query = (
session.query(hidden_label, shown_label)
.filter(Inventory.inventory_index_id == self.id)
.filter(Inventory.category == 'resource')
.filter(Inventory.resource_type == resource_type).one())
details[field_label_hidden] = details_query[0]
details[field_label_shown] = details_query[1]
return details
def get_summary(self, session):
"""Generate/return an inventory summary for this inventory index.
Args:
session (object): session object to work on.
Returns:
dict: a (resource type -> count) dictionary
"""
resource_type = Inventory.resource_type
summary = dict(
session.query(resource_type, func.count(resource_type))
.filter(Inventory.inventory_index_id == self.id)
.filter(Inventory.category == 'resource')
.group_by(resource_type).all())
return summary
def get_details(self, session):
"""Generate/return inventory details for this inventory index.
Includes delete pending/active resource types and hidden/shown datasets.
Args:
session (object): session object to work on.
Returns:
dict: a (resource type -> count) dictionary
"""
resource_types_with_lifecycle = ['folder', 'organization', 'project']
resource_types_hidden = ['dataset']
resource_types_with_details = {'lifecycle':
resource_types_with_lifecycle,
'hidden':
resource_types_hidden}
details = {}
for key, value in resource_types_with_details.items():
if key == 'lifecycle':
details_function = self.get_lifecycle_state_details
elif key == 'hidden':
details_function = self.get_hidden_resource_details
for resource in value:
resource_details = details_function(session, resource)
details.update(resource_details)
return details
class Inventory(BASE):
"""Resource inventory table."""
__tablename__ = 'gcp_inventory'
id = Column(Integer, primary_key=True, autoincrement=True)
inventory_index_id = Column(BigInteger)
category = Column(Enum(Categories))
resource_type = Column(String(255))
resource_id = Column(Text)
resource_data = Column(Text(16777215))
parent_id = Column(Integer)
other = Column(Text)
inventory_errors = Column(Text)
__table_args__ = (
Index('idx_resource_category',
'inventory_index_id',
'resource_type',
'category'),
Index('idx_parent_id',
'parent_id'))
@classmethod
def from_resource(cls, index, resource):
"""Creates a database row object from a crawled resource.
Args:
index (object): InventoryIndex to associate.
resource (object): Crawled resource.
Returns:
object: database row object.
"""
parent = resource.parent()
iam_policy = resource.get_iam_policy()
gcs_policy = resource.get_gcs_policy()
dataset_policy = resource.get_dataset_policy()
billing_info = resource.get_billing_info()
enabled_apis = resource.get_enabled_apis()
service_config = resource.get_kubernetes_service_config()
other = json.dumps({'timestamp': resource.get_timestamp()})
rows = [Inventory(
inventory_index_id=index.id,
category=Categories.resource,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(resource.data(), sort_keys=True),
parent_id=None if not parent else parent.inventory_key(),
other=other,
inventory_errors=resource.get_warning())]
if iam_policy:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.iam_policy,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(iam_policy, sort_keys=True),
other=other,
inventory_errors=None))
if gcs_policy:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.gcs_policy,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(gcs_policy, sort_keys=True),
other=other,
inventory_errors=None))
if dataset_policy:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.dataset_policy,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(dataset_policy, sort_keys=True),
other=other,
inventory_errors=None))
if billing_info:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.billing_info,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(billing_info, sort_keys=True),
other=other,
inventory_errors=None))
if enabled_apis:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.enabled_apis,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(enabled_apis, sort_keys=True),
other=other,
inventory_errors=None))
if service_config:
rows.append(
Inventory(
inventory_index_id=index.id,
category=Categories.kubernetes_service_config,
resource_id=resource.key(),
resource_type=resource.type(),
resource_data=json.dumps(service_config, sort_keys=True),
other=other,
inventory_errors=None))
return rows
def copy_inplace(self, new_row):
"""Update a database row object from a resource.
Args:
new_row (Inventory): the Inventory row of the new resource
"""
self.category = new_row.category
self.resource_id = new_row.resource_id
self.resource_type = new_row.resource_type
self.resource_data = new_row.resource_data
self.other = new_row.other
self.inventory_errors = new_row.inventory_errors
def __repr__(self):
"""String representation of the database row object.
Returns:
str: A description of inventory_index
"""
return ('<{}(inventory_index_id=\'{}\', resource_id=\'{}\','
' resource_type=\'{}\')>').format(
self.__class__.__name__,
self.inventory_index_id,
self.resource_id,
self.resource_type)
def get_resource_id(self):
"""Get the row's resource id.
Returns:
str: resource id.
"""
return self.resource_id
def get_resource_type(self):
"""Get the row's resource type.
Returns:
str: resource type.
"""
return self.resource_type
def get_category(self):
"""Get the row's data category.
Returns:
str: data category.
"""
return self.category.name
def get_parent_id(self):
"""Get the row's parent id.
Returns:
int: parent id.
"""
return self.parent_id
def get_resource_data(self):
"""Get the row's metadata.
Returns:
dict: row's metadata.
"""
return json.loads(self.resource_data)
def get_resource_data_raw(self):
"""Get the row's data json string.
Returns:
str: row's raw data.
"""
return self.resource_data
def get_other(self):
"""Get the row's other data.
Returns:
dict: row's other data.
"""
return json.loads(self.other)
def get_inventory_errors(self):
"""Get the row's error data.
Returns:
str: row's error data.
"""
return self.inventory_errors
class CaiTemporaryStore(object):
"""CAI temporary inventory table."""
__tablename__ = 'cai_temporary_store'
# Class members created in initialize() by mapper()
name = None
parent_name = None
content_type = None
asset_type = None
asset_data = None
def __init__(self, name, parent_name, content_type, asset_type, asset_data):
"""Initialize database column.
Manually defined so that the collation value can be overridden at run
time for this database table.
Args:
name (str): The asset name.
parent_name (str): The asset name of the parent resource.
content_type (ContentTypes): The asset data content type.
asset_type (str): The asset data type.
asset_data (str): The asset data as a serialized binary blob.
"""
self.name = name
self.parent_name = parent_name
self.content_type = content_type
self.asset_type = asset_type
self.asset_data = asset_data
@classmethod
def initialize(cls, metadata, collation='utf8_bin'):
"""Create the table schema based on run time arguments.
Used to fix the column collation value for non-MySQL database engines.
Args:
metadata (object): The sqlalchemy MetaData to associate the table
with.
collation (str): The collation value to use.
"""
if 'cai_temporary_store' not in metadata.tables:
my_table = Table('cai_temporary_store', metadata,
Column('name', String(512, collation=collation),
nullable=False),
Column('parent_name', String(255), nullable=True),
Column('content_type', Enum(ContentTypes),
nullable=False),
Column('asset_type', String(255), nullable=False),
Column('asset_data',
LargeBinary(length=(2**32) - 1),
nullable=False),
Index('idx_parent_name', 'parent_name'),
PrimaryKeyConstraint('content_type',
'asset_type',
'name',
name='cai_temp_store_pk'))
mapper(cls, my_table)
def extract_asset_data(self, content_type):
"""Extracts the data from the asset protobuf based on the content type.
Args:
content_type (ContentTypes): The content type data to extract.
Returns:
dict: The dict representation of the asset data.
"""
# The no-member is a false positive for the dynamic protobuf class.
# pylint: disable=no-member
asset_pb = assets_pb2.Asset.FromString(self.asset_data)
# pylint: enable=no-member
if content_type == ContentTypes.resource:
return json_format.MessageToDict(asset_pb.resource.data)
elif content_type == ContentTypes.iam_policy:
return json_format.MessageToDict(asset_pb.iam_policy)
return json_format.MessageToDict(asset_pb)
@classmethod
def from_json(cls, asset_json):
"""Creates a database row object from the json data in a dump file.
Args:
asset_json (str): The json representation of an Asset.
Returns:
object: database row object or None if there is no data.
"""
asset_pb = json_format.Parse(asset_json, assets_pb2.Asset())
if len(asset_pb.name) > 512:
LOGGER.warn('Skipping insert of asset %s, name too long.',
asset_pb.name)
return None
if asset_pb.HasField('resource'):
content_type = ContentTypes.resource
parent_name = cls._get_parent_name(asset_pb)
elif asset_pb.HasField('iam_policy'):
content_type = ContentTypes.iam_policy
parent_name = asset_pb.name
else:
return None
return cls(
name=asset_pb.name,
parent_name=parent_name,
content_type=content_type,
asset_type=asset_pb.asset_type,
asset_data=asset_pb.SerializeToString()
)
@classmethod
def delete_all(cls, session):
"""Deletes all rows from this table.
Args:
session (object): db session
Returns:
int: The number of rows deleted.
Raises:
Exception: Reraises any exception.
"""
try:
num_rows = session.query(cls).delete()
session.commit()
return num_rows
except Exception as e:
LOGGER.exception(e)
session.rollback()
raise
@staticmethod
def _get_parent_name(asset_pb):
"""Determines the parent name from the resource data.
Args:
asset_pb (assets_pb2.Asset): An Asset protobuf object.
Returns:
str: The parent name for the resource.
"""
if asset_pb.resource.parent:
return asset_pb.resource.parent
if asset_pb.asset_type == 'google.cloud.kms.KeyRing':
# KMS KeyRings are parented by a location under a project, but
# the location is not directly discoverable without iterating all
# locations, so instead this creates an artificial parent at the
# project level, which acts as an aggregated list of all keyrings
# in all locations to fix this broken behavior.
#
# Strip locations/{LOCATION}/keyRings/{RING} off name to get the
# parent project.
return '/'.join(asset_pb.name.split('/')[:-4])
elif (asset_pb.asset_type.startswith('google.appengine') or
asset_pb.asset_type.startswith('google.bigquery') or
asset_pb.asset_type.startswith('google.spanner') or
asset_pb.asset_type.startswith('google.cloud.kms')):
# Strip off the last two segments of the name to get the parent
return '/'.join(asset_pb.name.split('/')[:-2])
LOGGER.debug('Could not determine parent name for %s', asset_pb)
return ''
class BufferedDbWriter(object):
"""Buffered db writing."""
def __init__(self,
session,
max_size=1024,
max_packet_size=MAX_ALLOWED_PACKET * .75,
commit_on_flush=False):
"""Initialize
Args:
session (object): db session
max_size (int): max size of buffer
max_packet_size (int): max size of a packet to send to SQL
commit_on_flush (bool): If true, the session is committed to the
database when the data is flushed.
"""
self.session = session
self.buffer = []
self.estimated_packet_size = 0
self.max_size = max_size
self.max_packet_size = max_packet_size
self.commit_on_flush = commit_on_flush
def add(self, obj, estimated_length=0):
"""Add an object to the buffer to write to db.
Args:
obj (object): Object to write to db.
estimated_length (int): The estimated length of this object.
"""
self.buffer.append(obj)
self.estimated_packet_size += estimated_length
if (self.estimated_packet_size > self.max_packet_size or
len(self.buffer) >= self.max_size):
self.flush()
def flush(self):
"""Flush all pending objects to the database."""
self.session.bulk_save_objects(self.buffer)
self.session.flush()
if self.commit_on_flush:
self.session.commit()
self.estimated_packet_size = 0
self.buffer = []
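# --- Illustrative usage sketch (not part of the original module) ------------
# BufferedDbWriter batches ORM rows and flushes when either `max_size` rows
# are buffered or the estimated payload would exceed the MySQL packet limit.
# A hypothetical caller with an existing SQLAlchemy session might do:
#
#     writer = BufferedDbWriter(session, max_size=512, commit_on_flush=True)
#     for resource in crawled_resources:            # assumed iterable
#         for row in Inventory.from_resource(index, resource):
#             writer.add(row, estimated_length=len(row.resource_data or ''))
#     writer.flush()                                # push any remaining rows
# -----------------------------------------------------------------------------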
class CaiDataAccess(object):
"""Access to the CAI temporary store table."""
@staticmethod
def clear_cai_data(session):
"""Deletes all temporary CAI data from the cai temporary table.
Args:
session (object): Database session.
Returns:
int: The number of rows deleted.
"""
num_rows = 0
try:
num_rows = CaiTemporaryStore.delete_all(session)
except SQLAlchemyError as e:
LOGGER.exception('Attempt to delete data from CAI temporary store '
'failed, disabling the use of CAI: %s', e)
return num_rows
@staticmethod
def populate_cai_data(data, session):
"""Add assets from cai data dump into cai temporary table.
Args:
data (file): A file like object, line delimited text dump of json
data representing assets from Cloud Asset Inventory exportAssets
API.
session (object): Database session.
Returns:
int: The number of rows inserted
"""
# CAI data can be large, so limit the number of rows written at one
# time to 512.
commit_buffer = BufferedDbWriter(session,
max_size=512,
commit_on_flush=True)
num_rows = 0
try:
for line in data:
if not line:
continue
try:
row = CaiTemporaryStore.from_json(line.strip())
except json_format.ParseError as e:
# If the public protobuf definition differs from the
# internal representation of the resource content in CAI
# then the json_format module will throw a ParseError. The
# crawler automatically falls back to using the live API
# when this happens, so no content is lost.
resource = json.loads(line)
if 'iam_policy' in resource:
content_type = 'iam_policy'
elif 'resource' in resource:
content_type = 'resource'
else:
content_type = 'none'
LOGGER.info('Protobuf parsing error %s, falling back to '
'live API for resource %s, asset type %s, '
'content type %s', e, resource.get('name', ''),
resource.get('asset_type', ''), content_type)
continue
if row:
# Overestimate the packet length to ensure max size is never
# exceeded. The actual length is closer to len(line) * 1.5.
commit_buffer.add(row, estimated_length=len(line) * 2)
num_rows += 1
commit_buffer.flush()
except SQLAlchemyError as e:
LOGGER.exception('Error populating CAI data: %s', e)
session.rollback()
return num_rows
@staticmethod
def iter_cai_assets(content_type, asset_type, parent_name, session):
"""Iterate the objects in the cai temporary table.
Args:
content_type (ContentTypes): The content type to return.
asset_type (str): The asset type to return.
parent_name (str): The parent resource to iter children under.
session (object): Database session.
Yields:
object: The content_type data for each resource.
"""
filters = [
CaiTemporaryStore.content_type == content_type,
CaiTemporaryStore.asset_type == asset_type,
CaiTemporaryStore.parent_name == parent_name,
]
base_query = session.query(CaiTemporaryStore)
for qry_filter in filters:
base_query = base_query.filter(qry_filter)
base_query = base_query.order_by(CaiTemporaryStore.name.asc())
for row in base_query.yield_per(PER_YIELD):
yield row.extract_asset_data(content_type)
@staticmethod
def fetch_cai_asset(content_type, asset_type, name, session):
"""Returns a single resource from the cai temporary store.
Args:
content_type (ContentTypes): The content type to return.
asset_type (str): The asset type to return.
name (str): The resource to return.
session (object): Database session.
Returns:
dict: The content data for the specified resource.
"""
filters = [
CaiTemporaryStore.content_type == content_type,
CaiTemporaryStore.asset_type == asset_type,
CaiTemporaryStore.name == name,
]
base_query = session.query(CaiTemporaryStore)
for qry_filter in filters:
base_query = base_query.filter(qry_filter)
row = base_query.one_or_none()
if row:
return row.extract_asset_data(content_type)
return {}
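# Editor-added sketch: reading CAI rows back out of the temporary table. The
# asset type and parent name below are example values only, and ContentTypes
# is the content-type enum referenced in the docstrings above (assumed to be
# available in this module's namespace).
def _example_iter_projects(session):
    """Yield resource data for every project under a hypothetical folder."""
    return CaiDataAccess.iter_cai_assets(
        ContentTypes.resource,
        'google.cloud.resourcemanager.Project',  # example asset type
        'folders/1234567890',                    # hypothetical parent
        session)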
class DataAccess(object):
"""Access to inventory for services."""
@classmethod
def delete(cls, session, inventory_index_id):
"""Delete an inventory index entry by id.
Args:
session (object): Database session.
inventory_index_id (str): Id specifying which inventory to delete.
Returns:
InventoryIndex: An expunged entry corresponding the
inventory_index_id.
Raises:
Exception: Reraises any exception.
"""
try:
result = cls.get(session, inventory_index_id)
session.query(Inventory).filter(
Inventory.inventory_index_id == inventory_index_id).delete()
session.query(InventoryIndex).filter(
InventoryIndex.id == inventory_index_id).delete()
session.commit()
return result
except Exception as e:
LOGGER.exception(e)
session.rollback()
raise
@classmethod
def list(cls, session):
"""List all inventory index entries.
Args:
session (object): Database session
Yields:
InventoryIndex: Generates each row
"""
for row in session.query(InventoryIndex).yield_per(PER_YIELD):
session.expunge(row)
yield row
@classmethod
def get(cls, session, inventory_index_id):
"""Get an inventory index entry by id.
Args:
session (object): Database session
inventory_index_id (str): Inventory id
Returns:
InventoryIndex: Entry corresponding the id
"""
result = (
session.query(InventoryIndex).filter(
InventoryIndex.id == inventory_index_id).one()
)
session.expunge(result)
return result
@classmethod
def get_latest_inventory_index_id(cls, session):
"""List all inventory index entries.
Args:
session (object): Database session
Returns:
int64: inventory index id
"""
inventory_index = (
session.query(InventoryIndex).filter(
or_(InventoryIndex.inventory_status == 'SUCCESS',
InventoryIndex.inventory_status == 'PARTIAL_SUCCESS')
).order_by(InventoryIndex.id.desc()).first())
session.expunge(inventory_index)
LOGGER.info(
'Latest success/partial_success inventory index id is: %s',
inventory_index.id)
return inventory_index.id
@classmethod
def get_inventory_indexes_older_than_cutoff( # pylint: disable=invalid-name
cls, session, cutoff_datetime):
"""Get all inventory index entries older than the cutoff.
Args:
session (object): Database session
cutoff_datetime (datetime): The cutoff point to find any
older inventory index entries.
Returns:
list: InventoryIndex
"""
inventory_indexes = session.query(InventoryIndex).filter(
InventoryIndex.created_at_datetime < cutoff_datetime).all()
session.expunge_all()
return inventory_indexes
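# Editor-added sketch: fetching the most recent usable inventory via the
# DataAccess helpers above. The `session` object is supplied by the caller.
def _example_latest_inventory(session):
    """Return the InventoryIndex row for the latest SUCCESS/PARTIAL_SUCCESS run."""
    latest_id = DataAccess.get_latest_inventory_index_id(session)
    return DataAccess.get(session, latest_id)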
def initialize(engine):
"""Create all tables in the database if not existing.
Args:
engine (object): Database engine to operate on.
"""
dialect = engine.dialect.name
if dialect == 'sqlite':
collation = 'binary'
else:
collation = 'utf8_bin'
CaiTemporaryStore.initialize(BASE.metadata, collation)
BASE.metadata.create_all(engine)
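# Editor-added sketch: creating the schema on a throwaway engine, e.g. for a
# local test. The SQLite URL is an example; any SQLAlchemy engine works here.
def _example_initialize_in_memory():
    """Create all inventory tables on an in-memory SQLite engine."""
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///:memory:')
    initialize(engine)
    return engine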
class Storage(BaseStorage):
"""Inventory storage used during creation."""
def __init__(self, session, existing_id=0, readonly=False):
"""Initialize
Args:
session (object): db session.
existing_id (int64): The inventory id if wants to open an existing
inventory.
readonly (bool): whether to keep the inventory read-only.
"""
self.session = session
self.opened = False
self.inventory_index = None
self.buffer = BufferedDbWriter(self.session)
self._existing_id = existing_id
self.session_completed = False
self.readonly = readonly
def _require_opened(self):
"""Make sure the storage is in 'open' state.
Raises:
Exception: If storage is not opened.
"""
if not self.opened:
raise Exception('Storage is not opened')
def _create(self):
"""Create a new inventory.
Returns:
int: Index number of the created inventory.
Raises:
Exception: Reraises any exception.
"""
try:
index = InventoryIndex.create()
self.session.add(index)
except Exception as e:
LOGGER.exception(e)
self.session.rollback()
raise
else:
return index
def _open(self, inventory_index_id):
"""Open an existing inventory.
Args:
inventory_index_id (str): the id of the inventory to open.
Returns:
object: The inventory index db row.
"""
return (
self.session.query(InventoryIndex).filter(
InventoryIndex.id == inventory_index_id).filter(
InventoryIndex.inventory_status.in_(
[IndexState.SUCCESS, IndexState.PARTIAL_SUCCESS]))
.one())
def _get_resource_rows(self, key, resource_type):
""" Get the rows in the database for a certain resource
Args:
key (str): The key of the resource
resource_type (str): The type of the resource
Returns:
object: The inventory db rows of the resource,
IAM policy and GCS policy.
Raises:
Exception: if there is no such row or more than one.
"""
rows = self.session.query(Inventory).filter(
and_(
Inventory.inventory_index_id == self.inventory_index.id,
Inventory.resource_id == key,
Inventory.resource_type == resource_type
)).all()
if not rows:
raise Exception('Resource {} not found in the table'.format(key))
else:
return rows
def _get_resource_id(self, resource):
"""Checks if a resource exists already in the inventory.
Args:
resource (object): Resource object to check against the db.
Returns:
int: The resource id of the existing resource, else 0.
"""
row = self.session.query(Inventory.id).filter(
and_(
Inventory.inventory_index_id == self.inventory_index.id,
Inventory.category == Categories.resource,
Inventory.resource_type == resource.type(),
Inventory.resource_id == resource.key(),
)).one_or_none()
if row:
return row.id
return 0
def open(self, handle=None):
"""Open the storage, potentially create a new index.
Args:
handle (str): If None, create a new index instead
of opening an existing one.
Returns:
str: Index id of the opened or created inventory.
Raises:
Exception: if open was called more than once
"""
existing_id = handle
if self.opened:
raise Exception('open called before')
# existing_id in open overrides potential constructor given id
existing_id = existing_id if existing_id else self._existing_id
# Should we create a new entry or are we opening an existing one?
if existing_id:
self.inventory_index = self._open(existing_id)
else:
self.inventory_index = self._create()
self.session.commit() # commit only on create.
self.opened = True
if not self.readonly:
self.session.begin_nested()
return self.inventory_index.id
def rollback(self):
"""Roll back the stored inventory, but keep the index entry."""
try:
self.buffer.flush()
self.session.rollback()
self.inventory_index.complete(status=IndexState.FAILURE)
self.session.commit()
finally:
self.session_completed = True
def commit(self):
"""Commit the stored inventory."""
try:
self.buffer.flush()
self.session.commit()
self.inventory_index.complete()
self.session.commit()
finally:
self.session_completed = True
def close(self):
"""Close the storage.
Raises:
Exception: If the storage was not opened before or
if the storage is writeable but neither
rollback nor commit has been called.
"""
if not self.opened:
raise Exception('not open')
if not self.readonly and not self.session_completed:
raise Exception('Need to perform commit or rollback before close')
self.opened = False
def write(self, resource):
"""Write a resource to the storage and updates its row
Args:
resource (object): Resource object to store in db.
Raises:
Exception: If storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
previous_id = self._get_resource_id(resource)
if previous_id:
resource.set_inventory_key(previous_id)
self.update(resource)
return
rows = Inventory.from_resource(self.inventory_index, resource)
for row in rows:
if row.category == Categories.resource:
# Force flush to insert the resource row in order to get the
# inventory id value. This is used to tie child resources
# and related data back to the parent resource row and to
# check for duplicate resources.
self.session.add(row)
self.session.flush()
resource.set_inventory_key(row.id)
else:
row.parent_id = resource.inventory_key()
self.buffer.add(row)
self.inventory_index.counter += len(rows)
def update(self, resource):
"""Update a resource in the storage.
Args:
resource (object): Resource object to store in db.
Raises:
Exception: If storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
self.buffer.flush()
try:
new_rows = Inventory.from_resource(self.inventory_index, resource)
old_rows = self._get_resource_rows(
resource.key(), resource.type())
new_dict = {row.category.name: row for row in new_rows}
old_dict = {row.category.name: row for row in old_rows}
for category in SUPPORTED_CATEGORIES:
if category in new_dict:
if category in old_dict:
old_dict[category].copy_inplace(
new_dict[category])
else:
new_dict[category].parent_id = resource.inventory_key()
self.session.add(new_dict[category])
self.session.commit()
except Exception as e:
LOGGER.exception(e)
raise Exception('Resource Update Unsuccessful: {}'.format(e))
def error(self, message):
"""Store a fatal error in storage. This will help debug problems.
Args:
message (str): Error message describing the problem.
Raises:
Exception: If the storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
self.inventory_index.set_error(self.session, message)
def warning(self, message):
"""Store a Warning message in storage. This will help debug problems.
Args:
message (str): Warning message describing the problem.
Raises:
Exception: If the storage was opened readonly.
"""
if self.readonly:
raise Exception('Opened storage readonly')
self.inventory_index.add_warning(self.session, message)
def iter(self,
type_list=None,
fetch_iam_policy=False,
fetch_gcs_policy=False,
fetch_dataset_policy=False,
fetch_billing_info=False,
fetch_enabled_apis=False,
fetch_service_config=False,
with_parent=False):
"""Iterate the objects in the storage.
Args:
type_list (list): List of types to iterate over, or [] for all.
fetch_iam_policy (bool): Yield iam policies.
fetch_gcs_policy (bool): Yield gcs policies.
fetch_dataset_policy (bool): Yield dataset policies.
fetch_billing_info (bool): Yield project billing info.
fetch_enabled_apis (bool): Yield project enabled APIs info.
fetch_service_config (bool): Yield container service config info.
with_parent (bool): Join parent with results, yield tuples.
Yields:
object: Single row object or child/parent if 'with_parent' is set.
"""
filters = [Inventory.inventory_index_id == self.inventory_index.id]
if fetch_iam_policy:
filters.append(
Inventory.category == Categories.iam_policy)
elif fetch_gcs_policy:
filters.append(
Inventory.category == Categories.gcs_policy)
elif fetch_dataset_policy:
filters.append(
Inventory.category == Categories.dataset_policy)
elif fetch_billing_info:
filters.append(
Inventory.category == Categories.billing_info)
elif fetch_enabled_apis:
filters.append(
Inventory.category == Categories.enabled_apis)
elif fetch_service_config:
filters.append(
Inventory.category == Categories.kubernetes_service_config)
else:
filters.append(
Inventory.category == Categories.resource)
if type_list:
filters.append(Inventory.resource_type.in_(type_list))
if with_parent:
parent_inventory = aliased(Inventory)
p_id = parent_inventory.id
base_query = (
self.session.query(Inventory, parent_inventory)
.filter(Inventory.parent_id == p_id))
else:
base_query = self.session.query(Inventory)
for qry_filter in filters:
base_query = base_query.filter(qry_filter)
base_query = base_query.order_by(Inventory.id.asc())
for row in base_query.yield_per(PER_YIELD):
yield row
def get_root(self):
"""get the resource root from the inventory
Returns:
object: A row in gcp_inventory of the root
"""
# Comparison to None needed to compare to Null in SQL.
# pylint: disable=singleton-comparison
root = self.session.query(Inventory).filter(
and_(
Inventory.inventory_index_id == self.inventory_index.id,
Inventory.parent_id == None,
Inventory.category == Categories.resource,
Inventory.resource_type.in_(['organization',
'folder',
'project'])
)).first()
# pylint: enable=singleton-comparison
LOGGER.debug('Root resource: %s', root)
return root
def type_exists(self,
type_list=None):
"""Check if certain types of resources exists in the inventory
Args:
type_list (list): List of types to check
Returns:
            bool: Whether these types of resources exist.
"""
return self.session.query(exists().where(and_(
Inventory.inventory_index_id == self.inventory_index.id,
Inventory.category == Categories.resource,
Inventory.resource_type.in_(type_list)
))).scalar()
def __enter__(self):
"""To support with statement for auto closing.
Returns:
Storage: The inventory storage object
"""
self.open()
return self
def __exit__(self, type_p, value, traceback):
"""To support with statement for auto closing.
Args:
type_p (object): Unused.
value (object): Unused.
traceback (object): Unused.
"""
self.close()
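# Editor-added sketch: Storage implements the context-manager protocol defined
# above, so a crawl is usually wrapped like this. `session` and `resources`
# are hypothetical stand-ins for the caller's DB session and resource iterator.
def _example_crawl(session, resources):
    """Write resources into a fresh inventory, committing or rolling back."""
    with Storage(session) as storage:
        try:
            for resource in resources:
                storage.write(resource)
            storage.commit()
        except Exception as e:
            # Record the failure before marking the index as failed.
            storage.error(str(e))
            storage.rollback()
            raise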
| 33.200571
| 87
| 0.586598
|
c94a586a0a1a5c22feaba4d7725ba86d6a419a28
| 53
|
py
|
Python
|
Python/print_without_newline.py
|
PushpneetSingh/Hello-world
|
def0f44737e02fb40063cd347e93e456658e2532
|
[
"MIT"
] | 1,428
|
2018-10-03T15:15:17.000Z
|
2019-03-31T18:38:36.000Z
|
Python/print_without_newline.py
|
PushpneetSingh/Hello-world
|
def0f44737e02fb40063cd347e93e456658e2532
|
[
"MIT"
] | 1,162
|
2018-10-03T15:05:49.000Z
|
2018-10-18T14:17:52.000Z
|
Python/print_without_newline.py
|
PushpneetSingh/Hello-world
|
def0f44737e02fb40063cd347e93e456658e2532
|
[
"MIT"
] | 3,909
|
2018-10-03T15:07:19.000Z
|
2019-03-31T18:39:08.000Z
|
mylist = ['10','12','14']
print(*mylist,sep='\n')
| 17.666667
| 28
| 0.509434
|
4c11c44a75a6de80933c21f81d2d335cf1051714
| 1,415
|
py
|
Python
|
env/ant/ant.py
|
clvrai/coordination
|
2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692
|
[
"MIT"
] | 33
|
2020-02-15T07:52:05.000Z
|
2021-12-27T04:19:45.000Z
|
env/ant/ant.py
|
clvrai/coordination
|
2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692
|
[
"MIT"
] | null | null | null |
env/ant/ant.py
|
clvrai/coordination
|
2b1bc8a6817b477f49c0cf6bdacd9c2f2e56f692
|
[
"MIT"
] | 6
|
2020-10-12T01:37:02.000Z
|
2022-02-21T12:49:49.000Z
|
from collections import OrderedDict
import numpy as np
from env.base import BaseEnv
class AntEnv(BaseEnv):
def __init__(self, xml_path, **kwargs):
super().__init__(xml_path, **kwargs)
self._env_config.update({
"init_randomness": 0.1,
"init_random_rot": 0,
})
# Env info
self.reward_type = ["ctrl_reward"]
self.ob_shape = OrderedDict([("shared_pos", 17), ("lower_body", 24), ("upper_body", 9), ("box_pos", 3)])
self.action_space.decompose(OrderedDict([("lower_body", 8), ("upper_body", 3)]))
def _get_box_pos(self):
return self._get_pos('box')
def _render_callback(self):
body_id = self.sim.model.body_name2id('torso')
lookat = self.sim.data.body_xpos[body_id]
lookat[2] += 0.3
cam_pos = lookat + np.array([0, -10, 3])
cam_id = self._camera_id
self._set_camera_position(cam_id, cam_pos)
self._set_camera_rotation(cam_id, lookat)
self.sim.forward()
def _viewer_reset(self):
body_id = self.sim.model.body_name2id('torso')
lookat = self.sim.data.body_xpos[body_id]
for idx, value in enumerate(lookat):
self._viewer.cam.lookat[idx] = value
self._viewer.cam.lookat[2] += 0.35
self._viewer.cam.distance = 7
#self._viewer.cam.azimuth = 180.
self._viewer.cam.elevation = -25.
| 30.106383
| 112
| 0.614134
|
b7b48cb30c7efd52d267169b386ac41ec7749012
| 11,860
|
py
|
Python
|
hiplot/fetchers_demo.py
|
mathematicalmichael/hiplot
|
84980f63d2ac7b34995d8f2ed144a1e786c02b38
|
[
"MIT"
] | null | null | null |
hiplot/fetchers_demo.py
|
mathematicalmichael/hiplot
|
84980f63d2ac7b34995d8f2ed144a1e786c02b38
|
[
"MIT"
] | null | null | null |
hiplot/fetchers_demo.py
|
mathematicalmichael/hiplot
|
84980f63d2ac7b34995d8f2ed144a1e786c02b38
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import random
import math
import time
import typing as t
from . import experiment as hip
# Demos from the README. If one of those is modified, please modify the readme as well
def demo_change_column_properties() -> hip.Experiment:
data = [{"param": 1, "loss": 10, "hidden_field": "value1", "c": "red"}, {"param": 2, "loss": 5, "hidden_field": "value2", "c": "black"}]
exp = hip.Experiment.from_iterable(data)
exp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "black": "rgb(0, 0, 0)"}
exp.parameters_definition["loss"].type = hip.ValueType.NUMERIC_LOG
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
'hide': ['hidden_field'], # This column won't appear in the parallel plot
        'order': ['c']  # Column `c` will be displayed first in the parallel plot
})
return exp
def demo_basic_usage() -> hip.Experiment:
data = [{'dropout': 0.1, 'lr': 0.001, 'loss': 10.0, 'optimizer': 'SGD'},
{'dropout': 0.15, 'lr': 0.01, 'loss': 3.5, 'optimizer': 'Adam'},
{'dropout': 0.3, 'lr': 0.1, 'loss': 4.5, 'optimizer': 'Adam'}]
return hip.Experiment.from_iterable(data)
def demo_line_xy() -> hip.Experiment:
# DEMO_LINE_XY_BEGIN
exp = hip.Experiment()
exp.display_data(hip.Displays.XY).update({
'axis_x': 'generation',
'axis_y': 'loss',
})
for i in range(200):
dp = hip.Datapoint(
uid=str(i),
values={
'generation': i,
'param': 10 ** random.uniform(-1, 1),
'loss': random.uniform(-5, 5),
})
if i > 10:
from_parent = random.choice(exp.datapoints[-10:])
dp.from_uid = from_parent.uid # <-- Connect the parent to the child
dp.values['loss'] += from_parent.values['loss'] # type: ignore
dp.values['param'] *= from_parent.values['param'] # type: ignore
exp.datapoints.append(dp)
# DEMO_LINE_XY_END
return exp
def demo_bug_uid() -> hip.Experiment:
return hip.Experiment.from_iterable([{'a': 1, 'b': 2, 'uid': 50.0}, {'a': 2, 'b': 3, 'uid': 49.33}])
def demo(n: int = 100) -> hip.Experiment:
xp = hip.Experiment()
xp.display_data(hip.Displays.XY).update({
'axis_x': 'time',
'axis_y': 'exp_metric',
})
# Some fake PBT-ish data
def fake_params() -> t.Dict[str, hip.DisplayableType]:
r = random.random()
p: t.Dict[str, hip.DisplayableType] = {
"lr": 10 ** random.uniform(-5, 0),
"seed": random.uniform(0, 10),
"name": uuid.uuid4().hex[:6],
"optimizer": random.choice(["sgd", "adam", "adamw"]),
"r": r,
"c": random.choice(["red", "green", "black"]),
}
if r < 0.1:
del p['optimizer']
if r > 0.3:
p["optionA"] = random.uniform(1, 5)
else:
p["optionB"] = random.uniform(1, 5)
if r < 0.2:
p["pctile"] = -1.0
elif r < 0.5:
p["pctile"] = random.uniform(-1.0, 10.0)
elif r < 0.8:
p["pctile"] = 10 ** random.uniform(1, 2)
else:
p["pctile"] = random.uniform(100, 101)
if random.random() > 0.3:
p["special_values"] = random.uniform(1, 5)
else:
p["special_values"] = random.choice([math.inf, -math.inf, math.nan])
return p
def fake_metrics(tm: float) -> t.Dict[str, hip.DisplayableType]:
return {
"exp_metric": 10 ** random.uniform(-5, 0),
"pct_success": random.uniform(10, 90),
"chkpt": uuid.uuid4().hex[:6],
"time": tm + random.uniform(-0.2, 0.2),
"force_numericlog": random.uniform(1, 100),
'timestamp': int(time.time() + (task_idx * 2000)),
}
current_pop: t.List[t.Dict[str, t.Any]] = [dict(uid=f"init{i}", params=fake_params(), last_ckpt_uid=None) for i in range(10)]
continue_num = 0
for task_idx in range(n):
# All drop checkpoints
for p in current_pop:
ckpt_uid = f"{p['uid']}_{uuid.uuid4().hex[:6]}"
xp.datapoints.append(hip.Datapoint(uid=ckpt_uid, from_uid=p['last_ckpt_uid'], values={**p['params'], **fake_metrics(task_idx)}))
p['last_ckpt_uid'] = ckpt_uid
# Randomly drop some
current_pop = [p for p in current_pop if random.random() > 0.3]
# Respawn as needed
for _ in range(10 - len(current_pop)):
continue_num += 1
parent = random.choice(xp.datapoints[-10:])
current_pop.append(dict(uid=f"continue{continue_num}", params=fake_params(), last_ckpt_uid=parent.uid))
xp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "green": "rgb(0, 255, 0)", "black": "rgb(0, 0, 0)"}
xp.parameters_definition["force_numericlog"].type = hip.ValueType.NUMERIC_LOG
xp.parameters_definition["pctile"].type = hip.ValueType.NUMERIC_PERCENTILE
xp.parameters_definition["timestamp"].type = hip.ValueType.TIMESTAMP
return xp
def demo_customize() -> hip.Experiment:
exp = demo()
# EXPERIMENT_SETTINGS_SNIPPET2_BEGIN
# Provide configuration for the parallel plot
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
# Hide some columns in the parallel plot
'hide': ['optionB'],
# Specify the order for others
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the table with all the rows
exp.display_data(hip.Displays.TABLE).update({
# Don't display `uid` and `from_uid` columns to the user
'hide': ['uid', 'from_uid'],
# In the table, order rows by default
'order_by': [['pct_success', 'desc']],
# Specify the order for columns
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the XY graph
exp.display_data(hip.Displays.XY).update({
# Default X axis for the XY plot
'axis_x': 'time',
# Default Y axis
'axis_y': 'lr',
# Configure lines
'lines_thickness': 1.0,
'lines_opacity': 0.1,
# Configure dots
'dots_thickness': 2.0,
'dots_opacity': 0.3,
})
# EXPERIMENT_SETTINGS_SNIPPET2_END
return exp
def demo_force_scale() -> hip.Experiment:
xp = hip.Experiment()
for _ in range(100):
values = [abs(random.gauss(0.0, 1.0)) for _ in range(4)]
xp.datapoints.append(hip.Datapoint({
f"value{i}": v / sum(values)
for i, v in enumerate(values)
}))
for i in range(4):
xp.parameters_definition[f"value{i}"].force_range(0.0, 1.0)
return xp
def demo_distribution(**kwargs: t.Any) -> hip.Experiment:
xp = hip.Experiment.from_iterable([{
'cat': random.choice(["a", "b", "c", "d", "e", "f", "g", "h"]),
'numeric': random.uniform(0.0, 1.0),
} for i in range(1000)])
xp.display_data(hip.Displays.DISTRIBUTION).update(kwargs)
return xp
def demo_bool() -> hip.Experiment:
return hip.Experiment.from_iterable([
{"bool": True},
{"bool": False}
])
def demo_color_interpolate() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "interpolateSinebow"
return exp
def demo_color_scheme_ylrd() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeYlOrRd"
return exp
def demo_color_scheme_accent() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeAccent"
return exp
def demo_color_interpolate_inverse() -> hip.Experiment:
exp = demo_color_interpolate()
assert exp.parameters_definition["exp_metric"].colormap is not None
exp.parameters_definition["exp_metric"].colormap += "#inverse"
return exp
def demo_axis_style() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
**{
f'param{i}': random.uniform(0, 1)
for i in range(6)
},
'loss': random.uniform(0, 100),
'metric': 10 ** random.uniform(0, 10)
})
xp = hip.Experiment.from_iterable(data)
for i in range(6):
xp.parameters_definition[f"param{i}"].label_css = "badge badge-pill badge-secondary"
xp.parameters_definition["loss"].label_css = "badge badge-pill badge-primary"
xp.parameters_definition["metric"].label_css = "badge badge-pill badge-info"
return xp
def demo_categorical() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
'cat_num_05': random.randint(0, 5),
'cat_num_15': random.randint(0, 10),
'cat_num_25': random.randint(0, 25),
'cat_str_05': f's{random.randint(0, 5)}',
'cat_str_15': f's{random.randint(0, 15)}',
'cat_str_25': f's{random.randint(0, 25)}',
})
xp = hip.Experiment.from_iterable(data)
for param in ["cat_num_05", "cat_num_15", "cat_num_25"]:
xp.parameters_definition[param].type = hip.ValueType.CATEGORICAL
xp.colorby = 'cat_num_25'
return xp
def demo_long_names() -> hip.Experiment:
return hip.Experiment.from_iterable([
{
'some very very long name for a field': random.randint(0, 5),
'this one is also very long': random.randint(0, 10),
'another.long.one.but.with.dots': random.randint(0, 25),
}
for _ in range(100)
])
def demo_force_constant_pplot() -> hip.Experiment:
exp = hip.Experiment.from_iterable([
{'uid': 123, 'a': 1, 'b': 3},
{'uid': 345, 'a': 2, 'b': 3}
])
exp.parameters_definition["b"].force_range(0, 100)
return exp
def demo_first_value_nan() -> hip.Experiment:
return hip.Experiment.from_iterable([
{},
{'a': None},
{'a': 2},
{'a': 2.1},
{'a': 2.2},
{'a': 5.5},
{'a': math.nan},
])
def demo_weighted_rows() -> hip.Experiment:
experiment = hip.Experiment.from_iterable([
{'w': 1.0, 'a': 1, 'b': 1},
{'w': 2.0, 'a': 2, 'b': 1},
{'w': -2.0, 'a': 2, 'b': 1},
{'w': math.inf, 'a': 2, 'b': 2},
{'w': 'not_a_number', 'a': 2, 'b': 3},
{'w': None, 'a': 3, 'b': 3},
{'a': 4, 'b': 3},
])
experiment.weightcolumn = "w"
return experiment
README_DEMOS: t.Dict[str, t.Callable[[], hip.Experiment]] = {
"demo": demo,
"demo_big": lambda: demo(1000),
"demo_change_column_properties": demo_change_column_properties,
"demo_basic_usage": demo_basic_usage,
"demo_line_xy": demo_line_xy,
"demo_bug_uid": demo_bug_uid,
"demo_force_scale": demo_force_scale,
"demo_distribution_cat": lambda: demo_distribution(axis="cat"),
"demo_distribution_num": lambda: demo_distribution(axis="numeric"),
"demo_distribution_num_100bins": lambda: demo_distribution(axis="numeric", nbins=100),
"demo_bool": demo_bool,
"demo_color_interpolate": demo_color_interpolate,
"demo_color_scheme_ylrd": demo_color_scheme_ylrd,
"demo_color_scheme_accent": demo_color_scheme_accent,
"demo_axis_style": demo_axis_style,
"demo_categorical": demo_categorical,
"demo_customize": demo_customize,
"demo_long_names": demo_long_names,
"demo_force_constant_pplot": demo_force_constant_pplot,
"demo_color_interpolate_inverse": demo_color_interpolate_inverse,
"demo_first_value_nan": demo_first_value_nan,
"demo_weighted_rows": demo_weighted_rows,
}
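# Editor-added sketch: rendering one of the demos above to a standalone HTML
# page. This assumes hiplot's Experiment.to_html() export helper; the demo key
# and output path are examples only.
if __name__ == "__main__":
    README_DEMOS["demo_basic_usage"]().to_html("demo_basic_usage.html")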
| 35.192878
| 140
| 0.593676
|
e721f0901f2d08071e776319958b6968acbec51b
| 438
|
py
|
Python
|
configs/Experiment.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | 3
|
2022-02-08T20:23:18.000Z
|
2022-02-11T13:49:14.000Z
|
configs/Experiment.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | null | null | null |
configs/Experiment.py
|
jbr-ai-labs/mamba
|
bd05023bd0d66f89ffbe42c315c4c4a6612b8fb4
|
[
"MIT"
] | null | null | null |
from configs.Config import Config
class Experiment(Config):
def __init__(self, steps, episodes, random_seed, env_config, controller_config, learner_config):
super(Experiment, self).__init__()
self.steps = steps
self.episodes = episodes
self.random_seed = random_seed
self.env_config = env_config
self.controller_config = controller_config
self.learner_config = learner_config
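# Editor-added sketch: how an Experiment config is typically assembled. All
# values and sub-configs below are hypothetical placeholders for the real
# env/controller/learner config objects.
def _example_experiment():
    return Experiment(steps=1000000, episodes=1000, random_seed=0,
                      env_config=None, controller_config=None,
                      learner_config=None)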
| 33.692308
| 100
| 0.710046
|
caaebb54ba031a14523c8f92eb4ce87041befdd3
| 219
|
py
|
Python
|
src/lesson_data_persistence_and_exchange/shelve_readonly.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | 3
|
2018-08-14T09:33:52.000Z
|
2022-03-21T12:31:58.000Z
|
src/lesson_data_persistence_and_exchange/shelve_readonly.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
src/lesson_data_persistence_and_exchange/shelve_readonly.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
import dbm
import shelve
with shelve.open('test_shelf.db', flag='r') as s:
print('Existing:', s['key1'])
try:
s['key1'] = 'new value'
except dbm.error as err:
print('ERROR: {}'.format(err))
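# Editor-added sketch: the writable counterpart opens the shelf with the
# default flag='c', which creates the file if needed and allows assignment.
# Kept inside a function so the read-only demo above is unchanged when run.
def _example_writable():
    with shelve.open('test_shelf.db', flag='c') as s:
        s['key1'] = 'new value'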
| 21.9
| 49
| 0.579909
|
4f8fbd2aab2c179ca8104cf2deb16d40fa8cc8bf
| 921
|
py
|
Python
|
python/edl/tests/unittests/del_from_etcd.py
|
WEARE0/edl
|
f065ec02bb27a67c80466103e298bd6f37494048
|
[
"Apache-2.0"
] | 90
|
2020-04-21T01:46:10.000Z
|
2022-02-10T09:09:34.000Z
|
python/edl/tests/unittests/del_from_etcd.py
|
WEARE0/edl
|
f065ec02bb27a67c80466103e298bd6f37494048
|
[
"Apache-2.0"
] | 37
|
2018-03-02T22:41:15.000Z
|
2020-04-22T16:48:36.000Z
|
python/edl/tests/unittests/del_from_etcd.py
|
WEARE0/edl
|
f065ec02bb27a67c80466103e298bd6f37494048
|
[
"Apache-2.0"
] | 34
|
2018-03-02T23:28:25.000Z
|
2020-03-25T08:50:29.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from edl.utils import constants
from edl.discovery import etcd_client
g_etcd_endpoints = "127.0.0.1:2379"
job_id = os.environ["PADDLE_JOB_ID"]
etcd_endpoints = os.environ["PADDLE_ETCD_ENDPOINTS"]
etcd = etcd_client.EtcdClient([g_etcd_endpoints], root=job_id)
etcd.init()
constants.clean_etcd(etcd)
| 35.423077
| 74
| 0.775244
|
e5dab578f1b0eba8f062481661ab78fee39694fb
| 12,221
|
py
|
Python
|
DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQM/Integration/python/clients/beampixel_dqm_sourceclient-live_cfg.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import sys
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
process = cms.Process("BeamPixel", Run2_2018)
unitTest = False
if 'unitTest=True' in sys.argv:
unitTest = True
#----------------------------
# Common for PP and HI running
#----------------------------
if unitTest == True:
process.load("DQM.Integration.config.unittestinputsource_cfi")
from DQM.Integration.config.unittestinputsource_cfi import options
else:
process.load("DQM.Integration.config.inputsource_cfi")
from DQM.Integration.config.inputsource_cfi import options
# Use this to run locally (for testing purposes)
#process.load("DQM.Integration.config.fileinputsource_cfi")
#from DQM.Integration.config.fileinputsource_cfi import options
#----------------------------
# HLT Filter
#----------------------------
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter = cms.EDFilter("HLTTriggerTypeFilter", SelectedTriggerType = cms.int32(1))
#----------------------------
# DQM Environment
#----------------------------
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = "BeamPixel"
process.dqmSaver.tag = "BeamPixel"
process.dqmSaver.runNumber = options.runNumber
process.dqmSaverPB.tag = 'BeamPixel'
process.dqmSaverPB.runNumber = options.runNumber
#----------------------------
# Conditions
#----------------------------
# Use this to run locally (for testing purposes), choose the right GT
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, "auto:run2_data", "")
# Otherwise use this
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
#----------------------------
# Sub-system Configuration
#----------------------------
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff")
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
#----------------------------
# Define Sequences
#----------------------------
process.dqmModules = cms.Sequence(process.dqmEnv + process.dqmSaver + process.dqmSaverPB)
process.physTrigger = cms.Sequence(process.hltTriggerTypeFilter)
#----------------------------
# Process Customizations
#----------------------------
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
#----------------------------
# Tracking General Configuration
#----------------------------
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.load("RecoLocalTracker.Configuration.RecoLocalTracker_cff")
process.load("TrackingTools.TransientTrack.TransientTrackBuilder_cfi")
#----------------------------
# Pixel-Tracks&Vertices Config
#----------------------------
from RecoPixelVertexing.PixelLowPtUtilities.siPixelClusterShapeCache_cfi import *
process.siPixelClusterShapeCachePreSplitting = siPixelClusterShapeCache.clone(src = 'siPixelClustersPreSplitting')
process.load("RecoLocalTracker.SiPixelRecHits.PixelCPEGeneric_cfi")
process.load("RecoPixelVertexing.Configuration.RecoPixelVertexing_cff")
from RecoVertex.PrimaryVertexProducer.OfflinePixel3DPrimaryVertices_cfi import *
process.pixelVertices = pixelVertices.clone(
TkFilterParameters = dict(
minPt = process.pixelTracksTrackingRegions.RegionPSet.ptMin)
)
process.pixelTracksTrackingRegions.RegionPSet.originRadius = 0.4
process.pixelTracksTrackingRegions.RegionPSet.originHalfLength = 15.
process.pixelTracksTrackingRegions.RegionPSet.originXPos = 0.08
process.pixelTracksTrackingRegions.RegionPSet.originYPos = -0.03
process.pixelTracksTrackingRegions.RegionPSet.originZPos = 0.
#----------------------------
# Proton-Proton Specific Section
#----------------------------
if (process.runType.getRunType() == process.runType.pp_run or process.runType.getRunType() == process.runType.pp_run_stage1 or
process.runType.getRunType() == process.runType.cosmic_run or process.runType.getRunType() == process.runType.cosmic_run_stage1 or
process.runType.getRunType() == process.runType.hpu_run):
print("[beampixel_dqm_sourceclient-live_cfg]::running pp")
#----------------------------
# Tracking Configuration
#----------------------------
process.castorDigis.InputLabel = "rawDataCollector"
process.csctfDigis.producer = "rawDataCollector"
process.dttfDigis.DTTF_FED_Source = "rawDataCollector"
process.ecalDigis.cpu.InputLabel = "rawDataCollector"
process.ecalPreshowerDigis.sourceTag = "rawDataCollector"
process.gctDigis.inputLabel = "rawDataCollector"
process.gtDigis.DaqGtInputTag = "rawDataCollector"
process.hcalDigis.InputLabel = "rawDataCollector"
process.muonCSCDigis.InputObjects = "rawDataCollector"
process.muonDTDigis.inputLabel = "rawDataCollector"
process.muonRPCDigis.InputLabel = "rawDataCollector"
process.scalersRawToDigi.scalersInputTag = "rawDataCollector"
process.siPixelDigis.cpu.InputLabel = "rawDataCollector"
process.siStripDigis.ProductLabel = "rawDataCollector"
#----------------------------
# pixelVertexDQM Config
#----------------------------
process.pixelVertexDQM = DQMEDAnalyzer('Vx3DHLTAnalyzer',
vertexCollection = cms.untracked.InputTag("pixelVertices"),
pixelHitCollection = cms.untracked.InputTag("siPixelRecHitsPreSplitting"),
debugMode = cms.bool(True),
nLumiFit = cms.uint32(2),
maxLumiIntegration = cms.uint32(15),
nLumiXaxisRange = cms.uint32(5000),
dataFromFit = cms.bool(True),
minNentries = cms.uint32(20),
# If the histogram has at least "minNentries" then extract Mean and RMS,
# or, if we are performing the fit, the number of vertices must be greater
# than minNentries otherwise it waits for other nLumiFit
xRange = cms.double(0.8),
xStep = cms.double(0.001),
yRange = cms.double(0.8),
yStep = cms.double(0.001),
zRange = cms.double(30.0),
zStep = cms.double(0.04),
VxErrCorr = cms.double(1.2), # Keep checking this with later release
minVxDoF = cms.double(10.0),
minVxWgt = cms.double(0.5),
fileName = cms.string("/nfshome0/dqmdev/BeamMonitorDQM/BeamPixelResults.txt"))
#----------------------------
# Heavy Ion Specific Section
#----------------------------
if (process.runType.getRunType() == process.runType.hi_run):
print("[beampixel_dqm_sourceclient-live_cfg]::running HI")
#----------------------------
# Tracking Configuration
#----------------------------
process.castorDigis.InputLabel = "rawDataRepacker"
process.csctfDigis.producer = "rawDataRepacker"
process.dttfDigis.DTTF_FED_Source = "rawDataRepacker"
process.ecalDigis.cpu.InputLabel = "rawDataRepacker"
process.ecalPreshowerDigis.sourceTag = "rawDataRepacker"
process.gctDigis.inputLabel = "rawDataRepacker"
process.gtDigis.DaqGtInputTag = "rawDataRepacker"
process.hcalDigis.InputLabel = "rawDataRepacker"
process.muonCSCDigis.InputObjects = "rawDataRepacker"
process.muonDTDigis.inputLabel = "rawDataRepacker"
process.muonRPCDigis.InputLabel = "rawDataRepacker"
process.scalersRawToDigi.scalersInputTag = "rawDataRepacker"
process.siPixelDigis.cpu.InputLabel = "rawDataRepacker"
process.siStripDigis.ProductLabel = "rawDataRepacker"
#----------------------------
# pixelVertexDQM Config
#----------------------------
process.pixelVertexDQM = DQMEDAnalyzer('Vx3DHLTAnalyzer',
vertexCollection = cms.untracked.InputTag("pixelVertices"),
pixelHitCollection = cms.untracked.InputTag("siPixelRecHitsPreSplitting"),
debugMode = cms.bool(True),
nLumiFit = cms.uint32(5),
maxLumiIntegration = cms.uint32(15),
nLumiXaxisRange = cms.uint32(5000),
dataFromFit = cms.bool(True),
minNentries = cms.uint32(20),
# If the histogram has at least "minNentries" then extract Mean and RMS,
# or, if we are performing the fit, the number of vertices must be greater
# than minNentries otherwise it waits for other nLumiFit
xRange = cms.double(0.8),
xStep = cms.double(0.001),
yRange = cms.double(0.8),
yStep = cms.double(0.001),
zRange = cms.double(30.0),
zStep = cms.double(0.04),
VxErrCorr = cms.double(1.2), # Keep checking this with later release
minVxDoF = cms.double(10.0),
minVxWgt = cms.double(0.5),
fileName = cms.string("/nfshome0/dqmdev/BeamMonitorDQM/BeamPixelResults.txt"))
#----------------------------
# File to save beamspot info
#----------------------------
if process.dqmRunConfig.type.value() == "production":
process.pixelVertexDQM.fileName = "/nfshome0/dqmpro/BeamMonitorDQM/BeamPixelResults.txt"
else:
process.pixelVertexDQM.fileName = "/nfshome0/dqmdev/BeamMonitorDQM/BeamPixelResults.txt"
print("[beampixel_dqm_sourceclient-live_cfg]::saving DIP file into " + str(process.pixelVertexDQM.fileName))
#----------------------------
# Pixel-Tracks&Vertices Reco
#----------------------------
process.reconstructionStep = cms.Sequence(process.siPixelDigis*
process.siStripDigis*
process.striptrackerlocalreco*
process.offlineBeamSpot*
process.siPixelClustersPreSplitting*
process.siPixelRecHitsPreSplitting*
process.siPixelClusterShapeCachePreSplitting*
process.recopixelvertexing)
#----------------------------
# Define Path
#----------------------------
process.p = cms.Path(process.scalersRawToDigi*process.physTrigger*process.reconstructionStep*process.pixelVertexDQM*process.dqmModules)
| 50.920833
| 135
| 0.551346
|
3abe9fc362d8b6fc53737508881bd5adfce968b5
| 18,774
|
py
|
Python
|
Tests/scripts/validate_files.py
|
vibhuabharadwaj/content
|
30d639dbea0015536a3040ec18f93e50322bded0
|
[
"MIT"
] | null | null | null |
Tests/scripts/validate_files.py
|
vibhuabharadwaj/content
|
30d639dbea0015536a3040ec18f93e50322bded0
|
[
"MIT"
] | null | null | null |
Tests/scripts/validate_files.py
|
vibhuabharadwaj/content
|
30d639dbea0015536a3040ec18f93e50322bded0
|
[
"MIT"
] | null | null | null |
"""
This script is used to validate the files in Content repository. Specifically for each file:
1) Proper prefix
2) Proper suffix
3) Valid yml/json schema
4) Having ReleaseNotes if applicable.
It can be run to check only committed changes (if the first argument is 'true') or all the files in the repo.
Note - if it is run for all the files in the repo it won't check releaseNotes, use `release_notes.py`
for that task.
"""
import os
import re
import sys
import glob
import logging
import argparse
import subprocess
from Tests.scripts.constants import *
from Tests.scripts.hook_validations.id import IDSetValidator
from Tests.scripts.hook_validations.secrets import get_secrets
from Tests.scripts.hook_validations.image import ImageValidator
from Tests.scripts.hook_validations.description import DescriptionValidator
from Tests.scripts.update_id_set import get_script_package_data
from Tests.scripts.hook_validations.script import ScriptValidator
from Tests.scripts.hook_validations.conf_json import ConfJsonValidator
from Tests.scripts.hook_validations.structure import StructureValidator
from Tests.scripts.hook_validations.integration import IntegrationValidator
from Tests.test_utils import checked_type, run_command, print_error, collect_ids, print_color, str2bool, LOG_COLORS, \
get_yaml, filter_packagify_changes
class FilesValidator(object):
"""FilesValidator is a class that's designed to validate all the changed files on your branch, and all files in case
you are on master, this class will be used on your local env as the validation hook(pre-commit), and on CircleCi
to make sure you did not bypass the hooks as a safety precaution.
Attributes:
_is_valid (bool): saves the status of the whole validation(instead of mingling it between all the functions).
is_circle (bool): whether we are running on circle or local env.
conf_json_validator (ConfJsonValidator): object for validating the conf.json file.
id_set_validator (IDSetValidator): object for validating the id_set.json file(Created in Circle only).
"""
def __init__(self, is_circle=False):
self._is_valid = True
self.is_circle = is_circle
self.conf_json_validator = ConfJsonValidator()
self.id_set_validator = IDSetValidator(is_circle)
@staticmethod
def is_py_script_or_integration(file_path):
file_yml = get_yaml(file_path)
if re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE):
if file_yml.get('script', {}).get('type', 'javascript') != 'python':
return False
return True
elif re.match(SCRIPT_REGEX, file_path, re.IGNORECASE):
if file_yml.get('type', 'javascript') != 'python':
return False
return True
return False
@staticmethod
def get_modified_files(files_string):
"""Get lists of the modified files in your branch according to the files string.
Args:
files_string (string): String that was calculated by git using `git diff` command.
Returns:
(modified_files_list, added_files_list, deleted_files). Tuple of sets.
"""
all_files = files_string.split('\n')
deleted_files = set([])
added_files_list = set([])
modified_files_list = set([])
old_format_files = set([])
for f in all_files:
file_data = f.split()
if not file_data:
continue
file_status = file_data[0]
file_path = file_data[1]
if file_status.lower().startswith('r'):
file_status = 'r'
file_path = file_data[2]
if checked_type(file_path, CODE_FILES_REGEX) and file_status.lower() != 'd' \
and not file_path.endswith('_test.py'):
# naming convention - code file and yml file in packages must have same name.
file_path = os.path.splitext(file_path)[0] + '.yml'
elif file_path.endswith('.js') or file_path.endswith('.py'):
continue
if file_status.lower() in ['m', 'a', 'r'] and checked_type(file_path, OLD_YML_FORMAT_FILE) and \
FilesValidator.is_py_script_or_integration(file_path):
old_format_files.add(file_path)
elif file_status.lower() == 'm' and checked_type(file_path) and not file_path.startswith('.'):
modified_files_list.add(file_path)
elif file_status.lower() == 'a' and checked_type(file_path) and not file_path.startswith('.'):
added_files_list.add(file_path)
elif file_status.lower() == 'd' and checked_type(file_path) and not file_path.startswith('.'):
deleted_files.add(file_path)
elif file_status.lower().startswith('r') and checked_type(file_path):
modified_files_list.add((file_data[1], file_data[2]))
elif file_status.lower() not in KNOWN_FILE_STATUSES:
print_error(file_path + " file status is an unknown known one, "
"please check. File status was: " + file_status)
modified_files_list, added_files_list, deleted_files = filter_packagify_changes(
modified_files_list,
added_files_list,
deleted_files,
'master')
return modified_files_list, added_files_list, deleted_files, old_format_files
def get_modified_and_added_files(self, branch_name, is_circle):
"""Get lists of the modified and added files in your branch according to the git diff output.
Args:
branch_name (string): The name of the branch we are working on.
is_circle (bool): Whether we are running on circle or local env.
Returns:
(modified_files, added_files). Tuple of sets.
"""
all_changed_files_string = run_command("git diff --name-status origin/master...{}".format(branch_name))
modified_files, added_files, _, old_format_files = self.get_modified_files(all_changed_files_string)
if not is_circle:
files_string = run_command("git diff --name-status --no-merges HEAD")
non_committed_modified_files, non_committed_added_files, non_committed_deleted_files, \
non_committed_old_format_files = self.get_modified_files(files_string)
all_changed_files_string = run_command("git diff --name-status origin/master")
modified_files_from_master, added_files_from_master, _, _ = \
self.get_modified_files(all_changed_files_string)
old_format_files = old_format_files.union(non_committed_old_format_files)
for mod_file in modified_files_from_master:
if mod_file in non_committed_modified_files:
modified_files.add(mod_file)
for add_file in added_files_from_master:
if add_file in non_committed_added_files:
added_files.add(add_file)
modified_files = modified_files - set(non_committed_deleted_files)
added_files = added_files - set(non_committed_modified_files) - set(non_committed_deleted_files)
# new_added_files = set([])
# for added_file in added_files:
# if added_file in non_committed_added_files:
# new_added_files.add(added_file)
# added_files = new_added_files
return modified_files, added_files, old_format_files
def validate_modified_files(self, modified_files, is_backward_check=True):
"""Validate the modified files from your branch.
In case we encounter an invalid file we set the self._is_valid param to False.
Args:
modified_files (set): A set of the modified files in the current branch.
"""
for file_path in modified_files:
old_file_path = None
if isinstance(file_path, tuple):
old_file_path, file_path = file_path
print("Validating {}".format(file_path))
            structure_validator = StructureValidator(
                file_path, is_added_file=not is_backward_check,
                is_renamed=bool(old_file_path))
if not structure_validator.is_file_valid():
self._is_valid = False
if not self.id_set_validator.is_file_valid_in_set(file_path):
self._is_valid = False
elif re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE) or \
re.match(INTEGRATION_YML_REGEX, file_path, re.IGNORECASE):
image_validator = ImageValidator(file_path)
if not image_validator.is_valid():
self._is_valid = False
description_validator = DescriptionValidator(file_path)
if not description_validator.is_valid():
self._is_valid = False
integration_validator = IntegrationValidator(file_path, old_file_path=old_file_path)
if is_backward_check and not integration_validator.is_backward_compatible():
self._is_valid = False
elif re.match(SCRIPT_REGEX, file_path, re.IGNORECASE):
script_validator = ScriptValidator(file_path)
if is_backward_check and not script_validator.is_backward_compatible():
self._is_valid = False
elif re.match(SCRIPT_YML_REGEX, file_path, re.IGNORECASE) or \
re.match(SCRIPT_PY_REGEX, file_path, re.IGNORECASE) or \
re.match(SCRIPT_JS_REGEX, file_path, re.IGNORECASE):
yml_path, _ = get_script_package_data(os.path.dirname(file_path))
script_validator = ScriptValidator(yml_path)
if is_backward_check and not script_validator.is_backward_compatible():
self._is_valid = False
elif re.match(IMAGE_REGEX, file_path, re.IGNORECASE):
image_validator = ImageValidator(file_path)
if not image_validator.is_valid():
self._is_valid = False
def validate_added_files(self, added_files):
"""Validate the added files from your branch.
In case we encounter an invalid file we set the self._is_valid param to False.
Args:
added_files (set): A set of the modified files in the current branch.
"""
for file_path in added_files:
print("Validating {}".format(file_path))
structure_validator = StructureValidator(file_path, is_added_file=True)
if not structure_validator.is_file_valid():
self._is_valid = False
if not self.id_set_validator.is_file_valid_in_set(file_path):
self._is_valid = False
if self.id_set_validator.is_file_has_used_id(file_path):
self._is_valid = False
if re.match(TEST_PLAYBOOK_REGEX, file_path, re.IGNORECASE):
if not self.conf_json_validator.is_test_in_conf_json(collect_ids(file_path)):
self._is_valid = False
elif re.match(INTEGRATION_REGEX, file_path, re.IGNORECASE) or \
re.match(INTEGRATION_YML_REGEX, file_path, re.IGNORECASE) or \
re.match(IMAGE_REGEX, file_path, re.IGNORECASE):
image_validator = ImageValidator(file_path)
if not image_validator.is_valid():
self._is_valid = False
description_validator = DescriptionValidator(file_path)
if not description_validator.is_valid():
self._is_valid = False
elif re.match(IMAGE_REGEX, file_path, re.IGNORECASE):
image_validator = ImageValidator(file_path)
if not image_validator.is_valid():
self._is_valid = False
def validate_no_secrets_found(self, branch_name):
"""Check if any secrets are found in your change set.
Args:
branch_name (string): The name of the branch you are working on.
"""
secrets_found = get_secrets(branch_name, self.is_circle)
if secrets_found:
self._is_valid = False
def validate_no_old_format(self, old_format_files):
""" Validate there are no files in the old format(unified yml file for the code and configuration).
Args:
old_format_files(set): file names which are in the old format.
"""
invalid_files = []
for f in old_format_files:
yaml_data = get_yaml(f)
if 'toversion' not in yaml_data: # we only fail on old format if no toversion (meaning it is latest)
invalid_files.append(f)
if invalid_files:
print_error("You must update the following files to the new package format. The files are:\n{}".format(
'\n'.join(list(invalid_files))))
self._is_valid = False
def validate_committed_files(self, branch_name, is_backward_check=True):
"""Validate that all the committed files in your branch are valid
Args:
branch_name (string): The name of the branch you are working on.
"""
modified_files, added_files, old_format_files = self.get_modified_and_added_files(branch_name, self.is_circle)
self.validate_no_secrets_found(branch_name)
self.validate_modified_files(modified_files, is_backward_check)
self.validate_added_files(added_files)
self.validate_no_old_format(old_format_files)
def validate_all_files(self):
"""Validate all files in the repo are in the right format."""
for regex in CHECKED_TYPES_REGEXES:
splitted_regex = regex.split(".*")
directory = splitted_regex[0]
for root, dirs, files in os.walk(directory):
if root not in DIR_LIST: # Skipping in case we entered a package
continue
print_color("Validating {} directory:".format(directory), LOG_COLORS.GREEN)
for file_name in files:
file_path = os.path.join(root, file_name)
# skipping hidden files
if file_name.startswith('.'):
continue
print("Validating " + file_name)
structure_validator = StructureValidator(file_path)
if not structure_validator.is_valid_scheme():
self._is_valid = False
if root in PACKAGE_SUPPORTING_DIRECTORIES:
for inner_dir in dirs:
file_path = glob.glob(os.path.join(root, inner_dir, '*.yml'))[0]
print("Validating " + file_path)
structure_validator = StructureValidator(file_path)
if not structure_validator.is_valid_scheme():
self._is_valid = False
def is_valid_structure(self, branch_name, is_backward_check=True):
"""Check if the structure is valid for the case we are in, master - all files, branch - changed files.
Args:
branch_name (string): The name of the branch we are working on.
Returns:
(bool). Whether the structure is valid or not.
"""
if not self.conf_json_validator.is_valid_conf_json():
self._is_valid = False
if branch_name != 'master' and not branch_name.startswith('19.') and not branch_name.startswith('20.'):
# validates only committed files
self.validate_committed_files(branch_name, is_backward_check=is_backward_check)
else:
# validates all of Content repo directories according to their schemas
self.validate_all_files()
return self._is_valid
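# Editor-added sketch: driving the validator programmatically instead of via
# main(). The branch name is a hypothetical example.
def _example_local_validation():
    """Run the full structure validation for a local (non-CircleCI) branch."""
    validator = FilesValidator(is_circle=False)
    return validator.is_valid_structure("my-feature-branch", is_backward_check=True)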
def main():
"""Execute FilesValidator checks on the modified changes in your branch, or all files in case of master.
This script runs both in a local and a remote environment. In a local environment we don't have any
logger assigned, and then pykwalify raises an error, since it is logging the validation results.
Therefore, if we are in a local env, we set up a logger. Also, we set the logger's level to critical
    so the user won't be disturbed by non-critical log messages.
"""
branches = run_command("git branch")
branch_name_reg = re.search("\* (.*)", branches)
branch_name = branch_name_reg.group(1)
parser = argparse.ArgumentParser(description='Utility CircleCI usage')
parser.add_argument('-c', '--circle', type=str2bool, default=False, help='Is CircleCi or not')
parser.add_argument('-b', '--backwardComp', type=str2bool, default=True, help='To check backward compatibility.')
parser.add_argument('-t', '--test-filter', type=str2bool, default=False, help='Check that tests are valid.')
options = parser.parse_args()
is_circle = options.circle
is_backward_check = options.backwardComp
logging.basicConfig(level=logging.CRITICAL)
print_color("Starting validating files structure", LOG_COLORS.GREEN)
files_validator = FilesValidator(is_circle)
if not files_validator.is_valid_structure(branch_name, is_backward_check=is_backward_check):
sys.exit(1)
if options.test_filter:
try:
print_color("Updating idset. Be patient if this is the first time...", LOG_COLORS.YELLOW)
subprocess.check_output(["./Tests/scripts/update_id_set.py"])
print_color("Checking that we have tests for all content...", LOG_COLORS.YELLOW)
try:
tests_out = subprocess.check_output(["./Tests/scripts/configure_tests.py", "-s", "true"],
stderr=subprocess.STDOUT)
print(tests_out)
except Exception:
print_color("Recreating idset to be sure that configure tests failure is accurate."
" Be patient this can take 15-20 seconds ...", LOG_COLORS.YELLOW)
subprocess.check_output(["./Tests/scripts/update_id_set.py", "-r"])
print_color("Checking that we have tests for all content again...", LOG_COLORS.YELLOW)
subprocess.check_call(["./Tests/scripts/configure_tests.py", "-s", "true"])
except Exception as ex:
print_color("Failed validating tests: {}".format(ex), LOG_COLORS.RED)
sys.exit(1)
print_color("Finished validating files structure", LOG_COLORS.GREEN)
sys.exit(0)
if __name__ == "__main__":
main()
| 46.127764
| 120
| 0.647651
|
aae65772c998f1e05e3bdd213168429b2fd08415
| 2,512
|
py
|
Python
|
resilient/resilient/resilient_rest_mock.py
|
carlosotgz/resilient-python-api
|
647a629c296602e8541b72155eab26d7fdc6b67f
|
[
"MIT"
] | 28
|
2017-12-22T00:26:59.000Z
|
2022-01-22T14:51:33.000Z
|
resilient/resilient/resilient_rest_mock.py
|
carlosotgz/resilient-python-api
|
647a629c296602e8541b72155eab26d7fdc6b67f
|
[
"MIT"
] | 18
|
2018-03-06T19:04:20.000Z
|
2022-03-21T15:06:30.000Z
|
resilient/resilient/resilient_rest_mock.py
|
carlosotgz/resilient-python-api
|
647a629c296602e8541b72155eab26d7fdc6b67f
|
[
"MIT"
] | 28
|
2018-05-01T17:53:22.000Z
|
2022-03-28T09:56:59.000Z
|
# (c) Copyright IBM Corp. 2010, 2017. All Rights Reserved.
""" Requests mock for Resilient REST API """
import logging
from collections import namedtuple
import json
import re
import requests_mock
from six import add_metaclass
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
LOG.setLevel(logging.DEBUG)
def resilient_endpoint(request_type, uri):
def mark(func):
func.uri = uri
func.request_type = request_type
return func
return mark
class ResilientMockType(type):
def __new__(mcl, name, bases, nmspc):
Endpoint = namedtuple("Endpoint", "type uri")
try:
endpoints = bases[0].registered_endpoints
except:
endpoints = {}
for obj in nmspc.values():
if hasattr(obj, 'uri'):
endpoints[Endpoint(type=obj.request_type, uri=obj.uri)] = obj
nmspc['registered_endpoints'] = endpoints
return super(ResilientMockType, mcl).__new__(mcl, name, bases, nmspc)
@add_metaclass(ResilientMockType)
class ResilientMock(object):
""" Base class for creating Resilient Rest API Mock definitions """
def __init__(self, org_name=None, email=None):
self.email = email or "api@example.com"
self.org_name = org_name or "Test Org"
LOG.info("Initialize ResilientMock %s %s", self.email, self.org_name)
self.adapter = requests_mock.Adapter()
for endpoint, handler in self.registered_endpoints.items():
# Register with regex since some endpoints embed the org_id in the path
LOG.info("Registering %s %s to %s", endpoint.type,
endpoint.uri, str(handler))
self.adapter.add_matcher(lambda request,
method=endpoint.type,
callback=handler,
uri=endpoint.uri: self._custom_matcher(method,
uri,
lambda request: callback(self, request),
request))
@staticmethod
def _custom_matcher(request_type, uri, response_callback, request):
""" matcher function for passing to adapter.add_matcher() """
if request.method == request_type and re.search(uri, request.url):
return response_callback(request)
else:
return None
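# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the resilient_endpoint decorator and the ResilientMockType metaclass
# combine to register a handler; the endpoint URI and response payload are assumptions.
class ExampleOrgMock(ResilientMock):
    """Minimal mock that answers GET /orgs with a canned organization."""

    @resilient_endpoint("GET", "/orgs")
    def orgs_get(self, request):
        # Handlers receive the matched request and return a requests.Response
        org_data = {"id": 201, "name": self.org_name}
        return requests_mock.create_response(request, status_code=200, json=org_data)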
| 38.646154
| 107
| 0.589968
|
e12d5ae4a744195027e13bbe5be2de2f8ea925ac
| 382
|
py
|
Python
|
packages/optitype/optitype.1.0.0/files/hook-numpy.py
|
solvuu/opam-repo-bio
|
0f33dad92113195693d876b5a04182eea08eeea8
|
[
"CC0-1.0"
] | 7
|
2015-10-12T20:23:43.000Z
|
2016-05-11T21:07:43.000Z
|
packages/optitype/optitype.1.0.0/files/hook-numpy.py
|
solvuu/opam-repo-bio
|
0f33dad92113195693d876b5a04182eea08eeea8
|
[
"CC0-1.0"
] | 20
|
2015-12-31T20:31:23.000Z
|
2016-05-16T20:37:06.000Z
|
packages/optitype/optitype.1.0.0/files/hook-numpy.py
|
solvuu/opam-repo-bio
|
0f33dad92113195693d876b5a04182eea08eeea8
|
[
"CC0-1.0"
] | 3
|
2016-01-21T23:52:14.000Z
|
2016-05-11T21:07:47.000Z
|
from PyInstaller import log as logging
from PyInstaller import compat
from os import listdir
libdir = compat.base_prefix + "/lib"
mkllib = filter(lambda x : x.startswith('libmkl_'), listdir(libdir))
if mkllib != []:
logger = logging.getLogger(__name__)
logger.info("MKL installed as part of numpy, importing that!")
binaries = map(lambda l: (libdir + "/" + l, ''), mkllib)
| 34.727273
| 68
| 0.712042
|
7cbe9302c95e473a1947b80183ee75182bf66b23
| 5,139
|
py
|
Python
|
proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/VART/samples/inception_v1_mt_py/inception_v1.py
|
timebe00/Mercenary
|
7762bad28e4f49b2ad84fb8abbd8056bd01f61d4
|
[
"MIT"
] | 3
|
2020-10-29T15:00:30.000Z
|
2021-10-21T08:09:34.000Z
|
proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/VART/samples/inception_v1_mt_py/inception_v1.py
|
timebe00/Mercenary
|
7762bad28e4f49b2ad84fb8abbd8056bd01f61d4
|
[
"MIT"
] | 20
|
2020-10-31T03:19:03.000Z
|
2020-11-02T18:59:49.000Z
|
proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/VART/samples/inception_v1_mt_py/inception_v1.py
|
timebe00/Mercenary
|
7762bad28e4f49b2ad84fb8abbd8056bd01f61d4
|
[
"MIT"
] | 9
|
2020-10-14T02:04:10.000Z
|
2020-12-01T08:23:02.000Z
|
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import input_fn
import math
import threading
import time
import sys
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data,size):
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
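# Illustrative check (not part of the original file): CPUCalcSoftmax([0.0, 1.0, 2.0], 3)
# returns approximately [0.0900, 0.2447, 0.6652], which sums to 1.0.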
def get_script_directory():
path = os.getcwd()
return path
'''
Get top-k results according to their probability
datain: softmax result data
size: data size
filePath: path of the file which records the class label information
'''
def TopK(datain,size,filePath):
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(5):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" %(i, (softmax_new[i]),(line.strip)("\n")))
flag=flag+1
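# Illustrative output (the class names in the label file are assumptions): TopK prints
# five lines of the form "Top[0] 0.662052 golden retriever", one per top-ranked class.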
l = threading.Lock()
SCRIPT_DIR = get_script_directory()
calib_image_dir = SCRIPT_DIR + "/../images/"
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
global threadnum
threadnum = 0
global runTotall
runTotall = 0
'''
run inception_v1 with batch
dpu: dpu runner
img: imagelist to be run
cnt: start index into the image list for this thread (thread id * batch size)
'''
def runInceptionV1(dpu,img,cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
tensorformat = dpu.get_tensor_format()
if tensorformat == dpu.TensorFormat.NCHW:
outputHeight = outputTensors[0].dims[2]
outputWidth = outputTensors[0].dims[3]
outputChannel = outputTensors[0].dims[1]
elif tensorformat == dpu.TensorFormat.NHWC:
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
else:
exit("Format error")
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
global runTotall
count = cnt
batchSize = inputTensors[0].dims[1]
while count < runTotall:
l.acquire()
if (runTotall < (count+batchSize)):
runSize = runTotall - count
else:
runSize = batchSize
l.release()
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndims)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[count+j].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
for j in range(runSize):
softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
l.acquire()
count = count + threadnum*runSize
l.release()
def main(argv):
global threadnum
"""create runner """
dpu = runner.Runner(argv[2])[0]
listimage=os.listdir(calib_image_dir)
threadAll = []
threadnum = int(argv[1])
i = 0
global runTotall
runTotall = len(listimage)
"""image list to be run """
img = []
for i in range(runTotall):
path = os.path.join(calib_image_dir,listimage[i])
image = cv2.imread(path)
img.append(input_fn.preprocess_fn(image))
batchSize = dpu.get_input_tensors()[0].dims[0];
"""run with batch """
time1 = time.time()
for i in range(int(threadnum)):
t1 = threading.Thread(target=runInceptionV1, args=(dpu, img, i*batchSize))
threadAll.append(t1)
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
timetotal = time2 - time1
fps = float(runTotall / timetotal)
print("%.2f FPS" %fps)
del dpu
if __name__ == "__main__":
if len(sys.argv) != 3:
print("please input thread number and json file path.")
else :
main(sys.argv)
| 27.778378
| 123
| 0.643316
|
0b80a5ad054a041ff8f08e3bacd69f57fd4f25ba
| 585
|
py
|
Python
|
tools/mime-type-check.py
|
aydinnemati/web-server
|
a3b60e93bf9fd1bd30f58eb2caeaf948ffd31311
|
[
"MIT"
] | null | null | null |
tools/mime-type-check.py
|
aydinnemati/web-server
|
a3b60e93bf9fd1bd30f58eb2caeaf948ffd31311
|
[
"MIT"
] | null | null | null |
tools/mime-type-check.py
|
aydinnemati/web-server
|
a3b60e93bf9fd1bd30f58eb2caeaf948ffd31311
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import re, json
found_extensions = {}
with open("./config/defaults/mime.types") as file_in:
lines = []
for line in file_in:
        m = re.search(r'([^\s]+)[\s]+([^"]+);', line)
if m:
mimetype = m.group(1)
extensions = m.group(2).split(' ')
for extension in extensions:
if extension not in found_extensions:
found_extensions[extension] = mimetype
else:
print(extension)
raise Exception('Duplicate extension')
print(json.dumps(found_extensions, indent=2, sort_keys=True))
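# Illustrative input/output (the mime.types line below is an assumption): a line such as
#   "text/html html htm shtml;"
# adds {"html": "text/html", "htm": "text/html", "shtml": "text/html"} to found_extensions.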
| 26.590909
| 61
| 0.591453
|
6b04a8c43037d1b4d37e93b29b7ce7bc95bd563c
| 673
|
py
|
Python
|
publishconf.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | null | null | null |
publishconf.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | 9
|
2020-08-30T19:52:19.000Z
|
2021-10-03T16:55:45.000Z
|
publishconf.py
|
julianespinel/website
|
a16394cf878fdfd8e1fa86ed23779d31ec4f1316
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import toml
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Load non-secrets from file
public = toml.load('public.toml')
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = public['website']['url']
WEBSITE_VERSION = public['website']['version']
RELATIVE_URLS = True
FEED_ALL_ATOM = 'feeds/all.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Load secrets from file
secrets = toml.load('secrets.toml')
GOOGLE_ANALYTICS = secrets['website']['google_analytics']
| 22.433333
| 77
| 0.734027
|
ab9e8d26d372c26c715b75a9d8e7c5feabee12ad
| 2,980
|
py
|
Python
|
src/ska_mid_cbf_mcs/vcc/vcc_band_3.py
|
jamesjiang52/mid-cbf-mcs
|
072f8fdb91d77010e875f441e536fab842bf8319
|
[
"BSD-3-Clause"
] | null | null | null |
src/ska_mid_cbf_mcs/vcc/vcc_band_3.py
|
jamesjiang52/mid-cbf-mcs
|
072f8fdb91d77010e875f441e536fab842bf8319
|
[
"BSD-3-Clause"
] | 4
|
2021-05-20T05:19:23.000Z
|
2021-05-20T05:19:26.000Z
|
src/ska_mid_cbf_mcs/vcc/vcc_band_3.py
|
ska-telescope/mid-cbf-mcs
|
072f8fdb91d77010e875f441e536fab842bf8319
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of the Vcc project
#
#
#
# Distributed under the terms of the GPL license.
# See LICENSE.txt for more info.
# """
# Author: James Jiang James.Jiang@nrc-cnrc.gc.ca,
# Herzberg Astronomy and Astrophysics, National Research Council of Canada
# Copyright (c) 2019 National Research Council of Canada
# """
# """
# VccBand3 TANGO device class for the prototype
# """
import os
import sys
import json
# tango imports
import tango
from tango import DebugIt
from tango.server import run
from tango.server import Device
from tango.server import attribute, command
from tango.server import device_property
from tango import AttrQuality, DispLevel, DevState
from tango import AttrWriteType, PipeWriteType
# SKA import
file_path = os.path.dirname(os.path.abspath(__file__))
from ska_tango_base.control_model import HealthState, AdminMode
from ska_tango_base import SKACapability
from ska_tango_base.commands import ResultCode
__all__ = ["VccBand3", "main"]
class VccBand3(SKACapability):
"""
VccBand3 TANGO device class for the prototype
"""
# PROTECTED REGION ID(VccBand3.class_variable) ENABLED START #
# PROTECTED REGION END # // VccBand3.class_variable
# -----------------
# Device Properties
# -----------------
# ----------
# Attributes
# ----------
# ---------------
# General methods
# ---------------
class InitCommand(SKACapability.InitCommand):
def do(self):
"""
Stateless hook for device initialisation.
:return: A tuple containing a return code and a string
message indicating status. The message is for
information purpose only.
:rtype: (ResultCode, str)
"""
self.logger.debug("Entering InitCommand()")
super().do()
device = self.target
#self.logger.warn("State() = {}".format(device.get_state()))
message = "VccBand3 Init command completed OK"
self.logger.info(message)
return (ResultCode.OK, message)
def always_executed_hook(self):
# PROTECTED REGION ID(VccBand3.always_executed_hook) ENABLED START #
"""hook to be executed before commands"""
pass
# PROTECTED REGION END # // VccBand3.always_executed_hook
def delete_device(self):
# PROTECTED REGION ID(VccBand3.delete_device) ENABLED START #
"""hook to delete device"""
pass
# PROTECTED REGION END # // VccBand3.delete_device
# ------------------
# Attributes methods
# ------------------
# --------
# Commands
# --------
# None
# ----------
# Run server
# ----------
def main(args=None, **kwargs):
# PROTECTED REGION ID(VccBand3.main) ENABLED START #
return run((VccBand3,), args=args, **kwargs)
# PROTECTED REGION END # // VccBand3.main
if __name__ == '__main__':
main()
| 24.833333
| 76
| 0.615101
|
b38ce6d293709fe5117a12a3f76334da7bc331c7
| 208
|
py
|
Python
|
apluslms_roman/schemas/__init__.py
|
ppessi/roman
|
19aafde41f307c2b51163f36e4ed71d28f95ee50
|
[
"MIT"
] | null | null | null |
apluslms_roman/schemas/__init__.py
|
ppessi/roman
|
19aafde41f307c2b51163f36e4ed71d28f95ee50
|
[
"MIT"
] | 14
|
2018-02-17T04:04:16.000Z
|
2020-01-13T18:40:57.000Z
|
apluslms_roman/schemas/__init__.py
|
ppessi/roman
|
19aafde41f307c2b51163f36e4ed71d28f95ee50
|
[
"MIT"
] | 6
|
2018-06-12T06:48:50.000Z
|
2021-05-18T16:36:04.000Z
|
from os.path import join
from apluslms_yamlidator.schemas import schema_registry
from .. import CACHE_DIR
schema_registry.register_module(__name__)
schema_registry.register_cache(join(CACHE_DIR, 'schemas'))
| 29.714286
| 58
| 0.850962
|
3bd82199aee0c758e2afeedb214d39152011b185
| 5,206
|
py
|
Python
|
kws_streaming/layers/lstm_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
kws_streaming/layers/lstm_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
kws_streaming/layers/lstm_test.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kws_streaming.layers.lstm."""
from absl.testing import parameterized
import numpy as np
from kws_streaming.layers import lstm
from kws_streaming.layers import modes
from kws_streaming.layers import test_utils
from kws_streaming.layers.compat import tf
from kws_streaming.layers.compat import tf1
tf1.disable_eager_execution()
class LSTMTest(tf.test.TestCase, parameterized.TestCase):
def _set_params(self, use_peepholes):
test_utils.set_seed(123)
# generate input signal
self.inference_batch_size = 1
self.data_size = 32
self.feature_size = 4
self.signal = np.random.rand(self.inference_batch_size, self.data_size,
self.feature_size)
# create non streamable model
inputs = tf.keras.layers.Input(
shape=(self.data_size, self.feature_size),
batch_size=self.inference_batch_size,
dtype=tf.float32)
self.units = 3
self.num_proj = 4
outputs = lstm.LSTM(
units=self.units,
return_sequences=True,
use_peepholes=use_peepholes,
num_proj=self.num_proj)(
inputs)
self.model_non_streamable = tf.keras.Model(inputs, outputs)
self.output_lstm = self.model_non_streamable.predict(self.signal)
@parameterized.named_parameters([
dict(testcase_name='with peephole', use_peepholes=True),
dict(testcase_name='without peephole', use_peepholes=False)
])
def test_streaming_inference_internal_state(self, use_peepholes):
# create streaming inference model with internal state
self._set_params(use_peepholes)
mode = modes.Modes.STREAM_INTERNAL_STATE_INFERENCE
inputs = tf.keras.layers.Input(
shape=(1, self.feature_size),
batch_size=self.inference_batch_size,
dtype=tf.float32)
outputs = lstm.LSTM(
units=self.units,
mode=mode,
use_peepholes=use_peepholes,
num_proj=self.num_proj)(
inputs)
model_stream = tf.keras.Model(inputs, outputs)
# set weights + states
if use_peepholes:
weights_states = self.model_non_streamable.get_weights() + [
np.zeros((self.inference_batch_size, self.units))
] + [np.zeros((self.inference_batch_size, self.num_proj))]
else:
weights_states = self.model_non_streamable.get_weights() + [
np.zeros((self.inference_batch_size, self.units))
] + [np.zeros((self.inference_batch_size, self.units))]
model_stream.set_weights(weights_states)
# compare streamable (with internal state) vs non streamable models
for i in range(self.data_size): # loop over time samples
input_stream = self.signal[:, i, :]
input_stream = np.expand_dims(input_stream, 1)
output_stream = model_stream.predict(input_stream)
self.assertAllClose(output_stream[0][0], self.output_lstm[0][i])
@parameterized.named_parameters([
dict(testcase_name='with peephole', use_peepholes=True),
dict(testcase_name='without peephole', use_peepholes=False)
])
def test_streaming_inference_external_state(self, use_peepholes):
# create streaming inference model with external state
self._set_params(use_peepholes)
mode = modes.Modes.STREAM_EXTERNAL_STATE_INFERENCE
inputs = tf.keras.layers.Input(
shape=(1, self.feature_size),
batch_size=self.inference_batch_size,
dtype=tf.float32)
lstm_layer = lstm.LSTM(
units=self.units,
mode=mode,
use_peepholes=use_peepholes,
num_proj=self.num_proj)
outputs = lstm_layer(inputs)
model_stream = tf.keras.Model([inputs] + lstm_layer.get_input_state(),
[outputs] + lstm_layer.get_output_state())
# set weights only
model_stream.set_weights(self.model_non_streamable.get_weights())
# input states
input_state1 = np.zeros((self.inference_batch_size, self.units))
if use_peepholes:
input_state2 = np.zeros((self.inference_batch_size, self.num_proj))
else:
input_state2 = np.zeros((self.inference_batch_size, self.units))
# compare streamable vs non streamable models
for i in range(self.data_size): # loop over time samples
input_stream = self.signal[:, i, :]
input_stream = np.expand_dims(input_stream, 1)
output_streams = model_stream.predict(
[input_stream, input_state1, input_state2])
# update input states
input_state1 = output_streams[1]
input_state2 = output_streams[2]
self.assertAllClose(output_streams[0][0][0], self.output_lstm[0][i])
if __name__ == '__main__':
tf.test.main()
| 37.453237
| 76
| 0.708029
|
7106b9be18f8718f4df730e3b3881117558040ca
| 3,587
|
py
|
Python
|
girder/cli/build.py
|
manthey/girder
|
1ae1068b02b3dc775df957f3a3c79a6aa9798043
|
[
"Apache-2.0"
] | null | null | null |
girder/cli/build.py
|
manthey/girder
|
1ae1068b02b3dc775df957f3a3c79a6aa9798043
|
[
"Apache-2.0"
] | null | null | null |
girder/cli/build.py
|
manthey/girder
|
1ae1068b02b3dc775df957f3a3c79a6aa9798043
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
from pkg_resources import resource_filename
from subprocess import check_call
import sys
import click
from girder.constants import STATIC_ROOT_DIR
from girder.plugin import allPlugins, getPlugin
_GIRDER_BUILD_ASSETS_PATH = resource_filename('girder', 'web_client')
@click.command(name='build', help='Build web client static assets.')
@click.option('--dev/--no-dev', default=False,
help='Build girder client for development.')
@click.option('--watch', default=False, is_flag=True,
help='Build girder library bundle in watch mode (implies --dev --no-reinstall).')
@click.option('--watch-plugin',
help='Build a girder plugin bundle in watch mode (implies --dev --no-reinstall).')
@click.option('--reinstall/--no-reinstall', default=True,
help='Force regenerate node_modules.')
def main(dev, watch, watch_plugin, reinstall):
if watch and watch_plugin:
raise click.UsageError('--watch and --watch-plugins cannot be used together')
if watch or watch_plugin:
dev = True
reinstall = False
staging = _GIRDER_BUILD_ASSETS_PATH
_generatePackageJSON(staging, os.path.join(_GIRDER_BUILD_ASSETS_PATH, 'package.json.template'))
if not os.path.isdir(os.path.join(staging, 'node_modules')) or reinstall:
# The autogeneration of package.json breaks how package-lock.json is
# intended to work. If we don't delete it first, you will frequently
# get "file doesn't exist" errors.
npmLockFile = os.path.join(staging, 'package-lock.json')
if os.path.exists(npmLockFile):
os.unlink(npmLockFile)
check_call(['npm', 'install'], cwd=staging)
quiet = '--no-progress=false' if sys.stdout.isatty() else '--no-progress=true'
buildCommand = [
'npx', 'grunt', '--static-path=%s' % STATIC_ROOT_DIR, quiet]
if watch:
buildCommand.append('--watch')
if watch_plugin:
buildCommand.extend([
'--watch',
'webpack:plugin_%s' % watch_plugin
])
if dev:
buildCommand.append('--env=dev')
else:
buildCommand.append('--env=prod')
check_call(buildCommand, cwd=staging)
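# Illustrative invocations (the CLI entry point and plugin name are assumptions):
#   girder build                       # production bundle
#   girder build --dev --watch         # rebuild the core library bundle on change
#   girder build --watch-plugin jobs   # rebuild a single plugin bundle on change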
def _collectPluginDependencies():
packages = {}
for pluginName in allPlugins():
plugin = getPlugin(pluginName)
packages.update(plugin.npmPackages())
return packages
def _generatePackageJSON(staging, source):
with open(source, 'r') as f:
sourceJSON = json.load(f)
deps = sourceJSON['dependencies']
plugins = _collectPluginDependencies()
deps.update(plugins)
sourceJSON['girder'] = {
'plugins': list(plugins.keys())
}
with open(os.path.join(staging, 'package.json'), 'w') as f:
json.dump(sourceJSON, f)
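# Illustrative effect (the plugin and package names are assumptions): if a plugin's
# npmPackages() returns {"@girder/jobs": "^3.0.0"}, that entry is merged into
# "dependencies" and the generated package.json gains "girder": {"plugins": ["@girder/jobs"]}.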
| 36.232323
| 99
| 0.647895
|
cbe2f3d79c5534a10e59fae092f7ddd707bcd2cc
| 9,799
|
py
|
Python
|
tr/session_test.py
|
DentonGentry/gfiber-catawampus
|
b01e4444f3c7f12b1af7837203b37060fd443bb7
|
[
"Apache-2.0"
] | 2
|
2017-10-03T16:06:29.000Z
|
2020-09-08T13:03:13.000Z
|
tr/session_test.py
|
DentonGentry/gfiber-catawampus
|
b01e4444f3c7f12b1af7837203b37060fd443bb7
|
[
"Apache-2.0"
] | null | null | null |
tr/session_test.py
|
DentonGentry/gfiber-catawampus
|
b01e4444f3c7f12b1af7837203b37060fd443bb7
|
[
"Apache-2.0"
] | 1
|
2017-05-07T17:39:02.000Z
|
2017-05-07T17:39:02.000Z
|
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
# pylint: disable=invalid-name
"""Unit tests for session.py."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import time
import google3
import session
from wvtest import unittest
class CwmpSessionTest(unittest.TestCase):
"""tests for CwmpSession."""
def testStateConnect(self):
cs = session.CwmpSession('')
self.assertTrue(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
# should be no change
cs.state_update(on_hold=True)
self.assertTrue(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(cpe_to_acs_empty=True)
self.assertTrue(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(acs_to_cpe_empty=True)
self.assertTrue(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
# transition to ACTIVE
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
def testActive(self):
cs = session.CwmpSession('')
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# should be no change
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
cs.state_update(acs_to_cpe_empty=True)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# transition to ONHOLD
cs.state_update(on_hold=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# transition back to ACTIVE
cs.state_update(on_hold=False)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# transition to NOMORE
cs.state_update(cpe_to_acs_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
def testOnHold(self):
cs = session.CwmpSession('')
cs.state_update(sent_inform=True)
cs.state_update(on_hold=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# No change
cs.state_update(on_hold=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
cs.state_update(cpe_to_acs_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
cs.state_update(acs_to_cpe_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# back to ACTIVE
cs.state_update(on_hold=False)
self.assertFalse(cs.inform_required())
self.assertTrue(cs.request_allowed())
self.assertTrue(cs.response_allowed())
def testNoMore(self):
cs = session.CwmpSession('')
# transition to NOMORE
cs.state_update(sent_inform=True)
cs.state_update(cpe_to_acs_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# should be no change
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
cs.state_update(on_hold=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertTrue(cs.response_allowed())
# transition to DONE
cs.state_update(acs_to_cpe_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
def testDone(self):
cs = session.CwmpSession('')
cs.state_update(sent_inform=True)
cs.state_update(cpe_to_acs_empty=True)
cs.state_update(acs_to_cpe_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(cpe_to_acs_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(acs_to_cpe_empty=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
cs.state_update(sent_inform=True)
self.assertFalse(cs.inform_required())
self.assertFalse(cs.request_allowed())
self.assertFalse(cs.response_allowed())
class SimpleCacheObject(object):
def __init__(self):
self.cache_this_function_n = 0
self.cache_this_function_args_n = 0
@session.cache
def cache_this_function(self):
self.cache_this_function_n += 1
@session.cache
def cache_function_with_args(
self, arg1, arg2, x=0, y=0): # pylint: disable=unused-argument
self.cache_this_function_args_n += 1 + x + y
@session.cache
def SimpleCacheFunction():
return time.time()
cache_iter_start = 1
def _CacheIterFunction():
global cache_iter_start
yield cache_iter_start
cache_iter_start += 1
yield cache_iter_start
cache_iter_start += 1
@session.cache
def CacheIterFunction():
return _CacheIterFunction()
@session.cache
def CacheListFunction():
return list(_CacheIterFunction())
@session.cache_as_list
def CacheAsListFunction():
return _CacheIterFunction()
class SessionCacheTest(unittest.TestCase):
"""tests for SessionCache."""
def testCacheObject(self):
t1 = SimpleCacheObject()
t2 = SimpleCacheObject()
t3 = SimpleCacheObject()
for _ in range(1001):
t1.cache_this_function()
t2.cache_this_function()
t3.cache_this_function()
self.assertEqual(t1.cache_this_function_n, 1)
self.assertEqual(t2.cache_this_function_n, 1)
self.assertEqual(t3.cache_this_function_n, 1)
session.cache.flush()
for _ in range(101):
t1.cache_this_function()
t2.cache_this_function()
self.assertEqual(t1.cache_this_function_n, 2)
self.assertEqual(t2.cache_this_function_n, 2)
self.assertEqual(t3.cache_this_function_n, 1)
def testCacheFunction(self):
t = SimpleCacheFunction()
for _ in range(1000):
self.assertEqual(t, SimpleCacheFunction())
session.cache.flush()
self.assertNotEqual(t, SimpleCacheFunction())
def testCacheFunctionArgs(self):
t = SimpleCacheObject()
for i in range(100):
t.cache_function_with_args(i, 0)
self.assertEqual(t.cache_this_function_args_n, 100)
def testCacheFunctionComplicatedArgs(self):
t = SimpleCacheObject()
arg = [1, 2, [3, 4], [5, 6, [7, 8, [9, 10]]], 11, 12]
for i in range(10):
t.cache_function_with_args(i, arg, x=5, y=7)
self.assertEqual(t.cache_this_function_args_n,
10 * (1 + 5 + 7))
for i in range(10):
t.cache_function_with_args(i, arg, x=5, y=7)
t.cache_function_with_args(99, arg)
t.cache_function_with_args(99, arg, y=9)
self.assertEqual(t.cache_this_function_args_n,
10 * (1 + 5 + 7) +
1 + (1 + 9))
def testCacheIterFunction(self):
self.assertRaises(TypeError, CacheIterFunction)
self.assertEqual(CacheListFunction(), CacheListFunction())
self.assertEqual(list(CacheListFunction()), CacheListFunction())
self.assertEqual(len(CacheListFunction()), 2)
self.assertEqual(CacheAsListFunction(), CacheAsListFunction())
self.assertEqual(list(CacheAsListFunction()), CacheAsListFunction())
self.assertEqual(len(CacheAsListFunction()), 2)
self.assertEqual(CacheListFunction(), [1, 2])
self.assertEqual(CacheAsListFunction(), [3, 4])
RunAtEndTestResults = {}
def setBarAtEnd(val):
RunAtEndTestResults['bar'] = val
class RunAtEndTest(unittest.TestCase):
def setUp(self):
RunAtEndTestResults.clear()
def setFooAtEnd(self, val):
RunAtEndTestResults['foo'] = val
def testRunAtEnd(self):
session.RunAtEnd(lambda: self.setFooAtEnd(1))
session.RunAtEnd(lambda: setBarAtEnd(2))
self.assertEqual(len(RunAtEndTestResults), 0)
session._RunEndCallbacks()
self.assertEqual(len(RunAtEndTestResults), 2)
self.assertEqual(RunAtEndTestResults['foo'], 1)
self.assertEqual(RunAtEndTestResults['bar'], 2)
if __name__ == '__main__':
unittest.main()
| 30.058282
| 74
| 0.72589
|
4a937665b8a485320fffe37f4e7ab50f855f6e58
| 451
|
py
|
Python
|
forms/urls.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 3
|
2018-02-27T13:48:28.000Z
|
2018-03-03T21:57:50.000Z
|
forms/urls.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 6
|
2020-02-12T00:07:46.000Z
|
2022-03-11T23:25:59.000Z
|
forms/urls.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 1
|
2019-03-26T20:19:57.000Z
|
2019-03-26T20:19:57.000Z
|
from django.conf.urls import url
from . import views
app_name = 'forms'
urlpatterns = [
#forms/creator/12 ----in great use
url(r'^creator/(?P<pk>\d+)/$', views.creator_form, name='creator_form'),
# forms/create/ques/2 ---in use
url(r'^create/ques/(?P<pk>\d+)/$', views.add_ques, name='add_ques'),
# forms/update/ques/12/34 ---in use
url(r'^update/ques/(?P<pk>\d+)/(?P<qk>\d+)/$',views.edit_ques,name='ques_update'),
]
| 22.55
| 86
| 0.614191
|
0ebbb888a38a477da3c319627eb6276b2e54df5e
| 1,785
|
py
|
Python
|
main.py
|
alexander24032003/Flask-Repo
|
547d4f99766b411aa757580c7dbbd485ef0888ec
|
[
"MIT"
] | null | null | null |
main.py
|
alexander24032003/Flask-Repo
|
547d4f99766b411aa757580c7dbbd485ef0888ec
|
[
"MIT"
] | null | null | null |
main.py
|
alexander24032003/Flask-Repo
|
547d4f99766b411aa757580c7dbbd485ef0888ec
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template, request, redirect, url_for
from post import Post
from comment import Comment
app = Flask(__name__)
@app.route('/')
def hello_world():
return redirect(url_for('list_posts'))
@app.route('/posts')
def list_posts():
return render_template('posts.html', posts=Post.all())
@app.route('/posts/<int:post_id>')
def show_post(post_id):
post = Post.find(post_id)
return render_template('post.html', post=post)
@app.route('/posts/<int:post_id>/edit', methods=['GET', 'POST'])
def edit_post(post_id):
post = Post.find(post_id)
if request.method == 'GET':
return render_template('edit_post.html', post=post)
elif request.method == 'POST':
post.name = request.form['name']
post.author = request.form['author']
post.content = request.form['content']
post.save()
return redirect(url_for('show_post', post_id=post.post_id))
@app.route('/posts/<int:post_id>/delete', methods=['POST'])
def delete_post(post_id):
post = Post.find(post_id)
post.delete()
return redirect(url_for('list_posts'))
@app.route('/posts/new', methods=['GET', 'POST'])
def new_post():
if request.method == 'GET':
return render_template('new_post.html')
elif request.method == 'POST':
values = (None, request.form['name'], request.form['author'], request.form['content'])
Post(*values).create()
return redirect(url_for('list_posts'))
@app.route('/comments/new', methods=['POST'])
def new_comment():
if request.method == 'POST':
post = Post.find(request.form['post_id'])
values = (None, post, request.form['message'])
Comment(*values).create()
        return redirect(url_for('show_post', post_id=post.post_id))
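# Minimal local-run sketch (an assumption, not part of the original file); it starts
# Flask's development server so the routes above can be exercised at /posts.
if __name__ == '__main__':
    app.run(debug=True)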
| 27.461538
| 94
| 0.653782
|
b079c810fffe883cdb584df215590859d5197d33
| 2,908
|
py
|
Python
|
bin/create_students.py
|
phdfbk/phdfbk.github.io
|
a111369d358e473bebc158e088d6af9adf04ae82
|
[
"Apache-2.0"
] | null | null | null |
bin/create_students.py
|
phdfbk/phdfbk.github.io
|
a111369d358e473bebc158e088d6af9adf04ae82
|
[
"Apache-2.0"
] | 36
|
2016-08-13T11:30:34.000Z
|
2019-01-28T16:03:41.000Z
|
bin/create_students.py
|
phdfbk/phdfbk.github.io
|
a111369d358e473bebc158e088d6af9adf04ae82
|
[
"Apache-2.0"
] | 1
|
2019-05-23T13:41:52.000Z
|
2019-05-23T13:41:52.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unicodecsv
import requests
import cgi
from cStringIO import StringIO
from datetime import datetime
url_students ="https://docs.google.com/spreadsheets/d/1UJ8eHwcBsdKRjwc6uu2KVhNIn_FSEpLMh6xqMNQ5sUY/pub?gid=191666891&single=true&output=csv"
r = requests.get(url_students)
f = StringIO(r.text.encode('iso-8859-1'))
reader = unicodecsv.reader(f, encoding='iso-8859-1')
k = 0
for row in reader:
if k > 0:
yearmonthday = datetime.today().strftime('%Y-%m-%d')
yearmonthday = "2016-08-23"
daymonthyear = datetime.today().strftime('%d/%m/%Y')
daymonthyear = "23/08/2016"
surname = str(row[0].encode('iso-8859-1')).replace("\n","")
filename = yearmonthday
filename += "-" + str(row[0].encode('iso-8859-1')).lower().replace(" ","_") + ".md"
new_yaml = open(filename, 'w')
yaml_text = ""
yaml_text += "---\n"
yaml_text += "layout: default \n"
name = str(row[1].encode('iso-8859-1'))
yaml_text += "id: " + yearmonthday + "-" + surname + "-" + name.replace(" ","_") + "\n"
yaml_text += "surname: " + surname + "\n"
yaml_text += "name: " + name + "\n"
yaml_text += "university: " + str(row[2].encode('iso-8859-1')) + "\n"
yaml_text += "date: " + daymonthyear + "\n"
yaml_text += "aboutme: " + cgi.escape(str(row[3].encode('iso-8859-1'))).replace("\n","<br/>").replace(":", ":") + "\n"
yaml_text += "from: " + cgi.escape(str(row[4].encode('iso-8859-1'))) + "\n"
yaml_text += "research_topic: " + cgi.escape(str(row[5].encode('iso-8859-1')). replace(":", ":")) + "\n"
yaml_text += "abstract: " + str(row[6].encode('iso-8859-1')).replace("\n","<br/>").replace(":", ":") + "\n"
yaml_text += "advisor: " + str(row[7].encode('iso-8859-1')) + "\n"
yaml_text += "keywords: " + str(row[8].encode('iso-8859-1')) + "\n"
website = ""
try:
website = str(row[9].encode('iso-8859-1')).replace(":", ":")
except:
pass
        if website.lower().find('url') == -1:
website = "http://" + website
yaml_text += "website: " + website + "\n"
img = ""
try:
img = str(row[10].encode('iso-8859-1'))
except:
pass
if img == "":
img = "no_picture.jpg"
email = ""
try:
email = str(row[11].encode('iso-8859-1'))
except:
pass
email = email.replace('@', '<i class="fa fa-at" aria-hidden="true"></i>')
yaml_text += "img: " + img + "\n"
yaml_text += 'email: ' + email + '\n'
yaml_text += "alt: " + name + " " + surname + "\n"
yaml_text += "modal-id: stud" + str(k) + "\n"
new_yaml.write(yaml_text + "---\n")
new_yaml.close()
k += 1
| 44.060606
| 140
| 0.516162
|
b9deb3b3b8b578f4176a40753e4de1a49f6e2fc9
| 752
|
py
|
Python
|
tests/http/test_tls.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 1,410
|
2015-01-02T14:55:07.000Z
|
2022-03-28T17:22:06.000Z
|
tests/http/test_tls.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 194
|
2015-01-22T06:18:24.000Z
|
2020-10-20T21:21:58.000Z
|
tests/http/test_tls.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 168
|
2015-01-31T10:29:55.000Z
|
2022-03-14T10:22:24.000Z
|
import os
from pulsar.apps.http import SSLError, HttpClient
from pulsar.utils.system import platform
from tests.http import base
crt = os.path.join(os.path.dirname(__file__), 'ca_bundle')
if platform.type != 'win':
class TestTlsHttpClient(base.TestHttpClient):
with_tls = True
async def test_verify(self):
c = HttpClient()
with self.assertRaises(SSLError):
await c.get(self.httpbin())
response = await c.get(self.httpbin(), verify=False)
self.assertEqual(response.status_code, 200)
response = await c.get(self.httpbin(), verify=crt)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request.verify, crt)
| 28.923077
| 64
| 0.652926
|
0248f2599780b5c0b6cdafc74d877f83dc796a55
| 25,383
|
py
|
Python
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/activate.py
|
mikiec84/PTVS
|
6cbeadd70a4438d6e6ea4d22a465d678eacf5eb2
|
[
"Apache-2.0"
] | null | null | null |
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/activate.py
|
mikiec84/PTVS
|
6cbeadd70a4438d6e6ea4d22a465d678eacf5eb2
|
[
"Apache-2.0"
] | 1
|
2019-04-02T23:35:13.000Z
|
2019-04-02T23:35:13.000Z
|
Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/activate.py
|
mikiec84/PTVS
|
6cbeadd70a4438d6e6ea4d22a465d678eacf5eb2
|
[
"Apache-2.0"
] | 2
|
2018-03-02T19:55:14.000Z
|
2019-02-14T22:37:28.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from errno import ENOENT
from glob import glob
import os
from os.path import (abspath, basename, dirname, expanduser, expandvars, isdir, join, normcase,
normpath)
import re
import sys
from tempfile import NamedTemporaryFile
from .base.context import ROOT_ENV_NAME, context, locate_prefix_by_name
context.__init__() # On import, context does not include SEARCH_PATH. This line fixes that.
try:
from cytoolz.itertoolz import concatv, drop
except ImportError: # pragma: no cover
from ._vendor.toolz.itertoolz import concatv, drop # NOQA
class Activator(object):
# Activate and deactivate have three tasks
# 1. Set and unset environment variables
# 2. Execute/source activate.d/deactivate.d scripts
# 3. Update the command prompt
#
# Shells should also use 'reactivate' following conda's install, update, and
# remove/uninstall commands.
#
# All core logic is in build_activate() or build_deactivate(), and is independent of
# shell type. Each returns a map containing the keys:
    #   unset_vars
    #   set_vars
    #   export_vars
    #   deactivate_scripts
    #   activate_scripts
#
# The value of the CONDA_PROMPT_MODIFIER environment variable holds conda's contribution
# to the command prompt.
#
# To implement support for a new shell, ideally one would only need to add shell-specific
# information to the __init__ method of this class.
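    # Illustrative call (an assumption, shown here only as documentation): for a POSIX
    # shell, Activator('posix', ['activate', 'base']).execute() returns a newline-joined
    # string of unset/export/source commands that the calling shell is expected to eval.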
def __init__(self, shell, arguments=None):
self.shell = shell
self._raw_arguments = arguments
if PY2:
self.environ = {ensure_fs_path_encoding(k): ensure_fs_path_encoding(v)
for k, v in iteritems(os.environ)}
else:
self.environ = os.environ.copy()
if shell == 'posix':
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.sh'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.shift_args = 0
self.command_join = '\n'
self.unset_var_tmpl = '\\unset %s'
self.export_var_tmpl = "\\export %s='%s'"
self.set_var_tmpl = "%s='%s'"
self.run_script_tmpl = '\\. "%s"'
elif shell == 'csh':
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.csh'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.shift_args = 0
self.command_join = ';\n'
self.unset_var_tmpl = 'unsetenv %s'
self.export_var_tmpl = 'setenv %s "%s"'
self.set_var_tmpl = "set %s='%s'"
self.run_script_tmpl = 'source "%s"'
elif shell == 'xonsh':
self.pathsep_join = ':'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.xsh'
self.tempfile_extension = '.xsh'
self.shift_args = 0
self.command_join = '\n'
self.unset_var_tmpl = 'del $%s'
self.export_var_tmpl = "$%s = '%s'"
self.run_script_tmpl = 'source "%s"'
elif shell == 'cmd.exe':
self.pathsep_join = ';'.join
self.sep = '\\'
self.path_conversion = path_identity
self.script_extension = '.bat'
self.tempfile_extension = '.bat'
self.shift_args = 1
self.command_join = '\r\n' if on_win else '\n'
self.unset_var_tmpl = '@SET %s='
self.export_var_tmpl = '@SET "%s=%s"'
self.run_script_tmpl = '@CALL "%s"'
elif shell == 'fish':
self.pathsep_join = '" "'.join
self.sep = '/'
self.path_conversion = native_path_to_unix
self.script_extension = '.fish'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.shift_args = 0
self.command_join = ';\n'
self.unset_var_tmpl = 'set -e %s'
self.export_var_tmpl = 'set -gx %s "%s"'
self.run_script_tmpl = 'source "%s"'
elif shell == 'powershell':
self.pathsep_join = ';'.join
self.sep = '\\'
self.path_conversion = path_identity
self.script_extension = '.ps1'
self.tempfile_extension = None # write instructions to stdout rather than a temp file
self.shift_args = 0
self.command_join = '\n'
self.unset_var_tmpl = 'Remove-Variable %s'
self.export_var_tmpl = '$env:%s = "%s"'
self.run_script_tmpl = '. "%s"'
else:
raise NotImplementedError()
def _finalize(self, commands, ext):
commands = concatv(commands, ('',)) # add terminating newline
if ext is None:
return self.command_join.join(commands)
elif ext:
with NamedTemporaryFile('w+b', suffix=ext, delete=False) as tf:
# the default mode is 'w+b', and universal new lines don't work in that mode
# command_join should account for that
tf.write(ensure_binary(self.command_join.join(commands)))
return tf.name
else:
raise NotImplementedError()
def activate(self):
return self._finalize(self._yield_commands(self.build_activate(self.env_name_or_prefix)),
self.tempfile_extension)
def deactivate(self):
return self._finalize(self._yield_commands(self.build_deactivate()),
self.tempfile_extension)
def reactivate(self):
return self._finalize(self._yield_commands(self.build_reactivate()),
self.tempfile_extension)
def execute(self):
# return value meant to be written to stdout
self._parse_and_set_args(self._raw_arguments)
return getattr(self, self.command)()
def _parse_and_set_args(self, arguments):
# the first index of arguments MUST be either activate, deactivate, or reactivate
if arguments is None:
from .exceptions import ArgumentError
raise ArgumentError("'activate', 'deactivate', or 'reactivate' command must be given")
command = arguments[0]
arguments = tuple(drop(self.shift_args + 1, arguments))
help_flags = ('-h', '--help', '/?')
non_help_args = tuple(arg for arg in arguments if arg not in help_flags)
help_requested = len(arguments) != len(non_help_args)
remainder_args = tuple(arg for arg in non_help_args if arg and arg != command)
if not command:
from .exceptions import ArgumentError
raise ArgumentError("'activate', 'deactivate', or 'reactivate' command must be given")
elif help_requested:
from . import CondaError
class Help(CondaError): # NOQA
pass
raise Help("help requested for %s" % command)
elif command not in ('activate', 'deactivate', 'reactivate'):
from .exceptions import ArgumentError
raise ArgumentError("invalid command '%s'" % command)
elif command == 'activate' and len(remainder_args) > 1:
from .exceptions import ArgumentError
raise ArgumentError('activate does not accept more than one argument:\n'
+ str(remainder_args) + '\n')
elif command != 'activate' and remainder_args:
from .exceptions import ArgumentError
raise ArgumentError('%s does not accept arguments\nremainder_args: %s\n'
% (command, remainder_args))
if command == 'activate':
self.env_name_or_prefix = remainder_args and remainder_args[0] or 'root'
self.command = command
def _yield_commands(self, cmds_dict):
for script in cmds_dict.get('deactivate_scripts', ()):
yield self.run_script_tmpl % script
for key in sorted(cmds_dict.get('unset_vars', ())):
yield self.unset_var_tmpl % key
for key, value in sorted(iteritems(cmds_dict.get('set_vars', {}))):
yield self.set_var_tmpl % (key, value)
for key, value in sorted(iteritems(cmds_dict.get('export_vars', {}))):
yield self.export_var_tmpl % (key, value)
for script in cmds_dict.get('activate_scripts', ()):
yield self.run_script_tmpl % script
def build_activate(self, env_name_or_prefix):
if re.search(r'\\|/', env_name_or_prefix):
prefix = expand(env_name_or_prefix)
if not isdir(join(prefix, 'conda-meta')):
from .exceptions import EnvironmentLocationNotFound
raise EnvironmentLocationNotFound(prefix)
elif env_name_or_prefix in (ROOT_ENV_NAME, 'root'):
prefix = context.root_prefix
else:
prefix = locate_prefix_by_name(env_name_or_prefix)
prefix = normpath(prefix)
# query environment
old_conda_shlvl = int(self.environ.get('CONDA_SHLVL', 0))
old_conda_prefix = self.environ.get('CONDA_PREFIX')
max_shlvl = context.max_shlvl
if old_conda_prefix == prefix and old_conda_shlvl > 0:
return self.build_reactivate()
if self.environ.get('CONDA_PREFIX_%s' % (old_conda_shlvl-1)) == prefix:
# in this case, user is attempting to activate the previous environment,
# i.e. step back down
return self.build_deactivate()
activate_scripts = self._get_activate_scripts(prefix)
conda_default_env = self._default_env(prefix)
conda_prompt_modifier = self._prompt_modifier(conda_default_env)
assert 0 <= old_conda_shlvl <= max_shlvl
set_vars = {}
if old_conda_shlvl == 0:
new_path = self.pathsep_join(self._add_prefix_to_path(prefix))
export_vars = {
'CONDA_PYTHON_EXE': self.path_conversion(sys.executable),
'CONDA_EXE': self.path_conversion(context.conda_exe),
'PATH': new_path,
'CONDA_PREFIX': prefix,
'CONDA_SHLVL': old_conda_shlvl + 1,
'CONDA_DEFAULT_ENV': conda_default_env,
'CONDA_PROMPT_MODIFIER': conda_prompt_modifier,
}
deactivate_scripts = ()
elif old_conda_shlvl == max_shlvl:
new_path = self.pathsep_join(self._replace_prefix_in_path(old_conda_prefix, prefix))
export_vars = {
'PATH': new_path,
'CONDA_PREFIX': prefix,
'CONDA_DEFAULT_ENV': conda_default_env,
'CONDA_PROMPT_MODIFIER': conda_prompt_modifier,
}
deactivate_scripts = self._get_deactivate_scripts(old_conda_prefix)
else:
new_path = self.pathsep_join(self._add_prefix_to_path(prefix))
export_vars = {
'PATH': new_path,
'CONDA_PREFIX': prefix,
'CONDA_PREFIX_%d' % old_conda_shlvl: old_conda_prefix,
'CONDA_SHLVL': old_conda_shlvl + 1,
'CONDA_DEFAULT_ENV': conda_default_env,
'CONDA_PROMPT_MODIFIER': conda_prompt_modifier,
}
deactivate_scripts = ()
self._update_prompt(set_vars, conda_prompt_modifier)
if on_win and self.shell == 'cmd.exe':
import ctypes
export_vars.update({
"PYTHONIOENCODING": ctypes.cdll.kernel32.GetACP(),
})
return {
'unset_vars': (),
'set_vars': set_vars,
'export_vars': export_vars,
'deactivate_scripts': deactivate_scripts,
'activate_scripts': activate_scripts,
}
def build_deactivate(self):
# query environment
old_conda_prefix = self.environ.get('CONDA_PREFIX')
old_conda_shlvl = int(self.environ.get('CONDA_SHLVL', 0))
if not old_conda_prefix or old_conda_shlvl < 1:
# no active environment, so cannot deactivate; do nothing
return {
'unset_vars': (),
'set_vars': {},
'export_vars': {},
'deactivate_scripts': (),
'activate_scripts': (),
}
deactivate_scripts = self._get_deactivate_scripts(old_conda_prefix)
new_conda_shlvl = old_conda_shlvl - 1
new_path = self.pathsep_join(self._remove_prefix_from_path(old_conda_prefix))
set_vars = {}
if old_conda_shlvl == 1:
# TODO: warn conda floor
conda_prompt_modifier = ''
unset_vars = (
'CONDA_PREFIX',
'CONDA_DEFAULT_ENV',
'CONDA_PYTHON_EXE',
'CONDA_EXE',
'CONDA_PROMPT_MODIFIER',
)
export_vars = {
'PATH': new_path,
'CONDA_SHLVL': new_conda_shlvl,
}
activate_scripts = ()
else:
new_prefix = self.environ.get('CONDA_PREFIX_%d' % new_conda_shlvl)
conda_default_env = self._default_env(new_prefix)
conda_prompt_modifier = self._prompt_modifier(conda_default_env)
unset_vars = (
'CONDA_PREFIX_%d' % new_conda_shlvl,
)
export_vars = {
'PATH': new_path,
'CONDA_SHLVL': new_conda_shlvl,
'CONDA_PREFIX': new_prefix,
'CONDA_DEFAULT_ENV': conda_default_env,
'CONDA_PROMPT_MODIFIER': conda_prompt_modifier,
}
activate_scripts = self._get_activate_scripts(new_prefix)
self._update_prompt(set_vars, conda_prompt_modifier)
return {
'unset_vars': unset_vars,
'set_vars': set_vars,
'export_vars': export_vars,
'deactivate_scripts': deactivate_scripts,
'activate_scripts': activate_scripts,
}
def build_reactivate(self):
conda_prefix = self.environ.get('CONDA_PREFIX')
conda_shlvl = int(self.environ.get('CONDA_SHLVL', 0))
if not conda_prefix or conda_shlvl < 1:
# no active environment, so cannot reactivate; do nothing
return {
'unset_vars': (),
'set_vars': {},
'export_vars': {},
'deactivate_scripts': (),
'activate_scripts': (),
}
conda_default_env = self.environ.get('CONDA_DEFAULT_ENV', self._default_env(conda_prefix))
new_path = self.pathsep_join(self._replace_prefix_in_path(conda_prefix, conda_prefix))
set_vars = {}
conda_prompt_modifier = self._prompt_modifier(conda_default_env)
self._update_prompt(set_vars, conda_prompt_modifier)
# environment variables are set only to aid transition from conda 4.3 to conda 4.4
return {
'unset_vars': (),
'set_vars': set_vars,
'export_vars': {
'PATH': new_path,
'CONDA_SHLVL': conda_shlvl,
'CONDA_PROMPT_MODIFIER': self._prompt_modifier(conda_default_env),
},
'deactivate_scripts': self._get_deactivate_scripts(conda_prefix),
'activate_scripts': self._get_activate_scripts(conda_prefix),
}
def _get_starting_path_list(self):
path = self.environ.get('PATH', '')
if on_win:
# On Windows, the Anaconda Python interpreter prepends sys.prefix\Library\bin on
# startup. It's a hack that allows users to avoid using the correct activation
# procedure; a hack that needs to go away because it doesn't add all the paths.
# See: https://github.com/AnacondaRecipes/python-feedstock/blob/master/recipe/0005-Win32-Ensure-Library-bin-is-in-os.environ-PATH.patch # NOQA
# But, we now detect if that has happened because:
# 1. In future we would like to remove this hack and require real activation.
# 2. We should not assume that the Anaconda Python interpreter is being used.
path_split = path.split(os.pathsep)
library_bin = r"%s\Library\bin" % (sys.prefix)
# ^^^ deliberately the same as: https://github.com/AnacondaRecipes/python-feedstock/blob/8e8aee4e2f4141ecfab082776a00b374c62bb6d6/recipe/0005-Win32-Ensure-Library-bin-is-in-os.environ-PATH.patch#L20 # NOQA
if paths_equal(path_split[0], library_bin):
return path_split[1:]
else:
return path_split
else:
return path.split(os.pathsep)
@staticmethod
def _get_path_dirs(prefix):
if on_win: # pragma: unix no cover
yield prefix.rstrip("\\")
yield join(prefix, 'Library', 'mingw-w64', 'bin')
yield join(prefix, 'Library', 'usr', 'bin')
yield join(prefix, 'Library', 'bin')
yield join(prefix, 'Scripts')
yield join(prefix, 'bin')
else:
yield join(prefix, 'bin')
def _get_path_dirs2(self, prefix):
if on_win: # pragma: unix no cover
yield prefix
yield self.sep.join((prefix, 'Library', 'mingw-w64', 'bin'))
yield self.sep.join((prefix, 'Library', 'usr', 'bin'))
yield self.sep.join((prefix, 'Library', 'bin'))
yield self.sep.join((prefix, 'Scripts'))
yield self.sep.join((prefix, 'bin'))
else:
yield self.sep.join((prefix, 'bin'))
def _add_prefix_to_path(self, prefix, starting_path_dirs=None):
prefix = self.path_conversion(prefix)
if starting_path_dirs is None:
path_list = list(self.path_conversion(self._get_starting_path_list()))
else:
path_list = list(self.path_conversion(starting_path_dirs))
path_list[0:0] = list(self._get_path_dirs2(prefix))
return tuple(path_list)
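    # Illustrative effect (the prefix path is an assumption): on POSIX,
    # _add_prefix_to_path('/opt/conda/envs/foo') prepends '/opt/conda/envs/foo/bin'
    # to the current PATH entries; on Windows the Library\bin, Scripts, etc.
    # directories of the prefix are prepended as well.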
def _remove_prefix_from_path(self, prefix, starting_path_dirs=None):
return self._replace_prefix_in_path(prefix, None, starting_path_dirs)
def _replace_prefix_in_path(self, old_prefix, new_prefix, starting_path_dirs=None):
old_prefix = self.path_conversion(old_prefix)
new_prefix = self.path_conversion(new_prefix)
if starting_path_dirs is None:
path_list = list(self.path_conversion(self._get_starting_path_list()))
else:
path_list = list(self.path_conversion(starting_path_dirs))
def index_of_path(paths, test_path):
for q, path in enumerate(paths):
if paths_equal(path, test_path):
return q
return None
if old_prefix is not None:
prefix_dirs = tuple(self._get_path_dirs2(old_prefix))
first_idx = index_of_path(path_list, prefix_dirs[0])
if first_idx is None:
first_idx = 0
else:
last_idx = index_of_path(path_list, prefix_dirs[-1])
assert last_idx is not None
del path_list[first_idx:last_idx + 1]
else:
first_idx = 0
if new_prefix is not None:
path_list[first_idx:first_idx] = list(self._get_path_dirs2(new_prefix))
return tuple(path_list)
def _update_prompt(self, set_vars, conda_prompt_modifier):
if not context.changeps1:
return
if self.shell == 'posix':
ps1 = self.environ.get('PS1', '')
current_prompt_modifier = self.environ.get('CONDA_PROMPT_MODIFIER')
if current_prompt_modifier:
ps1 = re.sub(re.escape(current_prompt_modifier), r'', ps1)
# Because we're using single-quotes to set shell variables, we need to handle the
# proper escaping of single quotes that are already part of the string.
# Best solution appears to be https://stackoverflow.com/a/1250279
ps1 = ps1.replace("'", "'\"'\"'")
set_vars.update({
'PS1': conda_prompt_modifier + ps1,
})
elif self.shell == 'csh':
prompt = self.environ.get('prompt', '')
current_prompt_modifier = self.environ.get('CONDA_PROMPT_MODIFIER')
if current_prompt_modifier:
prompt = re.sub(re.escape(current_prompt_modifier), r'', prompt)
set_vars.update({
'prompt': conda_prompt_modifier + prompt,
})
def _default_env(self, prefix):
if prefix == context.root_prefix:
return 'base'
return basename(prefix) if basename(dirname(prefix)) == 'envs' else prefix
def _prompt_modifier(self, conda_default_env):
return "(%s) " % conda_default_env if context.changeps1 else ""
def _get_activate_scripts(self, prefix):
return self.path_conversion(sorted(glob(join(
prefix, 'etc', 'conda', 'activate.d', '*' + self.script_extension
))))
def _get_deactivate_scripts(self, prefix):
return self.path_conversion(sorted(glob(join(
prefix, 'etc', 'conda', 'deactivate.d', '*' + self.script_extension
)), reverse=True))
def expand(path):
return abspath(expanduser(expandvars(path)))
def ensure_binary(value):
try:
return value.encode('utf-8')
except AttributeError: # pragma: no cover
# AttributeError: '<>' object has no attribute 'encode'
# In this case assume already binary type and do nothing
return value
def ensure_fs_path_encoding(value):
try:
return value.decode(FILESYSTEM_ENCODING)
except AttributeError:
return value
def native_path_to_unix(paths): # pragma: unix no cover
# on windows, uses cygpath to convert windows native paths to posix paths
if not on_win:
return path_identity(paths)
if paths is None:
return None
from subprocess import CalledProcessError, PIPE, Popen
from shlex import split
command = 'cygpath --path -f -'
single_path = isinstance(paths, string_types)
joined = paths if single_path else ("%s" % os.pathsep).join(paths)
if hasattr(joined, 'encode'):
joined = joined.encode('utf-8')
try:
p = Popen(split(command), stdin=PIPE, stdout=PIPE, stderr=PIPE)
except EnvironmentError as e:
if e.errno != ENOENT:
raise
        # This code path should (hopefully) never be hit by real conda installs. It's here
# as a backup for tests run under cmd.exe with cygpath not available.
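        # The regex below approximates cygpath: drive letters become leading path
        # components, backslashes become forward slashes, and ';' separators become
        # ':', e.g. 'C:\Users\foo;D:\bar' -> '/C/Users/foo:/D/bar'.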
def _translation(found_path): # NOQA
found = found_path.group(1).replace("\\", "/").replace(":", "").replace("//", "/")
return "/" + found.rstrip("/")
joined = ensure_fs_path_encoding(joined)
stdout = re.sub(
r'([a-zA-Z]:[\/\\\\]+(?:[^:*?\"<>|;]+[\/\\\\]*)*)',
_translation,
joined
).replace(";/", ":/").rstrip(";")
else:
stdout, stderr = p.communicate(input=joined)
rc = p.returncode
if rc != 0 or stderr:
message = "\n stdout: %s\n stderr: %s\n rc: %s\n" % (stdout, stderr, rc)
print(message, file=sys.stderr)
raise CalledProcessError(rc, command, message)
if hasattr(stdout, 'decode'):
stdout = stdout.decode('utf-8')
stdout = stdout.strip()
final = stdout and stdout.split(':') or ()
return final[0] if single_path else tuple(final)
def path_identity(paths):
if isinstance(paths, string_types):
return paths
elif paths is None:
return None
else:
return tuple(paths)
def paths_equal(path1, path2):
if on_win:
return normcase(abspath(path1)) == normcase(abspath(path2))
else:
return abspath(path1) == abspath(path2)
on_win = bool(sys.platform == "win32")
PY2 = sys.version_info[0] == 2
FILESYSTEM_ENCODING = sys.getfilesystemencoding()
if PY2: # pragma: py3 no cover
string_types = basestring, # NOQA
text_type = unicode # NOQA
def iteritems(d, **kw):
return d.iteritems(**kw)
else: # pragma: py2 no cover
string_types = str,
text_type = str
def iteritems(d, **kw):
return iter(d.items(**kw))
def main(argv=None):
from .common.compat import init_std_stream_encoding
init_std_stream_encoding()
argv = argv or sys.argv
assert len(argv) >= 3
assert argv[1].startswith('shell.')
shell = argv[1].replace('shell.', '', 1)
activator_args = argv[2:]
activator = Activator(shell, activator_args)
try:
print(activator.execute(), end='')
return 0
except Exception as e:
from . import CondaError
if isinstance(e, CondaError):
print(text_type(e), file=sys.stderr)
return e.return_code
else:
raise
if __name__ == '__main__':
sys.exit(main())
| 39.353488
| 218
| 0.599653
|
dbf0db4db74b463c78ff2a7f2fe17f3d69fa44c2
| 6,737
|
py
|
Python
|
src/ProcessingT.py
|
94CD94/FaciesClassification
|
23d56378a8d68d036fee4cc945a8f03b18a07043
|
[
"MIT"
] | null | null | null |
src/ProcessingT.py
|
94CD94/FaciesClassification
|
23d56378a8d68d036fee4cc945a8f03b18a07043
|
[
"MIT"
] | null | null | null |
src/ProcessingT.py
|
94CD94/FaciesClassification
|
23d56378a8d68d036fee4cc945a8f03b18a07043
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
import seaborn as sns
from ROC import multiclass_ROC
from Learning_curve import plot_learning_curve
from sklearn.utils.testing import ignore_warnings
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from PLOT import Plots
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import ShuffleSplit
from Grids import Grid
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
import plotly.plotly as py
import plotly.graph_objs as go
from Transform import Transform
from sklearn.utils import check_random_state
from sklearn.preprocessing import KBinsDiscretizer
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline as imbPipeline
from sklearn.model_selection import validation_curve
from sklearn.decomposition import PCA
import graphviz
from sklearn.model_selection import cross_validate
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from SPLIT import get_safe_balanced_split
#dot_data = tree.export_graphviz(d, out_file=None)
#graph = graphviz.Source(dot_data)
#graph.render("iris")
#IMPORT DATA
cols=['VP','VS','RHO','Y']
X=pd.read_csv('LOGS2.csv', sep=';')
X=X[cols]
Log=X.loc[X.VP < 1900 ].copy()
X.drop(Log.loc[Log.Y==1].index,axis=0,inplace=True)
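# Data augmentation: add zero-mean Gaussian noise to each log, scaled by that column's standard deviation (one draw per sample).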
X.VP=X.VP+np.random.normal(0,X.VP.std(),X.shape[0])
X.VS=X.VS+np.random.normal(0,X.VS.std(),X.shape[0])
X.RHO=X.RHO+np.random.normal(0,X.RHO.std(),X.shape[0])
Xt=pd.read_csv('AVAIMP.txt',sep=';')
Xt=Xt[cols]
#Log=Xt.loc[Xt.VP < 1900 ].copy()
#Xt.drop(Log.loc[Log.Y==1].index,axis=0,inplace=True)
#Xt=Test  # 'Test' is only built in the commented-out block further down; keep the Xt loaded from AVAIMP.txt
X=X.sample(frac=1)
y=X.Y
yt=Xt.Y
T=Transform()
T.transform(X)
T.transform(Xt)
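# Feature set: the raw logs plus elastic attributes that Transform presumably derives (bulk modulus K, P/S impedances Zp/Zs, shear modulus u, Lame parameter).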
columns=['RHO','VP','VS','K','Zp','Zs','u','Lame']
X=X[columns]
Xt=Xt[columns]
#Xt=pd.read_csv('NUOVOTEST.txt', sep=';')
#cols2=['Z','RHO','VP','VS','PIGE','SW','SH','Y']
#Xt.columns=cols2
#Xt=Xt[cols2]
#colors = ["pale red", "denim blue", "medium green"]
#
#Xt2=pd.read_csv('Avares.txt', sep=';')
#Xt2.columns=cols
#Xt2.RHO=Xt2.RHO/1000
#X=X.sample(random_state=1,frac=1).reset_index(drop=True)
#
#a=pd.read_csv('Test.txt', sep=';')
#a.columns=['1','2','3']
#Test=pd.DataFrame()
#for i in a['1']:
# Test=Test.append(X.loc[(np.abs(np.round(X.VP,2) - i)).argmin()].transpose())
# X.drop((np.abs(np.round(X.VP,2) - i)).argmin(),inplace=True)
#Xt=Test
#
#Log=Xt.loc[Xt.VP < 1900 ].copy()
#Xt.drop(Log.loc[Log.Y==1].index,axis=0,inplace=True)
#
#random_state=1
#random_state=check_random_state(random_state)
#y=X.Y
#yt=Xt.Y
##yt2=Xt2.Y
#pl=Plots()
#T=Transform()
#T.transform(X)
#T.transform(Xt)
#scale=StandardScaler()
#grids=Grid()
#columns=['VP','VS','RHO','K','Zp','Zs','u','Lame']
#
#X=X[columns]
#Xt=Xt[columns]
#Xt2=Xt2[columns]
#
#cv = StratifiedKFold(n_splits=5, random_state=random_state)
##
#estimators = [ ('scale',scale),('sm',sm),('clf',RandomForestClassifier(random_state=random_state))]
#
#
#pipe = imbPipeline(estimators1)
#
#param_grid = dict(sm__k_neighbors=range(2,4) ,sm__sampling_strategy=smotgrid ,clf__n_neighbors=Grid.KNN()['n_neighbors'],clf__p=Grid.KNN()['p'],clf__weights=Grid.KNN()['weights'])
#param_grid1 = dict(clf__n_neighbors=Grid.KNN()['n_neighbors'],clf__p=Grid.KNN()['p'],clf__weights=Grid.KNN()['weights'])
#
##title = "Learning Curves"
##plot_learning_curve(pipe,title, X, y , ylim=None, cv=cv,
## n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5))
##plt.show()
#
#rid_search = GridSearchCV(pipe, param_grid=param_grid1,cv=cv,n_jobs=-1,verbose=1,scoring='f1_micro')
##rid_search.fit(X,y)
#
#yp=rid_search.best_estimator_.predict(Xt)
#
#with open("Final_report.txt", "w") as text_file:
# print(f"rep: {rep}", file=text_file)
#
#
#rep=classification_report(yt,yp,digits=5 ,target_names=['Shale','BrineSand','GasSand'])
#
#cvr=rid_search.cv_results_
#
#confusion_1 = confusion_matrix(yt, yp)
#
#
#f, ax = plt.subplots()
#hm = sns.heatmap(confusion_1, annot=True, cmap="coolwarm",fmt='.2f',
# linewidths=.05)
#plt.show()
#
#yp=np.asarray(yp)
#yp.shape=[yp.size,1]
#yy=np.concatenate([yp,yp,yp],axis=1)
#plt.imshow(yy, extent=[0,200,0,1400],aspect=1)
#plt.axis('scaled')
#
##if hasattr(rid_search.best_estimator_, "predict_proba"):
## yp=rid_search.best_estimator_.predict_proba(Xt)
##else:
## yp=rid_search.best_estimator_.decision_function(Xt)
##
##multiclass_ROC(Xt,yt,yp)
#ylim=[(0.6,1)]
#title = "Learning Curves "
#plot_learning_curve(pipe2, title,X, y, ylim=ylim, cv=cv,
# n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5))
#plt.show()
#yp2=rid_search.best_estimator_.predict(Xt2)
#rep2=classification_report(yt,yp,digits=5 ,target_names=['Shale','BrineSand','GasSand'])  # needs 'yp' from the commented-out grid search above
yp2=np.asarray(yt)
yp2.shape=[yp2.size,1]
yy=np.concatenate([yp2,yp2,yp2],axis=1)
plt.imshow(yy, extent=[0,200,0,1400],aspect=1)
plt.axis('scaled')
plt.savefig('Well_pred')
#
#
x=np.linspace(0,1,40)
plt.plot(x,-np.log(1-x),label='y=0')
plt.plot(x,-np.log(x),label='y=1')
plt.xlabel('P1')
plt.legend(loc=9)
a=pd.DataFrame(X.VP.loc[X.Y==1])
a.columns=['Shale']
b=pd.DataFrame(X.VP.loc[ X.Y==2])
b.columns=['Brine Sand']
c=pd.DataFrame(X.VP.loc[X.Y==3])
c.reset_index(inplace=True,drop=True)
c.columns=['Gas Sand']
x=X[columns]
scale=StandardScaler()  # the scaler was only created inside the commented-out block above
x=scale.fit_transform(x)
x=pd.DataFrame(x,columns=columns)
x=pd.concat([x,X.Y],axis=1)
fig = plt.figure()
fig.suptitle('VS Density estimation')
ax = fig.add_subplot(111)
sns.kdeplot(X['VS'].loc[X.Y==1],kernel='gau',color='r',shade=True,legend=False)
sns.kdeplot(X['VS'].loc[X.Y==2],kernel='gau',color='b',shade=True,legend=False)
sns.kdeplot(X['VS'].loc[X.Y==3],kernel='gau',color='g',shade=True,legend=False)
| 32.23445
| 181
| 0.703132
|
e0eb0c4b0a6718d7ee33fbe61a933be37dd4f0bd
| 1,177
|
py
|
Python
|
gcloud/storage/demo/demo.py
|
rakyll/gcloud-python
|
ad1f0f735963dbc138d6d8e20ed0dbf5e6fea0d3
|
[
"Apache-2.0"
] | 1
|
2019-04-16T11:09:50.000Z
|
2019-04-16T11:09:50.000Z
|
gcloud/storage/demo/demo.py
|
rakyll/gcloud-python
|
ad1f0f735963dbc138d6d8e20ed0dbf5e6fea0d3
|
[
"Apache-2.0"
] | null | null | null |
gcloud/storage/demo/demo.py
|
rakyll/gcloud-python
|
ad1f0f735963dbc138d6d8e20ed0dbf5e6fea0d3
|
[
"Apache-2.0"
] | null | null | null |
# Welcome to the gCloud Storage Demo! (hit enter)
# We're going to walk through some of the basics...,
# Don't worry though. You don't need to do anything, just keep hitting enter...
# Let's start by importing the demo module and getting a connection:
from gcloud.storage import demo
connection = demo.get_connection()
# OK, now let's look at all of the buckets...
print(connection.get_all_buckets())  # This might take a second...
# Now let's create a new bucket...
import time
bucket_name = ("bucket-%s" % time.time()).replace(".", "") # Get rid of dots...
print(bucket_name)
bucket = connection.create_bucket(bucket_name)
print(bucket)
# Let's look at all of the buckets again...
print(connection.get_all_buckets())
# How about we create a new key inside this bucket.
key = bucket.new_key("my-new-file.txt")
# Now let's put some data in there.
key.set_contents_from_string("this is some data!")
# ... and we can read that data back again.
print(key.get_contents_as_string())
# Now let's delete that key.
print(key.delete())
# And now that we're done, let's delete that bucket...
print(bucket.delete())
# Alright! That's all!
# Here's an interactive prompt for you now...
| 29.425
| 79
| 0.726423
|
917a4daa5f3085ab6cce9aa0b5885667705dd1de
| 336
|
py
|
Python
|
app/rest_api/admin.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/rest_api/admin.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
app/rest_api/admin.py
|
21vcloud/Controller
|
63169d220f412330a22e3a2fe9964c73893d4e0f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from rest_api.models import AccessKey
class AccessKeyForm(admin.ModelAdmin):
list_display = ('id', 'access_key', 'secret_key')
search_fields = ('id', 'access_key', 'secret_key')
# admin.site.register(AccessKey, AccessKeyForm)
| 24
| 54
| 0.738095
|
09c90f4e546ee2eda6e635deb3857b53383d2701
| 974
|
py
|
Python
|
logic/utilities.py
|
rdelacrz/connect-four
|
7123d6e649aadcb76f429c61dde405527211c5b2
|
[
"MIT"
] | null | null | null |
logic/utilities.py
|
rdelacrz/connect-four
|
7123d6e649aadcb76f429c61dde405527211c5b2
|
[
"MIT"
] | null | null | null |
logic/utilities.py
|
rdelacrz/connect-four
|
7123d6e649aadcb76f429c61dde405527211c5b2
|
[
"MIT"
] | null | null | null |
"""
Defines utility functions used by both the AI and the core logic.
"""
# Third-party modules
from cefpython3 import cefpython as cef
def js_callback(func):
"""
Takes the return value of the function being wrapped and passes it through the callback function (if any).
It is meant for JavaScript callback purposes, which cannot return a value because inter-process messaging
is asynchronous.
"""
def wrapper(*args):
# Grabs callback function from positional arguments (if one is provided)
callback_fn = None
func_args = args
if type(args[-1]) is cef.JavascriptCallback:
callback_fn = args[-1]
func_args = args[:-1]
# Passes return value from function accordingly (depending on whether a callback function is passed or not)
ret = func(*func_args)
if callback_fn is not None:
callback_fn.Call(ret)
else:
return ret
return wrapper
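# A minimal usage sketch (hypothetical function name, not part of this module).
# When the decorated function is invoked from JavaScript, cefpython appends a
# JavascriptCallback as the last argument and the return value is delivered via
# callback.Call(); when invoked from Python it simply returns the value.
#
# @js_callback
# def get_board_state():
#     return {'columns': 7, 'rows': 6}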
| 32.466667
| 115
| 0.663244
|
520eae1acab1968bb364674a4c3f24ccffa5012d
| 6,884
|
py
|
Python
|
praw/models/util.py
|
Theonefoster/praw
|
3d79b43a5193387974455f3d3ccdfff644714d09
|
[
"BSD-2-Clause"
] | 4
|
2018-09-28T01:47:53.000Z
|
2021-02-13T20:21:14.000Z
|
praw/models/util.py
|
Theonefoster/praw
|
3d79b43a5193387974455f3d3ccdfff644714d09
|
[
"BSD-2-Clause"
] | 4
|
2018-09-19T06:27:24.000Z
|
2018-09-22T00:44:41.000Z
|
praw/models/util.py
|
Theonefoster/praw
|
3d79b43a5193387974455f3d3ccdfff644714d09
|
[
"BSD-2-Clause"
] | null | null | null |
"""Provide helper classes used by other models."""
import random
import time
class BoundedSet(object):
"""A set with a maximum size that evicts the oldest items when necessary.
This class does not implement the complete set interface.
"""
def __init__(self, max_items):
"""Construct an instance of the BoundedSet."""
self.max_items = max_items
self._fifo = []
self._set = set()
def __contains__(self, item):
"""Test if the BoundedSet contains item."""
return item in self._set
def add(self, item):
"""Add an item to the set discarding the oldest item if necessary."""
if len(self._set) == self.max_items:
self._set.remove(self._fifo.pop(0))
self._fifo.append(item)
self._set.add(item)
class ExponentialCounter(object):
"""A class to provide an exponential counter with jitter."""
def __init__(self, max_counter):
"""Initialize an instance of ExponentialCounter.
:param max_counter: The maximum base value. Note that the computed
value may be 3.125% higher due to jitter.
"""
self._base = 1
self._max = max_counter
def counter(self):
"""Increment the counter and return the current value with jitter."""
max_jitter = self._base / 16.
value = self._base + random.random() * max_jitter - max_jitter / 2
self._base = min(self._base * 2, self._max)
return value
def reset(self):
"""Reset the counter to 1."""
self._base = 1
def permissions_string(permissions, known_permissions):
"""Return a comma separated string of permission changes.
:param permissions: A list of strings, or ``None``. These strings can
exclusively contain ``+`` or ``-`` prefixes, or contain no prefixes at
all. When prefixed, the resulting string will simply be the joining of
these inputs. When not prefixed, all permissions are considered to be
additions, and all permissions in the ``known_permissions`` set that
aren't provided are considered to be removals. When None, the result is
``+all``.
:param known_permissions: A set of strings representing the available
permissions.
"""
to_set = []
if permissions is None:
to_set = ['+all']
else:
to_set = ['-all']
omitted = sorted(known_permissions - set(permissions))
to_set.extend('-{}'.format(x) for x in omitted)
to_set.extend('+{}'.format(x) for x in permissions)
return ','.join(to_set)
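# For example (hypothetical permission names, not part of this module):
#   permissions_string(None, {'wiki', 'posts'})      returns '+all'
#   permissions_string(['posts'], {'wiki', 'posts'}) returns '-all,-wiki,+posts'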
def stream_generator(function, pause_after=None, skip_existing=False):
"""Yield new items from ListingGenerators and ``None`` when paused.
:param function: A callable that returns a ListingGenerator, e.g.
``subreddit.comments`` or ``subreddit.new``.
:param pause_after: An integer representing the number of requests that
result in no new items before this function yields ``None``,
effectively introducing a pause into the stream. A negative value
yields ``None`` after items from a single response have been yielded,
regardless of number of new items obtained in that response. A value of
``0`` yields ``None`` after every response resulting in no new items,
and a value of ``None`` never introduces a pause (default: None).
:param skip_existing: When True does not yield any results from the first
request thereby skipping any items that existed in the stream prior to
starting the stream (default: False).
.. note:: This function internally uses an exponential delay with jitter
between subsequent responses that contain no new results, up to a
       maximum delay of just over 16 seconds. In practice that means that the
time before pause for ``pause_after=N+1`` is approximately twice the
time before pause for ``pause_after=N``.
For example, to create a stream of comment replies, try:
.. code:: python
reply_function = reddit.inbox.comment_replies
for reply in praw.models.util.stream_generator(reply_function):
print(reply)
To pause a comment stream after six responses with no new
comments, try:
.. code:: python
subreddit = reddit.subreddit('redditdev')
for comment in subreddit.stream.comments(pause_after=6):
if comment is None:
break
print(comment)
To resume fetching comments after a pause, try:
.. code:: python
subreddit = reddit.subreddit('help')
comment_stream = subreddit.stream.comments(pause_after=5)
for comment in comment_stream:
if comment is None:
break
print(comment)
# Do any other processing, then try to fetch more data
for comment in comment_stream:
if comment is None:
break
print(comment)
To bypass the internal exponential backoff, try the following. This
approach is useful if you are monitoring a subreddit with infrequent
    activity, and you want to consistently learn about new items from the
stream as soon as possible, rather than up to a delay of just over sixteen
seconds.
.. code:: python
subreddit = reddit.subreddit('help')
for comment in subreddit.stream.comments(pause_after=0):
if comment is None:
continue
print(comment)
"""
before_fullname = None
exponential_counter = ExponentialCounter(max_counter=16)
seen_fullnames = BoundedSet(301)
without_before_counter = 0
responses_without_new = 0
valid_pause_after = pause_after is not None
while True:
found = False
newest_fullname = None
limit = 100
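        # Without a 'before' anchor, nudge the requested limit on each pass so the
        # exact same request is not repeated indefinitely.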
if before_fullname is None:
limit -= without_before_counter
without_before_counter = (without_before_counter + 1) % 30
for item in reversed(list(function(
limit=limit, params={'before': before_fullname}))):
if item.fullname in seen_fullnames:
continue
found = True
seen_fullnames.add(item.fullname)
newest_fullname = item.fullname
if not skip_existing:
yield item
before_fullname = newest_fullname
skip_existing = False
if valid_pause_after and pause_after < 0:
yield None
elif found:
exponential_counter.reset()
responses_without_new = 0
else:
responses_without_new += 1
if valid_pause_after and responses_without_new > pause_after:
exponential_counter.reset()
responses_without_new = 0
yield None
else:
time.sleep(exponential_counter.counter())
| 36.041885
| 79
| 0.644102
|
a68e8a9dc72fa2189832f09062e8996dd4d65a98
| 6,754
|
py
|
Python
|
storm_control/hal4000/miscControl/zStage.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 47
|
2015-02-11T16:05:54.000Z
|
2022-03-26T14:13:12.000Z
|
storm_control/hal4000/miscControl/zStage.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 110
|
2015-01-30T03:53:41.000Z
|
2021-11-03T15:58:44.000Z
|
storm_control/hal4000/miscControl/zStage.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 61
|
2015-01-09T18:31:27.000Z
|
2021-12-21T13:07:51.000Z
|
#!/usr/bin/env python
"""
The z stage UI.
Hazen Babcock 05/18
"""
import os
from PyQt5 import QtCore, QtGui, QtWidgets
import storm_control.sc_library.parameters as params
import storm_control.hal4000.halLib.halDialog as halDialog
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.qtdesigner.z_stage_ui as zStageUi
class ZStageView(halDialog.HalDialog):
"""
Manages the z stage GUI.
"""
def __init__(self, configuration = None, **kwds):
super().__init__(**kwds)
self.parameters = params.StormXMLObject()
self.retracted_z = configuration.get("retracted_z")
self.z_stage_fn = None
# Load UI
self.ui = zStageUi.Ui_Dialog()
self.ui.setupUi(self)
icon_path = os.path.join(os.path.dirname(__file__),"../icons/")
self.ui.upLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2uparrow-128.png")))
self.ui.upLButton.clicked.connect(self.handleUpLButton)
self.ui.upSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1uparrow-128.png")))
self.ui.upSButton.clicked.connect(self.handleUpSButton)
self.ui.downSButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "1downarrow-128.png")))
self.ui.downSButton.clicked.connect(self.handleDownSButton)
self.ui.downLButton.setIcon(QtGui.QIcon(os.path.join(icon_path, "2downarrow-128.png")))
self.ui.downLButton.clicked.connect(self.handleDownLButton)
self.ui.homeButton.clicked.connect(self.handleHomeButton)
self.ui.retractButton.clicked.connect(self.handleRetractButton)
self.ui.zeroButton.clicked.connect(self.handleZeroButton)
self.ui.goButton.clicked.connect(self.handleGoButton)
# Set to minimum size & fix.
self.adjustSize()
self.setFixedSize(self.width(), self.height())
# Add parameters.
self.parameters.add(params.ParameterRangeFloat(description ="Z Stage large step size",
name = "z_large_step",
value = configuration.get("large_step"),
min_value = 0.001,
max_value = 100.0))
self.parameters.add(params.ParameterRangeFloat(description ="Z Stage small step size",
name = "z_small_step",
value = configuration.get("small_step"),
min_value = 0.001,
max_value = 10.0))
self.setEnabled(False)
def getParameters(self):
return self.parameters
def handleDownLButton(self, boolean):
self.z_stage_fn.goRelative(-1.0*self.parameters.get("z_large_step"))
def handleDownSButton(self, boolean):
self.z_stage_fn.goRelative(-1.0*self.parameters.get("z_small_step"))
def handleGoButton(self, boolean):
self.z_stage_fn.goAbsolute(self.ui.goSpinBox.value())
def handleHomeButton(self, boolean):
self.z_stage_fn.goAbsolute(0.0)
def handleRetractButton(self, boolean):
self.z_stage_fn.goAbsolute(self.retracted_z)
def handleUpLButton(self, boolean):
self.z_stage_fn.goRelative(self.parameters.get("z_large_step"))
def handleUpSButton(self, boolean):
self.z_stage_fn.goRelative(self.parameters.get("z_small_step"))
def handleZeroButton(self, boolean):
self.z_stage_fn.zero()
def handleZStagePosition(self, z_value):
self.ui.zPosLabel.setText("{0:.2f}".format(z_value))
# def handleZValueChanged(self, z_value):
# self.z_stage_fn.goAbsolute(z_value)
def newParameters(self, parameters):
self.parameters.setv("z_large_step", parameters.get("z_large_step"))
self.parameters.setv("z_small_step", parameters.get("z_small_step"))
def setFunctionality(self, z_stage_fn):
self.z_stage_fn = z_stage_fn
self.z_stage_fn.zStagePosition.connect(self.handleZStagePosition)
self.ui.goSpinBox.setMinimum(self.z_stage_fn.getMinimum())
self.ui.goSpinBox.setMaximum(self.z_stage_fn.getMaximum())
self.setEnabled(True)
class ZStage(halModule.HalModule):
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.configuration = module_params.get("configuration")
self.view = ZStageView(module_name = self.module_name,
configuration = module_params.get("configuration"))
self.view.halDialogInit(qt_settings,
module_params.get("setup_name") + " z stage")
def cleanUp(self, qt_settings):
self.view.cleanUp(qt_settings)
def handleResponse(self, message, response):
if message.isType("get functionality"):
self.view.setFunctionality(response.getData()["functionality"])
def processMessage(self, message):
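        # HAL message dispatch: at configure time, advertise the dialog in the menu and
        # request the z-stage functionality; forward parameter changes to the view; show
        # the dialog when asked.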
if message.isType("configure1"):
self.sendMessage(halMessage.HalMessage(m_type = "add to menu",
data = {"item name" : "Z Stage",
"item data" : "z stage"}))
self.sendMessage(halMessage.HalMessage(m_type = "get functionality",
data = {"name" : self.configuration.get("z_stage_fn")}))
self.sendMessage(halMessage.HalMessage(m_type = "initial parameters",
data = {"parameters" : self.view.getParameters()}))
elif message.isType("new parameters"):
p = message.getData()["parameters"]
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"old parameters" : self.view.getParameters().copy()}))
self.view.newParameters(p.get(self.module_name))
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"new parameters" : self.view.getParameters()}))
elif message.isType("show"):
if (message.getData()["show"] == "z stage"):
self.view.show()
elif message.isType("start"):
if message.getData()["show_gui"]:
self.view.showIfVisible()
| 41.950311
| 124
| 0.599349
|
b35ff9d7da0da0d6f0e30738d48d3f73a591fa31
| 4,712
|
py
|
Python
|
getpricepaid.py
|
spencer84/Chester-Data-Analytics
|
f64fe2a131ce6dc39c017476812435ef2b874733
|
[
"MIT"
] | null | null | null |
getpricepaid.py
|
spencer84/Chester-Data-Analytics
|
f64fe2a131ce6dc39c017476812435ef2b874733
|
[
"MIT"
] | null | null | null |
getpricepaid.py
|
spencer84/Chester-Data-Analytics
|
f64fe2a131ce6dc39c017476812435ef2b874733
|
[
"MIT"
] | null | null | null |
import requests
import datetime
import sqlite3
import queue
def get_postcode_district(postcode):
""" Returns the postcode district/area from a given postcode
"""
if ' ' in postcode:
return postcode.split(' ')[0]
elif len(postcode) == 6:
return postcode[:3]
else:
return postcode[:4]
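# For example: 'CH1 2AB' -> 'CH1', 'CH12AB' -> 'CH1' (six characters, no space),
# and 'CH659AB' -> 'CH65' (seven characters, no space).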
class LandData:
def __init__(self):
self.url = 'https://landregistry.data.gov.uk/data/ppi/transaction-record.json?'
self.properties = 'transactionId,transactionDate,pricePaid,propertyAddress.' \
'paon,propertyAddress.street,propertyAddress.postcode'
self.town = None
self.page = 0
self.results_len = 0
self.params = {'_view': 'basic',
'_properties': self.properties,
'_pageSize': 200,
'propertyAddress.town': self.town,
'_page': self.page}
self.status = None
self.conn = None
self.cur = None
self.results_to_add = queue.Queue()
self.request_status_code = None
self.unique_postcode_areas = []
def create_connection(self, db):
self.conn = sqlite3.connect(db)
print("Connected to " + db)
def create_cursor(self):
self.cur = self.conn.cursor()
def close_connection(self):
self.conn.commit()
self.conn.close()
print("Connection closed")
def price_paid_query(self):
"""
        Queries the Land Registry API with the given 'town' attribute of the object. Updates the request_status_code
and the results_len attributes
"""
self.params = {'_view': 'basic',
'_properties': self.properties,
'_pageSize': 200,
'propertyAddress.town': self.town,
'_page': self.page}
response = requests.get(self.url, params=self.params)
self.request_status_code = response.status_code
results = response.json()['result']['items']
self.results_len = len(results)
# Need to write these results to the database
curr_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Iterate through the results dataframe, adding each value to the DB
for i in results:
vals_to_insert = [self.town, None, None, None, None, i['transactionDate']
, i['pricePaid'], curr_time]
property_details = i['propertyAddress']
# KeyError thrown if a given key isn't returned; Need to check before assigning value
if 'postcode' in property_details:
postcode_district = get_postcode_district(i['propertyAddress']['postcode'])
vals_to_insert[1] = postcode_district
                # If the postcode district is not already recorded, add it to the list.
                # This assumes that a given town covers unique postcode areas (i.e. it is
                # not split over multiple areas).
if postcode_district not in self.unique_postcode_areas:
self.unique_postcode_areas.append(postcode_district)
vals_to_insert[2] = i['propertyAddress']['postcode']
if 'paon' in property_details:
vals_to_insert[3] = i['propertyAddress']['paon']
if 'street' in property_details:
vals_to_insert[4] = i['propertyAddress']['street']
# Add each line of results to the queue of values to be added to the database
self.results_to_add.put(vals_to_insert)
def get_full_price_paid(self):
end_of_results = False
        while not end_of_results:  # loop until a page shorter than the maximum page size is returned
self.price_paid_query()
self.page += 1
if self.results_len < 200:
end_of_results = True
print(self.page)
# Write results to database
print("Writing results to db")
self.data_to_db()
def data_to_db(self):
while not self.results_to_add.empty():
self.cur.execute("INSERT INTO land_reg VALUES(?,?,?,?,?,?,?,?)",tuple(self.results_to_add.get()))
# Find all unique postcode areas
curr_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for area in self.unique_postcode_areas:
self.cur.execute("INSERT INTO data_log VALUES(?,?,?)", (area, 'land_reg', curr_time))
self.conn.commit()
self.conn.close()
return
# This is just for testing
# land = LandData()
# land.town = 'CHESTER'
# land.create_connection('cda.db')
# land.create_cursor()
# land.get_full_price_paid()
| 40.273504
| 120
| 0.598684
|
af52d7bf0ab048723e44f33dd9014740f0b6092e
| 8,001
|
py
|
Python
|
tests/test_cli/test_remove/test_skill.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_remove/test_skill.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cli/test_remove/test_skill.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea remove skill` sub-command."""
import os
import shutil
import tempfile
import unittest.mock
from pathlib import Path
import yaml
import aea
import aea.configurations.base
from aea.cli import cli
from aea.configurations.base import AgentConfig, DEFAULT_AEA_CONFIG_FILE
from packages.fetchai.skills.gym import PUBLIC_ID as GYM_SKILL_PUBLIC_ID
from tests.conftest import AUTHOR, CLI_LOG_OPTION, CliRunner, ROOT_DIR
class TestRemoveSkillWithPublicId:
"""Test that the command 'aea remove skill' works correctly when using the public id."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
cls.skill_id = str(GYM_SKILL_PUBLIC_ID)
cls.skill_name = "gym"
os.chdir(cls.t)
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
)
assert result.exit_code == 0
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
# change default registry path
config = AgentConfig.from_json(yaml.safe_load(open(DEFAULT_AEA_CONFIG_FILE)))
config.registry_path = os.path.join(ROOT_DIR, "packages")
yaml.safe_dump(dict(config.json), open(DEFAULT_AEA_CONFIG_FILE, "w"))
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "add", "--local", "skill", cls.skill_id],
standalone_mode=False,
)
assert result.exit_code == 0
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "remove", "skill", cls.skill_id],
standalone_mode=False,
)
def test_exit_code_equal_to_zero(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 0
def test_directory_does_not_exist(self):
"""Test that the directory of the removed skill does not exist."""
assert not Path("skills", self.skill_name).exists()
def test_skill_not_present_in_agent_config(self):
"""Test that the name of the removed skill is not present in the agent configuration file."""
agent_config = aea.configurations.base.AgentConfig.from_json(
yaml.safe_load(open(DEFAULT_AEA_CONFIG_FILE))
)
assert self.skill_id not in agent_config.skills
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestRemoveSkillFailsWhenSkillIsNotSupported:
"""Test that the command 'aea remove skill' fails when the skill is not supported."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
cls.skill_id = str(GYM_SKILL_PUBLIC_ID)
os.chdir(cls.t)
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
)
assert result.exit_code == 0
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "remove", "skill", cls.skill_id],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_error_message_skill_not_existing(self):
"""Test that the log error message is fixed.
The expected message is: 'The skill '{skill_name}' is not supported.'
"""
s = "The skill '{}' is not supported.".format(self.skill_id)
assert self.result.exception.message == s
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestRemoveSkillFailsWhenExceptionOccurs:
"""Test that the command 'aea remove skill' fails when an exception occurs while removing the directory."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
cls.skill_id = str(GYM_SKILL_PUBLIC_ID)
cls.skill_name = "gym"
os.chdir(cls.t)
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
)
assert result.exit_code == 0
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
assert result.exit_code == 0
os.chdir(cls.agent_name)
# change default registry path
config = AgentConfig.from_json(yaml.safe_load(open(DEFAULT_AEA_CONFIG_FILE)))
config.registry_path = os.path.join(ROOT_DIR, "packages")
yaml.safe_dump(dict(config.json), open(DEFAULT_AEA_CONFIG_FILE, "w"))
result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "add", "--local", "skill", cls.skill_id],
standalone_mode=False,
)
assert result.exit_code == 0
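        # Force shutil.rmtree to raise so that removing the skill directory fails.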
cls.patch = unittest.mock.patch(
"shutil.rmtree", side_effect=BaseException("an exception")
)
cls.patch.start()
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "remove", "skill", cls.skill_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the exit code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.patch.stop()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
| 33.061983
| 111
| 0.599675
|
4c54f4c17a5c394a043e09dee3b224346ae284e7
| 12,508
|
py
|
Python
|
refitt/web/api/endpoint/facility.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 4
|
2020-09-11T01:15:11.000Z
|
2021-05-12T16:46:48.000Z
|
refitt/web/api/endpoint/facility.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 12
|
2021-03-20T03:24:53.000Z
|
2022-02-19T03:20:43.000Z
|
refitt/web/api/endpoint/facility.py
|
refitt/ref
|
3ccc398e7b95f77549ab77884b87f40abdd3effb
|
[
"Apache-2.0"
] | 2
|
2021-02-01T23:49:39.000Z
|
2021-12-11T19:01:23.000Z
|
# SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Facility profile endpoints."""
# type annotations
from typing import Union
# external libs
from flask import request
# internal libs
from ....database.model import Client, Facility, IntegrityError, NotFound
from ..app import application
from ..auth import authenticated, authorization
from ..response import endpoint, ConstraintViolation
from ..tools import require_data, collect_parameters, disallow_parameters
# public interface
__all__ = []
info: dict = {
'Description': 'Request, add, update facility profiles',
'Endpoints': {
'/facility': {},
'/facility/<facility_id>': {},
'/facility/<facility_id>/user': {},
'/facility/<facility_id>/user/<user_id>': {},
}
}
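# Each route handler below registers its own documentation by filling in the
# corresponding entry of the module-level `info` dictionary next to its definition.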
@application.route('/facility', methods=['POST'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def add_facility(admin: Client) -> dict: # noqa: unused client
"""Add new facility profile."""
disallow_parameters(request)
profile = require_data(request, data_format='json', validate=(lambda data: Facility.from_dict(data)))
try:
facility_id = profile.pop('id', None)
if not facility_id:
facility_id = Facility.add(profile).id
else:
Facility.update(facility_id, **profile)
except IntegrityError as error:
raise ConstraintViolation(str(error.args[0])) from error
return {'facility': {'id': facility_id}}
info['Endpoints']['/facility']['POST'] = {
'Description': 'Add or overwrite facility profile',
    'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Payload': {
'Description': 'Facility profile data',
'Type': 'application/json',
},
},
'Responses': {
200: {
'Description': 'Success',
'Payload': {
'Description': 'New facility ID',
'Type': 'application/json'
},
},
400: {'Description': 'JSON payload missing, malformed, or invalid'},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
}
}
@application.route('/facility/<id_or_name>', methods=['GET'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def get_facility(admin: Client, id_or_name: Union[int, str]) -> dict: # noqa: unused client
"""Query for existing facility profile."""
disallow_parameters(request)
try:
facility_id = int(id_or_name)
return {'facility': Facility.from_id(facility_id).to_json()}
except ValueError:
facility_name = str(id_or_name)
return {'facility': Facility.from_name(facility_name).to_json()}
info['Endpoints']['/facility/<facility_id>']['GET'] = {
'Description': 'Request facility profile',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility (or `name`)',
'Type': 'Integer',
}
},
},
'Responses': {
200: {
'Description': 'Success',
'Payload': {
'Description': 'Facility profile',
'Type': 'application/json'
},
},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility does not exist'},
}
}
@application.route('/facility/<int:facility_id>', methods=['PUT'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def update_facility(admin: Client, facility_id: int) -> dict: # noqa: unused client
"""Update facility profile attributes."""
try:
profile = Facility.update(facility_id, **collect_parameters(request, allow_any=True))
except IntegrityError as error:
raise ConstraintViolation(str(error.args[0])) from error
return {'facility': profile.to_json()}
info['Endpoints']['/facility/<facility_id>']['PUT'] = {
'Description': 'Update facility profile attributes',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
}
},
},
'Optional': {
'Parameters': {
'name': {
'Description': 'Unique name for facility',
                'Type': 'String'
},
'latitude': {
'Description': 'Decimal latitude in degrees North',
'Type': 'Float'
},
'longitude': {
'Description': 'Decimal longitude in degrees West',
'Type': 'Float'
},
'elevation': {
'Description': 'Decimal elevation in meters above sea-level',
'Type': 'Float'
},
'limiting_magnitude': {
'Description': 'Decimal apparent magnitude',
'Type': 'Float'
},
'*': {
'Description': 'Arbitrary field added to JSON `data`',
'Type': '*'
}
},
},
'Responses': {
200: {
'Description': 'Success',
'Payload': {
'Description': 'Updated facility profile',
'Type': 'application/json'
},
},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility does not exist'},
}
}
@application.route('/facility/<int:facility_id>', methods=['DELETE'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def delete_facility(admin: Client, facility_id: int) -> dict: # noqa: unused client
"""Delete a facility profile (assuming no existing relationships)."""
disallow_parameters(request)
try:
Facility.delete(facility_id)
except IntegrityError as error:
raise ConstraintViolation(str(error.args[0])) from error
return {'facility': {'id': facility_id}}
info['Endpoints']['/facility/<facility_id>']['DELETE'] = {
'Description': 'Delete facility profile (assuming no existing relationships)',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
}
},
},
'Responses': {
200: {'Description': 'Success'},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility does not exist'},
}
}
@application.route('/facility/<int:facility_id>/user', methods=['GET'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def get_all_facility_users(admin: Client, facility_id: int) -> dict: # noqa: unused client
"""Query for users related to the given facility."""
disallow_parameters(request)
return {
'user': [
user.to_json()
for user in Facility.from_id(facility_id).users()
]
}
info['Endpoints']['/facility/<facility_id>/user']['GET'] = {
'Description': 'Request user profiles associated with this facility',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
}
},
},
'Responses': {
200: {
'Description': 'Success',
'Payload': {
'Description': 'List of user profiles',
'Type': 'application/json'
},
},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility does not exist'},
}
}
@application.route('/facility/<int:facility_id>/user/<int:user_id>', methods=['GET'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def get_facility_user(admin: Client, facility_id: int, user_id: int) -> dict: # noqa: unused client
"""Query for a user related to the given facility."""
disallow_parameters(request)
users = [user.to_json() for user in Facility.from_id(facility_id).users() if user.id == user_id]
if not users:
raise NotFound(f'User ({user_id}) not associated with facility ({facility_id})')
else:
return {'user': users[0]}
info['Endpoints']['/facility/<facility_id>/user/<user_id>']['GET'] = {
'Description': 'Check user is associated with this facility',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
},
'user_id': {
'Description': 'Unique ID for user',
'Type': 'Integer',
}
},
},
'Responses': {
200: {
'Description': 'Success',
'Payload': {
'Description': 'Associated user profile',
'Type': 'application/json'
},
},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility does not exist or user not associated with this facility'},
}
}
@application.route('/facility/<int:facility_id>/user/<int:user_id>', methods=['PUT'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def add_facility_user_association(admin: Client, facility_id: int, user_id: int) -> dict: # noqa: unused client
"""Associate facility with the given user."""
disallow_parameters(request)
Facility.from_id(facility_id).add_user(user_id)
return {}
info['Endpoints']['/facility/<facility_id>/user/<user_id>']['PUT'] = {
'Description': 'Associate user with facility',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
},
'user_id': {
'Description': 'Unique ID for user',
'Type': 'Integer',
}
},
},
'Responses': {
200: {'Description': 'Success'},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility or user does not exist'},
}
}
@application.route('/facility/<int:facility_id>/user/<int:user_id>', methods=['DELETE'])
@endpoint('application/json')
@authenticated
@authorization(level=1)
def delete_facility_user_association(admin: Client, facility_id: int, user_id: int) -> dict: # noqa: unused client
"""Dissociate the facility for the given user."""
disallow_parameters(request)
Facility.from_id(facility_id).delete_user(user_id)
return {}
info['Endpoints']['/facility/<facility_id>/user/<user_id>']['DELETE'] = {
    'Description': 'Disassociate user from facility',
'Permissions': 'Admin (level 1)',
'Requires': {
'Auth': 'Authorization Bearer Token',
'Path': {
'facility_id': {
'Description': 'Unique ID for facility',
'Type': 'Integer',
},
'user_id': {
'Description': 'Unique ID for user',
'Type': 'Integer',
}
},
},
'Responses': {
200: {'Description': 'Success'},
401: {'Description': 'Access level insufficient, revoked, or token expired'},
403: {'Description': 'Token not found or invalid'},
404: {'Description': 'Facility or user does not exist'},
}
}
| 32.829396
| 115
| 0.575951
|
f7de8378bc55ab8464334c04827021106729ea13
| 1,067
|
py
|
Python
|
pywebhooks/__init__.py
|
chadlung/pywebhooks
|
4b5f41be7c3c498a31cb0225cbde8e63c48ce999
|
[
"Apache-2.0"
] | 94
|
2015-04-03T12:10:54.000Z
|
2021-08-30T13:50:48.000Z
|
pywebhooks/__init__.py
|
chadlung/pywebhooks
|
4b5f41be7c3c498a31cb0225cbde8e63c48ce999
|
[
"Apache-2.0"
] | 10
|
2016-06-07T17:34:39.000Z
|
2019-11-23T00:00:09.000Z
|
pywebhooks/__init__.py
|
chadlung/pywebhooks
|
4b5f41be7c3c498a31cb0225cbde8e63c48ce999
|
[
"Apache-2.0"
] | 10
|
2015-04-14T18:03:08.000Z
|
2021-08-30T13:50:49.000Z
|
DEFAULT_DB_NAME = 'pywebhooks'
DEFAULT_ACCOUNTS_TABLE = 'accounts'
DEFAULT_REGISTRATIONS_TABLE = 'registrations'
DEFAULT_TRIGGERED_TABLE = 'triggered_webhooks'
DEFAULT_SUBSCRIPTIONS_TABLE = 'subscriptions'
DEFAULT_TABLE_NAMES = [
DEFAULT_ACCOUNTS_TABLE,
DEFAULT_REGISTRATIONS_TABLE,
DEFAULT_TRIGGERED_TABLE,
DEFAULT_SUBSCRIPTIONS_TABLE
]
# Timeout (in seconds) for responses from a client's endpoint. It applies when
# validating a new account, when a client changes a secret or API key, and when
# sending webhook events. Keep it low; end users must respond within this window.
REQUEST_TIMEOUT = 5.0
# Retry a failed webhook notification to an endpoint in 2 minutes
DEFAULT_RETRY = 120
DEFAULT_FINAL_RETRY = 3600 # On the final retry, try again in an hour
# How many times a webhook post can fail to contact the endpoint before
# it is ignored
MAX_FAILED_COUNT = 250
RETHINK_HOST = 'rethinkdb'
CELERY_BROKER_URL = 'redis://redis:6379/0'
RETHINK_PORT = 28015
RETHINK_AUTH_KEY = ''
| 32.333333
| 79
| 0.788191
|
4d9730f5aef0bd8d322583a08360223da84a5f3c
| 2,388
|
py
|
Python
|
tests/natural_bm/test_callbacks.py
|
alexhunterlang/natural_bm
|
b2a1cb15694f4f3a80a3a1cc6f8423892563806d
|
[
"MIT"
] | 1
|
2018-06-07T00:54:17.000Z
|
2018-06-07T00:54:17.000Z
|
tests/natural_bm/test_callbacks.py
|
alexhunterlang/natural_bm
|
b2a1cb15694f4f3a80a3a1cc6f8423892563806d
|
[
"MIT"
] | null | null | null |
tests/natural_bm/test_callbacks.py
|
alexhunterlang/natural_bm
|
b2a1cb15694f4f3a80a3a1cc6f8423892563806d
|
[
"MIT"
] | null | null | null |
#%%
import pytest
import os
from csv import Sniffer
from natural_bm import callbacks
from natural_bm import optimizers
from natural_bm import training
from natural_bm.models import Model
from natural_bm.datasets import random
from natural_bm.utils_testing import nnet_for_testing
#%%
@pytest.mark.parametrize('sep', [',', '\t'], ids=['csv', 'tsv'])
def test_CSVLogger(sep):
"""
This test is a slight modification of test_CSVLogger from
https://github.com/fchollet/keras/blob/master/tests/keras/test_callbacks.py
"""
nnet = nnet_for_testing('rbm')
data = random.Random('probability')
batch_size = 6
n_epoch = 1
if sep == '\t':
filepath = 'log.tsv'
elif sep == ',':
filepath = 'log.csv'
def make_model(dbm, data):
optimizer = optimizers.SGD()
trainer = training.CD(dbm)
model = Model(dbm, optimizer, trainer)
return model
# case 1, create new file with defined separator
model = make_model(nnet, data)
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model(nnet, data)
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
# case 3, reuse of CSVLogger object
history = model.fit(data.train.data,
batch_size=batch_size,
n_epoch=n_epoch,
callbacks=cbks,
validation_data=data.valid.data)
import re
with open(filepath) as csvfile:
output = " ".join(csvfile.readlines())
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
#%% Main
if __name__ == '__main__':
pytest.main([__file__])
| 27.767442
| 79
| 0.602178
|
3eccd9e4b7ade8d1a77e4b3af717b0fccf6cf790
| 997
|
py
|
Python
|
src/homework/d_repetition/main.py
|
acc-cosc-1336-spring-2022/acc-cosc-1336-spring-2022-marshall0603181
|
8660a72234f212833bf27b68f37b9e0cc7911769
|
[
"MIT"
] | null | null | null |
src/homework/d_repetition/main.py
|
acc-cosc-1336-spring-2022/acc-cosc-1336-spring-2022-marshall0603181
|
8660a72234f212833bf27b68f37b9e0cc7911769
|
[
"MIT"
] | null | null | null |
src/homework/d_repetition/main.py
|
acc-cosc-1336-spring-2022/acc-cosc-1336-spring-2022-marshall0603181
|
8660a72234f212833bf27b68f37b9e0cc7911769
|
[
"MIT"
] | null | null | null |
import repetition
# choice2 keeps the 'Homework 3 Menu' loop running while it equals 'y'.
choice2 = 'y'
while choice2 == 'y':
print('Homework 3 Menu')
choice = int(input('\n(1) Factorial \n(2) Sum odd numbers \n(3) Exit \n\n '))
if choice == 1:
num = int(input('enter number greater than 0 and less than 10: '))
if num > 0 and num < 10:
print(repetition.get_factorial(num))
while num <= 0 or num >= 10:
print('Invalid entry')
num = int(input('enter number greater than 0 and less than 10: '))
if num > 0 and num < 10:
print(repetition.get_factorial(num))
elif choice == 2:
num = int(input('enter a number: '))
print(repetition.sum_odd_numbers(num))
elif choice == 3:
exit('Exiting...')
else:
print('Error')
choice2 = str(input('Do you wish to continue? y or n: '))
    # choice2 answers the prompt 'Do you wish to continue? y or n: '; 'n' exits.
if choice2 == 'n':
exit('Exiting...')
| 36.925926
| 81
| 0.570712
|
81e574e9b4f45083143bc61f715e8f59470368c8
| 6,473
|
py
|
Python
|
pylib/pc/PointHierarchy.py
|
schellmi42/tensorflow_graphics_point_clouds
|
c8e2dc2963c3eecfb27542449603f81d78494783
|
[
"Apache-2.0"
] | 3
|
2020-07-10T12:07:02.000Z
|
2022-03-21T09:28:55.000Z
|
pylib/pc/PointHierarchy.py
|
schellmi42/tensorflow_graphics_point_clouds
|
c8e2dc2963c3eecfb27542449603f81d78494783
|
[
"Apache-2.0"
] | null | null | null |
pylib/pc/PointHierarchy.py
|
schellmi42/tensorflow_graphics_point_clouds
|
c8e2dc2963c3eecfb27542449603f81d78494783
|
[
"Apache-2.0"
] | 1
|
2021-10-11T08:27:44.000Z
|
2021-10-11T08:27:44.000Z
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a point cloud hierarchy."""
import numpy as np
import tensorflow as tf
from pylib.pc.utils import check_valid_point_hierarchy_input
from pylib.pc import PointCloud
from pylib.pc import Grid
from pylib.pc import Neighborhood
from pylib.pc import sample
from pylib.pc.utils import cast_to_num_dims
class PointHierarchy:
""" A hierarchy of sampled point clouds.
Args:
    point_cloud: A `PointCloud` instance.
cell_sizes: A list of `floats` or `float` `Tensors` of shape `[D]`,
the cell sizes for the sampling. The length of the list defines
the number of samplings.
sample_mode: A `string`, either `'poisson'`or `'cell average'`.
"""
def __init__(self,
point_cloud: PointCloud,
cell_sizes,
sample_mode='poisson',
name=None):
#Initialize the attributes.
self._aabb = point_cloud.get_AABB()
self._point_clouds = [point_cloud]
self._cell_sizes = []
self._neighborhoods = []
self._dimension = point_cloud._dimension
self._batch_shape = point_cloud._batch_shape
#Create the different sampling operations.
cur_point_cloud = point_cloud
for sample_iter, cur_cell_sizes in enumerate(cell_sizes):
cur_cell_sizes = tf.convert_to_tensor(
value=cur_cell_sizes, dtype=tf.float32)
# Check if the cell size is defined for all the dimensions.
# If not, the last cell size value is tiled until all the dimensions
# have a value.
cur_num_dims = tf.gather(cur_cell_sizes.shape, 0)
cur_cell_sizes = tf.cond(
cur_num_dims < self._dimension,
lambda: tf.concat((cur_cell_sizes,
tf.tile(tf.gather(cur_cell_sizes,
[tf.rank(cur_cell_sizes) - 1]),
[self._dimension - cur_num_dims])),
axis=0),
lambda: cur_cell_sizes)
tf.assert_greater(
self._dimension + 1,
cur_num_dims,
f'Too many dimensions in cell sizes {cur_num_dims} ' + \
f'instead of max. {self._dimension}')
# old version, does not run in graph mode
# if cur_num_dims < self._dimension:
# cur_cell_sizes = tf.concat((cur_cell_sizes,
# tf.tile(tf.gather(cur_cell_sizes,
# [tf.rank(cur_cell_sizes) - 1]),
# [self._dimension - cur_num_dims])),
# axis=0)
# if cur_num_dims > self._dimension:
# raise ValueError(
# f'Too many dimensions in cell sizes {cur_num_dims} ' + \
# f'instead of max. {self._dimension}')
self._cell_sizes.append(cur_cell_sizes)
#Create the sampling operation.
cur_grid = Grid(cur_point_cloud, cur_cell_sizes, self._aabb)
cur_neighborhood = Neighborhood(cur_grid, cur_cell_sizes)
cur_point_cloud, _ = sample(cur_neighborhood, sample_mode)
self._neighborhoods.append(cur_neighborhood)
cur_point_cloud.set_batch_shape(self._batch_shape)
self._point_clouds.append(cur_point_cloud)
def get_points(self, batch_id=None, max_num_points=None, name=None):
""" Returns the points.
Note:
In the following, A1 to An are optional batch dimensions.
If called without specifying 'batch_id', returns the points in padded format
`[A1, ..., An, V, D]`.
Args:
batch_id: An `int`, identifier of point cloud in the batch, if `None`
returns all points.
Returns:
A list of `float` `Tensors` of shape
`[N_i, D]`, if 'batch_id' was given
or
`[A1, ..., An, V_i, D]`, if no 'batch_id' was given.
"""
points = []
for point_cloud in self._point_clouds:
points.append(point_cloud.get_points(batch_id))
return points
def get_sizes(self, name=None):
""" Returns the sizes of the point clouds in the point hierarchy.
Note:
In the following, A1 to An are optional batch dimensions.
Returns:
A `list` of `Tensors` of shape '`[A1, .., An]`'
"""
sizes = []
for point_cloud in self._point_clouds:
sizes.append(point_cloud.get_sizes())
return sizes
def set_batch_shape(self, batch_shape, name=None):
""" Function to change the batch shape.
Use this to set a batch shape instead of using 'self._batch_shape'
to also change dependent variables.
Note:
In the following, A1 to An are optional batch dimensions.
Args:
batch_shape: An 1D `int` `Tensor` `[A1, ..., An]`.
Raises:
ValueError: if shape does not sum up to batch size.
"""
for point_cloud in self._point_clouds:
point_cloud.set_batch_shape(batch_shape)
def get_neighborhood(self, i=None, transposed=False):
""" Returns the neighborhood between level `i` and `i+1` of the hierarchy.
If called without argument returns a list of all neighborhoods.
Args:
i: An `int`, can be negative but must be in range
`[-num_levels, num_levels-1]`.
transposed: A `bool`, if `True` returns the neighborhood between
level `i+1` and `i`.
Returns:
A `Neighborhood` instance or a `list` of `Neighborhood` instances.
"""
if i is None:
if transposed:
return [nb.transposed() for nb in self._neighborhoods]
else:
return self._neighborhoods
else:
if transposed:
return self._neighborhoods[i].transpose()
else:
return self._neighborhoods[i]
def __getitem__(self, index):
return self._point_clouds[index]
def __len__(self):
return len(self._point_clouds)
| 34.248677
| 79
| 0.626294
|
1e736673206162a194cdb6860dca00ee90a2d88c
| 165
|
py
|
Python
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_DayOfMonth_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_DayOfMonth_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_PolyTrend_Seasonal_DayOfMonth_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['PolyTrend'] , ['Seasonal_DayOfMonth'] , ['LSTM'] );
| 41.25
| 87
| 0.757576
|
79c9cd94871bef0c6d46c1a7e6af0177dd5e4945
| 5,387
|
py
|
Python
|
homeassistant/components/ssdp/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
homeassistant/components/ssdp/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 7
|
2019-08-23T05:26:02.000Z
|
2022-03-11T23:57:18.000Z
|
homeassistant/components/ssdp/__init__.py
|
alindeman/home-assistant
|
b274b10f3874c196f0db8f9cfa5f47eb756d1f8e
|
[
"Apache-2.0"
] | 2
|
2018-08-15T03:59:35.000Z
|
2018-10-18T12:20:05.000Z
|
"""The SSDP integration."""
import asyncio
from datetime import timedelta
import logging
from urllib.parse import urlparse
from xml.etree import ElementTree
import aiohttp
from netdisco import ssdp, util
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.generated.ssdp import SSDP
DOMAIN = 'ssdp'
SCAN_INTERVAL = timedelta(seconds=60)
ATTR_HOST = 'host'
ATTR_PORT = 'port'
ATTR_SSDP_DESCRIPTION = 'ssdp_description'
ATTR_ST = 'ssdp_st'
ATTR_NAME = 'name'
ATTR_MODEL_NAME = 'model_name'
ATTR_MODEL_NUMBER = 'model_number'
ATTR_SERIAL = 'serial_number'
ATTR_MANUFACTURER = 'manufacturer'
ATTR_MANUFACTURERURL = 'manufacturerURL'
ATTR_UDN = 'udn'
ATTR_UPNP_DEVICE_TYPE = 'upnp_device_type'
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the SSDP integration."""
async def initialize():
scanner = Scanner(hass)
await scanner.async_scan(None)
async_track_time_interval(hass, scanner.async_scan, SCAN_INTERVAL)
hass.loop.create_task(initialize())
return True
class Scanner:
"""Class to manage SSDP scanning."""
def __init__(self, hass):
"""Initialize class."""
self.hass = hass
self.seen = set()
self._description_cache = {}
async def async_scan(self, _):
"""Scan for new entries."""
_LOGGER.debug("Scanning")
# Run 3 times as packets can get lost
for _ in range(3):
entries = await self.hass.async_add_executor_job(ssdp.scan)
await self._process_entries(entries)
# We clear the cache after each run. We track discovered entries
# so will never need a description twice.
self._description_cache.clear()
async def _process_entries(self, entries):
"""Process SSDP entries."""
tasks = []
for entry in entries:
key = (entry.st, entry.location)
if key in self.seen:
continue
self.seen.add(key)
tasks.append(self._process_entry(entry))
if not tasks:
return
to_load = [result for result in await asyncio.gather(*tasks)
if result is not None]
if not to_load:
return
tasks = []
for entry, info, domains in to_load:
for domain in domains:
_LOGGER.debug("Discovered %s at %s", domain, entry.location)
tasks.append(self.hass.config_entries.flow.async_init(
domain, context={'source': DOMAIN}, data=info
))
await asyncio.wait(tasks)
async def _process_entry(self, entry):
"""Process a single entry."""
domains = set(SSDP["st"].get(entry.st, []))
xml_location = entry.location
if not xml_location:
if domains:
return (entry, info_from_entry(entry, None), domains)
return None
# Multiple entries usually share the same location. Make sure
# we fetch it only once.
info_req = self._description_cache.get(xml_location)
if info_req is None:
info_req = self._description_cache[xml_location] = \
self.hass.async_create_task(
self._fetch_description(xml_location))
info = await info_req
domains.update(SSDP["manufacturer"].get(info.get('manufacturer'), []))
domains.update(SSDP["device_type"].get(info.get('deviceType'), []))
if domains:
return (entry, info_from_entry(entry, info), domains)
return None
async def _fetch_description(self, xml_location):
"""Fetch an XML description."""
session = self.hass.helpers.aiohttp_client.async_get_clientsession()
try:
resp = await session.get(xml_location, timeout=5)
xml = await resp.text()
# Samsung Smart TV sometimes returns an empty document the
# first time. Retry once.
if not xml:
resp = await session.get(xml_location, timeout=5)
xml = await resp.text()
except (aiohttp.ClientError, asyncio.TimeoutError) as err:
_LOGGER.debug("Error fetching %s: %s", xml_location, err)
return {}
try:
tree = ElementTree.fromstring(xml)
except ElementTree.ParseError as err:
_LOGGER.debug("Error parsing %s: %s", xml_location, err)
return {}
return util.etree_to_dict(tree).get('root', {}).get('device', {})
def info_from_entry(entry, device_info):
"""Get most important info from an entry."""
url = urlparse(entry.location)
info = {
ATTR_HOST: url.hostname,
ATTR_PORT: url.port,
ATTR_SSDP_DESCRIPTION: entry.location,
ATTR_ST: entry.st,
}
if device_info:
info[ATTR_NAME] = device_info.get('friendlyName')
info[ATTR_MODEL_NAME] = device_info.get('modelName')
info[ATTR_MODEL_NUMBER] = device_info.get('modelNumber')
info[ATTR_SERIAL] = device_info.get('serialNumber')
info[ATTR_MANUFACTURER] = device_info.get('manufacturer')
info[ATTR_MANUFACTURERURL] = device_info.get('manufacturerURL')
info[ATTR_UDN] = device_info.get('UDN')
info[ATTR_UPNP_DEVICE_TYPE] = device_info.get('deviceType')
return info
| 30.607955
| 78
| 0.629107
|
12b806a28ed2197f6f296e7aa25dbf73bcc7d67c
| 131
|
py
|
Python
|
association/apps.py
|
bpprc/database
|
6e8302729793ddf840630840bd08c96ddd35a52e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-14T16:54:57.000Z
|
2021-04-14T16:54:57.000Z
|
association/apps.py
|
bpprc/database
|
6e8302729793ddf840630840bd08c96ddd35a52e
|
[
"BSD-3-Clause"
] | null | null | null |
association/apps.py
|
bpprc/database
|
6e8302729793ddf840630840bd08c96ddd35a52e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class AssociationConfig(AppConfig):
name = "association"
verbose_name = "Metadatabase"
| 18.714286
| 35
| 0.755725
|
1f195ec6962c01598d686ac5a5b75b95ebb06598
| 4,382
|
py
|
Python
|
DirToCatalog.py
|
mcfoi/arcpy-toolbox
|
348ef55c7235b7c1eeece06aa4a0a244674f893e
|
[
"Apache-2.0"
] | 3
|
2015-04-24T15:36:33.000Z
|
2020-08-06T16:53:56.000Z
|
DirToCatalog.py
|
mcfoi/arcpy-toolbox
|
348ef55c7235b7c1eeece06aa4a0a244674f893e
|
[
"Apache-2.0"
] | null | null | null |
DirToCatalog.py
|
mcfoi/arcpy-toolbox
|
348ef55c7235b7c1eeece06aa4a0a244674f893e
|
[
"Apache-2.0"
] | 1
|
2020-07-21T00:11:51.000Z
|
2020-07-21T00:11:51.000Z
|
##""********************************************************************************************************************
##TOOL NAME: DirToCatalog
##SOURCE NAME: DirToCatalog.py
##VERSION: ArcGIS 9.0
##AUTHOR: Environmental Systems Research Institute Inc.
##REQUIRED ARGUMENTS: Input workspace
## Output location
## Output name
##OPTIONAL ARGUMENTS: Keyword
## : Managed/unmanaged
##TOOL DESCRIPTION: Load all rasters in the input workspace to the output raster catalog, the raster and geometry
## columns have the same spatial reference as the first raster dataset in the list
##
##DATE: 5/31/2004
##UPDATED: 1/25/2005 for ArcGIS 9.2
##Usage: DirToCatalog <Input_Directory> <Output_Location> <Output_Name> <Keyword> {managed/unmanaged}
##
##NOTE ADDED AT ARCGIS 9.2: In ArcGIS 9.2, a new Geoprocessing tool WorkspaceToRasterCatalog is the equivalent to
## what this sample script does and it performs better.
##
##*********************************************************************************************************************"""
##
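##Example invocation (illustrative parameter values only):
##  DirToCatalog.py D:\data\rasters D:\data\output.gdb MyCatalog "" MANAGED
##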
#Importing ConversionUtils, which imports the required libraries and creates the geoprocessing object
import ConversionUtils, os
msgNonExist="Output locations does not exist: "
msgSuccess="Successfully loaded: "
msgFail="Failed to load: "
msgNotGDB="Output location is not GDB type: "
msgCreate="Successfully created: "
msgExist="Output raster catalog exists: "
try:
#The input workspace
workspace = ConversionUtils.gp.GetParameterAsText(0)
#The output workspace
out_location = ConversionUtils.gp.GetParameterAsText(1)
#The output name
out_name = ConversionUtils.gp.GetParameterAsText(2)
#Check existence
if not ConversionUtils.gp.Exists(workspace):
raise Exception, msgNonExist + " %s" % (workspace)
# Check if output workspace is GDB
outWorkspace = ConversionUtils.gp.Describe(out_location)
if (outWorkspace.WorkspaceType == "FileSystem"):
raise Exception, msgNotGDB + " %s" % (out_location)
keyword = ConversionUtils.gp.GetParameterAsText(3)
manage = ConversionUtils.gp.GetParameterAsText(4)
ConversionUtils.gp.workspace = workspace
out_raster = ConversionUtils.gp.QualifyTableName(out_name, out_location)
out_raster = out_location + os.sep + out_raster
#Check existence
if ConversionUtils.gp.Exists(out_raster):
raise Exception, msgExist + " %s" % (out_raster)
ConversionUtils.gp.SetParameterAsText(5,out_raster)
#The raster datasets in the input workspace
in_raster_datasets = ConversionUtils.gp.ListRasters()
#The first raster dataset in the list
#in_raster_dataset = in_raster_datasets.next()
#Loop through all raster datasets in the list and load to raster catalog.
#while in_raster_dataset <> None:
icnt = 1
for in_raster_dataset in in_raster_datasets:
if (icnt == 1) :
dataset = ConversionUtils.gp.Describe(in_raster_dataset)
#Get spatial reference of first raster dataset, if no spatial reference, set it to unknown.
try:
sr = dataset.SpatialReference
except Exception, ErrorDesc:
sr = '#'
#Create raster catalog
ConversionUtils.gp.CreateRasterCatalog(out_location, out_name, sr, sr, keyword, "#","#","#",manage)
ConversionUtils.gp.AddMessage(msgCreate + " %s " % (out_raster))
icnt = 2
try:
ConversionUtils.gp.CopyRaster_management(in_raster_dataset, out_raster)
ConversionUtils.gp.AddMessage(msgSuccess + " %s " % (in_raster_dataset))
except Exception, ErrorDesc:
# Except block for the loop. If the tool fails to convert one of the Rasters, it will come into this block
# and add warnings to the messages, then proceed to attempt to convert the next input Raster.
WarningMessage = (msgFail + " %s" %(in_raster_dataset))
ConversionUtils.gp.AddWarning(WarningMessage)
#in_raster_dataset = in_raster_datasets.next()
except Exception, ErrorDesc:
# Except block if the tool could not run at all.
# For example, not all parameters are provided, or if the output path doesn't exist.
ConversionUtils.gp.AddError(str(ErrorDesc))
| 40.953271
| 122
| 0.651073
|
cdc9f0315718713e493e7853983f0c208dd0be2b
| 8,752
|
py
|
Python
|
test_code/grid_cluster_analysis.py
|
eufmike/storm_image_processing
|
076335519be0be3b66d289a180421d36770ab820
|
[
"CC-BY-4.0"
] | null | null | null |
test_code/grid_cluster_analysis.py
|
eufmike/storm_image_processing
|
076335519be0be3b66d289a180421d36770ab820
|
[
"CC-BY-4.0"
] | null | null | null |
test_code/grid_cluster_analysis.py
|
eufmike/storm_image_processing
|
076335519be0be3b66d289a180421d36770ab820
|
[
"CC-BY-4.0"
] | null | null | null |
# %%
import os, sys
import re
import cv2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from numpy import linspace, meshgrid
from scipy.interpolate import griddata
import matplotlib.image as mpimg
import matplotlib.style
import matplotlib as mpl
mpl.style.use('default')
from PIL import Image
from IPython.display import display  # `display` is used below; assumes an IPython/Jupyter environment
# Functions Section Begins ----------------------------------------------------- #
def dircheck(targetpaths):
"""
dircheck checks the target folder(s) and creates any folder that does not exist.
targetpaths: a folder path or a list of folder paths
"""
# print(type(targetpaths))
if isinstance(targetpaths, str):
print(os.path.exists(targetpaths))
if not os.path.exists(targetpaths):
os.makedirs(targetpaths)
elif isinstance(targetpaths, list):
for path in targetpaths:
if not os.path.exists(path):
os.makedirs(path)
def listfiles(path, extension = None):
filelist = []
fileabslist = []
for directory, dir_names, file_names in os.walk(path):
# print(file_names)
for file_name in file_names:
if (not file_name.startswith('.')) & (file_name.endswith(extension)):
filepath_tmp = os.path.join(directory, file_name)
filelist.append(file_name)
fileabslist.append(filepath_tmp)
return {'filelist': filelist,
'fileabslist': fileabslist}
# Functions Section Ends ----------------------------------------------------- #
# %%
nchannels = 2
dir_check = []
# %%
# input folder
path = '/Volumes/LaCie_DataStorage/xiaochao_wei_STORM imaging/STORM_imaging'
analysis_dir = 'analysis_20190308'
spacialtestdir = 'spacial_test'
intcsv_dir = 'spacialdata_local_pad_grid'
intcsv_path = os.path.join(path, analysis_dir, spacialtestdir, intcsv_dir)
print(intcsv_path)
# output folder
nnd_dir = 'nnd'
intcsv_histo_dir = 'int_grid_histo'
intcsv_histo_summary_dir = 'int_grid_histo_summary'
intcsv_bw = 'int_grid_bw'
intcsv_histo_path = os.path.join(path, analysis_dir, spacialtestdir, nnd_dir, intcsv_histo_dir)
intcsv_histo_summary_path = os.path.join(path, analysis_dir, spacialtestdir, nnd_dir, intcsv_histo_summary_dir)
for c in range(nchannels):
dir_check.append(os.path.join(intcsv_histo_path, str(c+1)))
dir_check.append(os.path.join(intcsv_bw, str(c+1)))
dir_check.append(intcsv_histo_summary_path)
dircheck(dir_check)
# %%
# Grouped by the channels and treatment ------------------------------------------ #
filelist = {}
filenamelist = listfiles(os.path.join(intcsv_path, '1'), '.tif')['filelist']
filedir = ['ip_filename', 'ip_path', 'op_hist', 'op_bw']
treatment = ['wildtype', 'knockout']
channel = list(range(2))
print(channel)
# group the data by the treatment
for c in channel:
filelist[str(c+1)] = {}
for group in treatment:
filelist[str(c+1)][group] = {}
# create list
filelist_temp = []
for l in filenamelist:
if group == 'wildtype':
x = re.search('(.*)_w{1}[0-9]{1}_(.*)', l)
else:
x = re.search('(.*)_k{1}[0-9]{1}_(.*)', l)
try:
found = x.group(0)
filelist_temp.append(found)
except AttributeError:
found = ''
ip_filepath = []
op_hist_filepath = []
op_th_filepath = []
for f in filelist_temp:
filepath_tmp = os.path.join(intcsv_path, str(c+1), f)
ip_filepath.append(filepath_tmp)
filename_tmp_png = f.replace('.tif', '.png')
op_hist_filepath_temp = os.path.join(intcsv_histo_path, str(c+1), filename_tmp_png)
op_hist_filepath.append(op_hist_filepath_temp)
# f_csv_tmp = os.path.join(csv_threshold_path, str(c+1), f)
# op_th_filepath.append(f_csv_tmp)
filelist[str(c+1)][group][filedir[0]] = filelist_temp
filelist[str(c+1)][group][filedir[1]] = ip_filepath
filelist[str(c+1)][group][filedir[2]] = op_hist_filepath
print(filelist)
# ----------------------------------------------------- #
# %%
data_list = []
for c in channel:
for group in treatment:
for i in range(len(filelist[str(c+1)][group][filedir[0]])):
filepath = filelist[str(c+1)][group][filedir[1]][i]
im = np.array(Image.open(filepath))
fig = plt.figure()
plt.yscale('log')
plt.hist(im.ravel(), bins=256, range=(0, 1000))
opfilename = filelist[str(c+1)][group][filedir[2]][i]
fig.savefig(opfilename)
plt.close()
data_tmp = pd.DataFrame({'density':im.ravel()})
data_tmp['filename'] = filelist[str(c+1)][group][filedir[0]][i]
data_tmp['group'] = group
data_tmp['channel'] = str(c+1)
data_list.append(data_tmp)
data_total = pd.concat(data_list, axis = 0)
# %%
for c in channel:
print(c)
data_temp = data_total[data_total['channel'] == str(c+1)]
#print(data_temp)
max_value = max(data_temp['density'])
print(max_value)
binsize = 25
bin_max_value = max_value//binsize
print(bin_max_value)
bin_list = list(range(0, (int(bin_max_value) + 2) * binsize, binsize))
print(bin_list)
fig, axes = plt.subplots()
colors = ['red', 'blue']
for m in range(len(treatment)):
for i in range(len(filelist[str(c+1)][treatment[m]][filedir[0]])):
# for i in range(10):
filename_tmp = filelist[str(c+1)][treatment[m]][filedir[0]][i]
# print(filename_tmp)
data_plot = data_temp[data_temp['filename'] == filename_tmp]
# print(data_plot)
plt.hist(data_plot['density'], bins= bin_list, histtype = 'step', color = colors[m], alpha = 0.2)
plt.yscale('log')
#plt.xscale('log')
fig.savefig(os.path.join(intcsv_histo_summary_path, 'density' + '_c' + str(c+1) + '.png'))
axes.set_xlim(0, max_value)
plt.close()
# %%
# mergeed plot, grouped by channel and treatment, average and errorbar
for c in channel:
print('channel: {}'.format(c))
# load data
data_temp = data_total[data_total['channel'] == str(c+1)]
#print(data_temp)
# prepare binning (bin_list)
max_value = max(data_temp['density'])
print('max_value: {}'.format(max_value))
binsize = 25
bin_max_value = max_value//binsize
print('bin_max_value: {}'.format(bin_max_value))
bin_list = list(range(0, (int(bin_max_value) + 2) * binsize, binsize))
print(bin_list)
# prepare binned data
data_total_tmp = data_total
data_total_tmp['bins'] = pd.cut(data_total['density'], bins = bin_list)
# 1st group by bins
data_total_tmp = data_total_tmp.groupby(by = ['channel', 'group', 'filename', 'bins']).size()
# reset index
data_total_tmp = data_total_tmp.reset_index()
data_total_tmp = data_total_tmp.rename(index = int, columns={0: 'counts'})
# 2nd group by
data_total_tmp_mean = data_total_tmp.groupby(by = ['channel', 'group', 'bins']).mean()['counts']
data_total_tmp_sem = data_total_tmp.groupby(by = ['channel', 'group', 'bins']).sem()['counts']
print('binned data, mean')
display(data_total_tmp_mean)
print('binned data, sem')
display(data_total_tmp_sem)
# plot mean dataset
fig, axes = plt.subplots()
fig.set_figheight(15)
fig.set_figwidth(15)
colors = ['red', 'blue']
for m in range(len(treatment)):
# print(m)
data_mean_temp_mean = data_total_tmp_mean.loc[str(c+1), treatment[m]]
x = list(range(0, data_mean_temp_mean.shape[0]*binsize, binsize))
# print(x)
# x = data_mean_temp_mean.reset_index()['bins']
# print(x)
y = data_mean_temp_mean.reset_index()['counts']
# print(y)
data_mean_temp_sem = data_total_tmp_sem.loc[str(c+1), treatment[m]]
yerr = data_mean_temp_sem.reset_index()['counts']
# print(yerr)
plt.yscale('log')
#plt.xscale('log')
# make plots
plt.errorbar(x, y, yerr = yerr, color = colors[m], alpha = 0.2)
plt.yscale('log')
axes.set_xlim(0, max_value)
oppath_temp = os.path.join(intcsv_histo_summary_path, 'density_mean' + '_c' + str(c+1) + '.png')
print(oppath_temp)
fig.savefig(oppath_temp)
plt.close()
# %%
# create binary by thresholding
for c in channel:
print('channel: {}'.format(c))
for group in treatment:
for i in range(len(filelist[str(c+1)][group][filedir[0]])):
filepath = filelist[str(c+1)][group][filedir[1]][i]
print(filepath)
im = np.array(Image.open(filepath))
print(type(im))
cv2.imshow('image', im)
break
break
break
| 33.40458
| 111
| 0.616659
|
8f3b1a7ecf4a768e0d41fd4ef0f7519710eee52f
| 5,465
|
py
|
Python
|
stRT/preprocess/cluster/utils/integration.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
stRT/preprocess/cluster/utils/integration.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
stRT/preprocess/cluster/utils/integration.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List, Optional
import numpy as np
import pandas as pd
from anndata import AnnData
from pandas import DataFrame
from scipy.sparse import csr_matrix, isspmatrix
# Convert sparse matrix to dense matrix.
to_dense_matrix = lambda X: np.array(X.todense()) if isspmatrix(X) else np.asarray(X)
def integrate(
adatas: List[AnnData],
batch_key: str = "slices",
) -> AnnData:
"""
Concatenating all anndata objects.
Args:
adatas: AnnData matrices to concatenate with.
batch_key: Add the batch annotation to :attr:`obs` using this key.
Returns:
integrated_adata: The concatenated AnnData, where adata.obs[batch_key] stores a categorical variable labeling the batch.
"""
def _other_annotation_merge(anno_datas):
anno_dict = {}
anno_keys = anno_datas[0].keys()
n_anno_keys = len(anno_keys)
if n_anno_keys > 0:
for anno_key in anno_keys:
kadtype = anno_datas[0][anno_key]
if isspmatrix(kadtype) or isinstance(kadtype, np.ndarray):
concat_array = np.concatenate(
[
to_dense_matrix(anno_data[anno_key])
for anno_data in anno_datas
],
axis=0,
)
concat_array_shape = concat_array.shape
if len(concat_array_shape) == 1:
concat_array = concat_array.reshape(concat_array_shape[0], 1)
anno_dict[anno_key] = concat_array
elif isinstance(kadtype, DataFrame):
concat_data = pd.concat(
[anno_data[anno_key] for anno_data in anno_datas], axis=0
)
concat_data.fillna(value=0, inplace=True)
anno_dict[anno_key] = concat_data
return anno_dict
else:
return None
# Merge the obsm data, varm data and layers of all anndata objects separately.
obsm_datas = [adata.obsm for adata in adatas]
obsm_dict = _other_annotation_merge(anno_datas=obsm_datas)
varm_datas = [adata.varm for adata in adatas]
varm_dict = _other_annotation_merge(anno_datas=varm_datas)
layers_datas = [adata.layers for adata in adatas]
layers_dict = _other_annotation_merge(anno_datas=layers_datas)
# Delete obsm and varm data.
for adata in adatas:
del adata.obsm, adata.varm, adata.layers
# Concatenating obs and var data.
batch_ca = [adata.obs[batch_key][0] for adata in adatas]
integrated_adata = adatas[0].concatenate(
*adatas[1:],
batch_key=batch_key,
batch_categories=batch_ca,
join="outer",
fill_value=0,
uns_merge="unique"
)
# Add Concatenated obsm data, varm data and layers data to integrated anndata object.
if not (obsm_dict is None):
for key, value in obsm_dict.items():
integrated_adata.obsm[key] = value
if not (varm_dict is None):
for key, value in varm_dict.items():
integrated_adata.varm[key] = value
if not (layers_dict is None):
for key, value in layers_dict.items():
integrated_adata.layers[key] = value
return integrated_adata
def harmony_debatch(
adata: AnnData,
batch_key: str,
basis: str = "X_pca",
adjusted_basis: str = "X_pca_harmony",
max_iter_harmony: int = 10,
copy: bool = False,
) -> Optional[AnnData]:
"""\
Use harmonypy [Korunsky19]_ to remove batch effects.
This function should be run after performing PCA but before computing the neighbor graph.
Original Code Repository: https://github.com/slowkow/harmonypy
Interesting example: https://slowkow.com/notes/harmony-animation/
Args:
adata: An Anndata object.
batch_key: The name of the column in ``adata.obs`` that differentiates among experiments/batches.
basis: The name of the field in ``adata.obsm`` where the PCA table is stored.
adjusted_basis: The name of the field in ``adata.obsm`` where the adjusted PCA table will be stored after
running this function.
max_iter_harmony: Maximum number of rounds to run Harmony. One round of Harmony involves one clustering and one
correction step.
copy: Whether to copy `adata` or modify it inplace.
Returns:
Updates adata with the field ``adata.obsm[adjusted_basis]``, containing principal components adjusted by
Harmony.
"""
try:
import harmonypy
except ImportError:
raise ImportError(
"\nYou need to install the package `harmonypy`."
"\n\t\tInstall harmonypy via `pip install harmonypy`"
)
adata = adata.copy() if copy else adata
# Convert sparse matrix to dense matrix.
raw_matrix = adata.X if basis == "X" else adata.obsm[basis]
matrix = to_dense_matrix(raw_matrix)
# Use Harmony to adjust the PCs.
harmony_out = harmonypy.run_harmony(
matrix, adata.obs, batch_key, max_iter_harmony=max_iter_harmony
)
adjusted_matrix = harmony_out.Z_corr.T
# Convert dense matrix to sparse matrix.
adjusted_matrix = (
csr_matrix(adjusted_matrix) if isspmatrix(raw_matrix) else adjusted_matrix
)
adata.obsm[adjusted_basis] = adjusted_matrix
return adata if copy else None
| 36.925676
| 128
| 0.63989
|
7a1ca12e811d67b018eb6636c122ee3ceceb93a9
| 9,889
|
py
|
Python
|
service/core.py
|
lojoja/service
|
27295a25c514119cf9975c76c9c3b784243977f1
|
[
"MIT"
] | null | null | null |
service/core.py
|
lojoja/service
|
27295a25c514119cf9975c76c9c3b784243977f1
|
[
"MIT"
] | null | null | null |
service/core.py
|
lojoja/service
|
27295a25c514119cf9975c76c9c3b784243977f1
|
[
"MIT"
] | null | null | null |
import logging
import os
import pathlib
import platform
import re
import click
from service import __version__, launchctl # noqa
from service.log import change_logger_level, setup_logger
__all__ = ['cli']
PROGRAM_NAME = 'service'
MIN_MACOS_VERSION = 12.0
CONFIG_FILE = '{}.conf'.format(PROGRAM_NAME)
logger = logging.getLogger(PROGRAM_NAME)
setup_logger(logger)
class Configuration(object):
""" Program configuration and environment settings. """
def __init__(self, verbose):
logger.debug('Gathering system and environment details')
self.macos_version = self._get_macos_version()
self.user = os.geteuid()
self.sudo = self.user == 0
self.reverse_domains = None
self.service = None
self.verbose = verbose
def _find_reverse_domains_config(self):
""" Locate the reverse domain configuration file to use. """
logger.debug('Finding reverse domain config file')
paths = ['/usr/local/etc', '/etc']
for p in paths:
conf = pathlib.Path(p, CONFIG_FILE)
logger.debug('Trying reverse domain config file "{}"'.format(conf))
if conf.is_file():
logger.debug('Reverse domain config file found; using "{}"'.format(conf))
return conf
logger.debug('Reverse domain config file not found')
return None
def _get_macos_version(self):
version = platform.mac_ver()[0]
version = float('.'.join(version.split('.')[:2])) # format as e.g., '10.10'
return version
def load_reverse_domains(self):
logger.debug('Loading reverse domains')
conf = self._find_reverse_domains_config()
data = []
if conf:
lines = []
try:
with conf.open(mode='r', encoding='utf-8') as f:
lines = f.read().splitlines()
except IOError:
raise click.ClickException('Failed to read reverse domains file')
for line in lines:
line = re.split(r'#|\s', line.strip(), 1)[0]
if line:
logger.debug('Adding reverse domain "{}"'.format(line))
data.append(line)
self.reverse_domains = data
class Service(object):
""" A service on the system. """
def __init__(self, name, config):
logger.debug('Initializing service')
self._domain = launchctl.DOMAIN_SYSTEM if config.sudo else '{}/{}'.format(launchctl.DOMAIN_GUI, config.user)
self._search_paths = self._get_search_paths()
self._file = self._find(name, config.reverse_domains)
@property
def domain(self):
""" Service target domain (e.g., system). """
return self._domain
@property
def file(self):
""" Full path to the service file, as a string. """
return str(self._file)
@property
def name(self):
""" Service filename without extension, as a string. """
return self._file.stem
@property
def search_paths(self):
return self._search_paths
def _find(self, name, rev_domains):
"""
Find the service based on the information given. Uses the `name` argument as input on the CLI if it includes
an absolute or relative path, adding the file extension if missing, otherwise constructs and tests all possible
file paths in the current launchctl domain for a match.
"""
logger.debug('Finding service "{}"'.format(name))
_name = name # save the original name
service_path = None
if not name.endswith('.plist'):
name += '.plist'
path = pathlib.Path(name)
if len(path.parts) > 1:
logger.debug('Resolving path from CLI input')
path = path.expanduser().absolute()
logger.debug('Trying "{}"'.format(path))
if path.is_file():
service_path = path
else:
filenames = [path.name] if len(path.suffixes) > 1 else ['{}.{}'.format(rd, path.name) for rd in rev_domains]
for search_path in self.search_paths:
for filename in filenames:
possible_file = search_path.joinpath(filename)
logger.debug('Trying "{}"'.format(possible_file))
if possible_file.is_file():
service_path = possible_file
break
else:
continue
break
if not service_path:
raise click.ClickException('Service "{}" not found'.format(_name))
logger.debug('Service found, using "{}"'.format(service_path))
self._validate_domain(service_path)
return service_path
def _get_search_paths(self):
""" Get the service search paths for system or user domains. """
logger.debug('Identifying search paths')
common_paths = ['Library/LaunchAgents', 'Library/LaunchDaemons']
prefixes = ['/', '/System'] if self.domain == launchctl.DOMAIN_SYSTEM else [pathlib.Path.home()]
search_paths = []
for prefix in prefixes:
for common_path in common_paths:
path = pathlib.Path(prefix, common_path)
if path.is_dir():
search_paths.append(path)
return search_paths
def _validate_domain(self, service_path):
""" Verify the service exists in the current domain and is not a macOS system service. """
logger.debug('Validating service domain')
if self.domain == launchctl.DOMAIN_SYSTEM:
if service_path.parts[1] == ('System'):
raise click.ClickException('Service "{}" is a macOS system service'.format(service_path))
if service_path.parts[1] == ('Users'):
raise click.ClickException('Service "{}" is not in the "{}" domain'.format(service_path, self.domain))
else:
if not service_path.parts[1] == ('Users'):
raise click.ClickException('Service "{}" is not in the "{}" domain'.format(service_path, self.domain))
class CLIGroup(click.Group):
"""
CLI Command group
Collect common group subcommand arguments so they can be handled once at the group level.
This provides a better cli interface without duplicating the code in each subcommand. Argument names
must still be included in each command's function signature.
"""
def invoke(self, ctx):
ctx.obj = tuple(ctx.args)
super(CLIGroup, self).invoke(ctx)
@click.group(cls=CLIGroup)
@click.option('--verbose/--quiet', '-v/-q', is_flag=True, default=None, help='Specify verbosity level.')
@click.version_option()
@click.pass_context
def cli(ctx, verbose):
change_logger_level(logger, verbose)
logger.debug('{} started'.format(PROGRAM_NAME))
config = Configuration(verbose)
logger.debug('Checking macOS version')
if config.macos_version < MIN_MACOS_VERSION:
raise click.ClickException('{0} requires macOS {1} or higher'.format(PROGRAM_NAME, MIN_MACOS_VERSION))
else:
logger.debug('macOS version is {}'.format(config.macos_version))
# Load reverse domains and initiate service only when a subcommand is given without the `--help` option
if ctx.invoked_subcommand and '--help' not in ctx.obj:
config.load_reverse_domains()
logger.debug('Processing group command arguments')
name = next((arg for arg in ctx.obj if not arg.startswith('-')), '')
service = Service(name, config)
config.service = service
# Store config on context.obj for subcommands to access
ctx.obj = config
def service_name_argument(func):
func = click.argument('name', default='')(func)
return func
@cli.command()
@service_name_argument
@click.pass_obj
def disable(config, name):
""" Disable a service. """
launchctl.disable(config.service, sudo=config.sudo)
logger.info('"{}" disabled'.format(config.service.name))
@cli.command()
@service_name_argument
@click.pass_obj
def enable(config, name):
""" Enable a service. Only available for system domain services."""
launchctl.enable(config.service, sudo=config.sudo)
logger.info('"{}" enabled'.format(config.service.name))
@cli.command()
@service_name_argument
@click.pass_obj
def restart(config, name):
""" Restart a service. Only available for system domain services. """
launchctl.restart(config.service, sudo=config.sudo)
logger.info('"{}" restarted'.format(config.service.name))
@cli.command()
@click.option(
'--enable', '-e', is_flag=True, default=False,
help='Enable service before starting. Only available for services in the system domain.',
)
@service_name_argument
@click.pass_obj
def start(config, name, enable):
""" Start a service, optionally enabling it first. """
if enable:
launchctl.enable(config.service, sudo=config.sudo)
logger.debug('"{}" enabled'.format(config.service.name))
launchctl.start(config.service, sudo=config.sudo)
logger.info('"{}" started'.format(config.service.name))
@cli.command()
@click.option(
'--disable', '-d', is_flag=True, default=False,
help='Disable service after stopping. Only available for services in the system domain.',
)
@service_name_argument
@click.pass_obj
def stop(config, name, disable):
""" Stop a service, optionally disabling it afterward. """
launchctl.stop(config.service, sudo=config.sudo)
logger.info('"{}" stopped'.format(config.service.name))
if disable:
launchctl.disable(config.service, sudo=config.sudo)
logger.debug('"{}" disabled'.format(config.service.name))
def show_exception(self, file=None):
logger.error(self.message)
click.ClickException.show = show_exception
click.UsageError.show = show_exception
| 32.963333
| 120
| 0.637274
|
2816cfdb94591f897dba7c94e443b5767495c94c
| 1,919
|
py
|
Python
|
claims_to_quality/analyzer/models/measures/measure_definition.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 13
|
2018-09-28T14:02:59.000Z
|
2021-12-07T21:31:54.000Z
|
claims_to_quality/analyzer/models/measures/measure_definition.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 1
|
2018-10-01T17:49:05.000Z
|
2018-10-09T01:10:56.000Z
|
claims_to_quality/analyzer/models/measures/measure_definition.py
|
CMSgov/qpp-claims-to-quality-public
|
1e2da9494faf9e316a17cbe899284db9e61d0902
|
[
"CC0-1.0"
] | 1
|
2021-02-08T18:32:16.000Z
|
2021-02-08T18:32:16.000Z
|
"""Models for measure definition."""
from claims_to_quality.analyzer.models.measures.eligibility_option import EligibilityOption
from claims_to_quality.analyzer.models.measures.performance_option import PerformanceOption
from claims_to_quality.analyzer.models.measures.stratum import Stratum
from claims_to_quality.lib.helpers import dict_utils
from schematics.models import Model
from schematics.types import BooleanType, StringType
from schematics.types.compound import ListType, ModelType
class MeasureDefinition(Model):
"""Top-level measure definition model."""
measure_number = StringType(serialized_name='measureId') # Formatted as e.g. '024'
eligibility_options = ListType(
ModelType(EligibilityOption), serialized_name='eligibilityOptions')
performance_options = ListType(
ModelType(PerformanceOption), serialized_name='performanceOptions')
is_inverse = BooleanType(serialized_name='isInverse')
strata = ListType(ModelType(Stratum), serialized_name='strata')
def __init__(self, *args, **kwargs):
"""Initialize a MeasureDefinition object."""
super(MeasureDefinition, self).__init__(*args, **kwargs)
self.procedure_code_map = dict_utils.merge_dictionaries_with_list_values(
[option.procedure_code_map for option in self.eligibility_options]
)
self.quality_code_map = dict_utils.merge_dictionaries_with_list_values(
[option.quality_code_map for option in self.performance_options]
)
def get_measure_quality_codes(self):
"""Get quality codes from the measure definition."""
return set([
quality_code.code for option in self.performance_options
for quality_code in option.quality_codes
])
def __repr__(self):
"""Return a string representation of the measure."""
return 'MeasureDefinition({})'.format(self.to_native())
| 42.644444
| 91
| 0.745701
|
27978f83500214065097e0524dd4eaf65ad3f305
| 2,441
|
py
|
Python
|
lookup.py
|
raywong702/lookup
|
0950b8a8279606499d3a5a7280c29996f7da1a53
|
[
"BSD-3-Clause"
] | null | null | null |
lookup.py
|
raywong702/lookup
|
0950b8a8279606499d3a5a7280c29996f7da1a53
|
[
"BSD-3-Clause"
] | null | null | null |
lookup.py
|
raywong702/lookup
|
0950b8a8279606499d3a5a7280c29996f7da1a53
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import ipaddress
import socket
import asyncio
import sys
import os.path
async def lookup(loop, addresses, max_length=None):
''' loop: asyncio event loop
addresses: list of ip address or hostnames
max_length: spacer width of first print column
prints address and its corresponding ip or hostname
'''
for address in addresses:
address = address.strip()
try:
ipaddress.ip_address(address)
lookup = await loop.getnameinfo((address, 80))
lookup = lookup[0]
except ValueError:
try:
lookup = await loop.getaddrinfo(
host=address,
port=80,
proto=socket.IPPROTO_TCP
)
lookup = lookup[0][4][0]
except (socket.gaierror, UnicodeError) as e:
if hasattr(e, 'message'):
lookup = f'EXCEPTION: {e.message}'
else:
lookup = f'EXCEPTION: {e}'
print(f'{address:{max_length}} {lookup}')
def lookup_loop(addresses, max_length=None):
''' addresses: list of ip addresses or hostnames
max_length: spacer width of first print column
async lookup wrapper
'''
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(lookup(loop, addresses, max_length))
finally:
loop.close()
def main():
''' reads input passed in
prints how to use if no args passed in
if first input is a file, then run lookup on each line of file
otherwise, run lookup on arguments
prints a list of addresses and their corresponding ip address or hostname
'''
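# Example invocations (illustrative):
#   ./lookup.py 8.8.8.8 example.com
#   ./lookup.py hosts.txt    # a file with one IP or hostname per line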
if len(sys.argv) == 1:
self = sys.argv[0]
divider = '-' * 60
ARG_EXCEPTION = f'''
{divider}
Prints corresponding ip or hostname of input
USAGE:
{self} <file of ips or hostnames on each line>
-- or --
{self} <list of ips or hostnames. space delimited>
{divider}
'''
print(ARG_EXCEPTION)
elif os.path.isfile(sys.argv[1]):
addresses = open(sys.argv[1]).readlines()
max_length = len(max(open(sys.argv[1], 'r'), key=len))
lookup_loop(addresses, max_length=max_length)
else:
addresses = sys.argv[1:]
max_length = len(max(addresses, key=len))
lookup_loop(addresses, max_length=max_length)
if __name__ == '__main__':
main()
| 28.057471
| 77
| 0.597296
|
db4a1579416d6847733669769a9d3482623bcd72
| 2,664
|
py
|
Python
|
jina/types/score/map.py
|
arijitdas123student/jina
|
54d916e362bde0391b0af0f12241d531b8453247
|
[
"Apache-2.0"
] | 15,179
|
2020-04-28T10:23:56.000Z
|
2022-03-31T14:35:25.000Z
|
jina/types/score/map.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 3,912
|
2020-04-28T13:01:29.000Z
|
2022-03-31T14:36:46.000Z
|
jina/types/score/map.py
|
manavshah123/jina
|
f18b04eb82d18a3c554e2892bbae4b95fc0cb13e
|
[
"Apache-2.0"
] | 1,955
|
2020-04-28T10:50:49.000Z
|
2022-03-31T12:28:34.000Z
|
from collections.abc import MutableMapping
from typing import Optional, Union
import numpy as np
from ..score import NamedScore
from ...helper import typename
from ...proto import jina_pb2
__all__ = ['NamedScoreMapping']
if False:
from google.protobuf.pyext._message import MessageMapContainer
class NamedScoreMapping(MutableMapping):
"""
:class:`NamedScoreMapping` is one of the **primitive data types** in Jina.
It offers a Pythonic interface to allow users to access and manipulate a
:class:`jina.jina_pb2.NamedScoreMappingProto` object without working with Protobuf itself.
It exposes scores as `NamedScore` values of a `dict`-like mapping keyed by strings.
To create a :class:`NamedScoreMappingProto` object, simply:
.. highlight:: python
.. code-block:: python
from jina.types.score.map import NamedScoreMapping
scores = NamedScoreMapping()
scores['score'] = 50
:class:`NamedScoreMapping` can be built from ``jina_pb2.NamedScoreMappingProto`` (as a weak reference or a deep copy)
:param scores: The scores to construct from, depending on the ``copy``,
it builds a view or a copy from it.
:type scores: Optional[jina_pb2.NamedScoreMappingProto]
:param copy: When ``scores`` is given as a :class:`NamedScoreMappingProto` object, build a
view (i.e. weak reference) from it or a deep copy from it.
:type copy: bool
:param kwargs: Other parameters to be set
"""
def __init__(
self,
scores: 'MessageMapContainer',
):
self._pb_body = scores
def __setitem__(
self,
key: str,
value: Union[jina_pb2.NamedScoreProto, NamedScore, float, np.generic],
):
if isinstance(value, jina_pb2.NamedScoreProto):
self._pb_body[key].CopyFrom(value)
elif isinstance(value, NamedScore):
self._pb_body[key].CopyFrom(value._pb_body)
elif isinstance(value, (float, int)):
self._pb_body[key].value = value
elif isinstance(value, np.generic):
self._pb_body[key].value = value.item()
else:
raise TypeError(f'score is in unsupported type {typename(value)}')
def __getitem__(
self,
key: str,
):
return NamedScore(self._pb_body[key])
def __delitem__(
self,
key: str,
):
del self._pb_body[key]
def __contains__(self, key: str):
return key in self._pb_body
def __iter__(self):
for key in self._pb_body:
yield key
def __len__(self):
return len(self._pb_body)
| 29.932584
| 121
| 0.653529
|
4468a838ea5916f8931adec6bd118bc4304fb038
| 13,334
|
py
|
Python
|
certbot-apache/tests/augeasnode_test.py
|
ravikumarmotukuri/certbot
|
f8c43aa9fe8c1917ad4bc2620134837db3cd7d70
|
[
"Apache-2.0"
] | null | null | null |
certbot-apache/tests/augeasnode_test.py
|
ravikumarmotukuri/certbot
|
f8c43aa9fe8c1917ad4bc2620134837db3cd7d70
|
[
"Apache-2.0"
] | 1
|
2020-04-03T12:57:27.000Z
|
2020-04-03T12:57:27.000Z
|
certbot-apache/tests/augeasnode_test.py
|
Tomoyuki-GH/certbot
|
41ee5be4b5e01f4a0616fe5e014a38f59a931f60
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for AugeasParserNode classes"""
import mock
import os
import unittest
import util
from certbot import errors
from certbot_apache._internal import assertions
from certbot_apache._internal import augeasparser
def _get_augeasnode_mock(filepath):
""" Helper function for mocking out DualNode instance with an AugeasNode """
def augeasnode_mock(metadata):
return augeasparser.AugeasBlockNode(
name=assertions.PASS,
ancestor=None,
filepath=filepath,
metadata=metadata)
return augeasnode_mock
class AugeasParserNodeTest(util.ApacheTest): # pylint: disable=too-many-public-methods
"""Test AugeasParserNode using available test configurations"""
def setUp(self): # pylint: disable=arguments-differ
super(AugeasParserNodeTest, self).setUp()
with mock.patch("certbot_apache._internal.configurator.ApacheConfigurator.get_parsernode_root") as mock_parsernode:
mock_parsernode.side_effect = _get_augeasnode_mock(
os.path.join(self.config_path, "apache2.conf"))
self.config = util.get_apache_configurator(
self.config_path, self.vhost_path, self.config_dir, self.work_dir, use_parsernode=True)
self.vh_truth = util.get_vh_truth(
self.temp_dir, "debian_apache_2_4/multiple_vhosts")
def test_save(self):
with mock.patch('certbot_apache._internal.parser.ApacheParser.save') as mock_save:
self.config.parser_root.save("A save message")
self.assertTrue(mock_save.called)
self.assertEqual(mock_save.call_args[0][0], "A save message")
def test_unsaved_files(self):
with mock.patch('certbot_apache._internal.parser.ApacheParser.unsaved_files') as mock_uf:
mock_uf.return_value = ["first", "second"]
files = self.config.parser_root.unsaved_files()
self.assertEqual(files, ["first", "second"])
def test_get_block_node_name(self):
from certbot_apache._internal.augeasparser import AugeasBlockNode
block = AugeasBlockNode(
name=assertions.PASS,
ancestor=None,
filepath=assertions.PASS,
metadata={"augeasparser": mock.Mock(), "augeaspath": "/files/anything"}
)
testcases = {
"/some/path/FirstNode/SecondNode": "SecondNode",
"/some/path/FirstNode/SecondNode/": "SecondNode",
"OnlyPathItem": "OnlyPathItem",
"/files/etc/apache2/apache2.conf/VirtualHost": "VirtualHost",
"/Anything": "Anything",
}
for test in testcases:
self.assertEqual(block._aug_get_name(test), testcases[test]) # pylint: disable=protected-access
def test_find_blocks(self):
blocks = self.config.parser_root.find_blocks("VirtualHost", exclude=False)
self.assertEqual(len(blocks), 12)
def test_find_blocks_case_insensitive(self):
vhs = self.config.parser_root.find_blocks("VirtualHost")
vhs2 = self.config.parser_root.find_blocks("viRtuAlHoST")
self.assertEqual(len(vhs), len(vhs2))
def test_find_directive_found(self):
directives = self.config.parser_root.find_directives("Listen")
self.assertEqual(len(directives), 1)
self.assertTrue(directives[0].filepath.endswith("/apache2/ports.conf"))
self.assertEqual(directives[0].parameters, (u'80',))
def test_find_directive_notfound(self):
directives = self.config.parser_root.find_directives("Nonexistent")
self.assertEqual(len(directives), 0)
def test_find_directive_from_block(self):
blocks = self.config.parser_root.find_blocks("virtualhost")
found = False
for vh in blocks:
if vh.filepath.endswith("sites-enabled/certbot.conf"):
servername = vh.find_directives("servername")
self.assertEqual(servername[0].parameters[0], "certbot.demo")
found = True
self.assertTrue(found)
def test_find_comments(self):
rootcomment = self.config.parser_root.find_comments(
"This is the main Apache server configuration file. "
)
self.assertEqual(len(rootcomment), 1)
self.assertTrue(rootcomment[0].filepath.endswith(
"debian_apache_2_4/multiple_vhosts/apache2/apache2.conf"
))
def test_set_parameters(self):
servernames = self.config.parser_root.find_directives("servername")
names = [] # type: List[str]
for servername in servernames:
names += servername.parameters
self.assertFalse("going_to_set_this" in names)
servernames[0].set_parameters(["something", "going_to_set_this"])
servernames = self.config.parser_root.find_directives("servername")
names = []
for servername in servernames:
names += servername.parameters
self.assertTrue("going_to_set_this" in names)
def test_set_parameters_atinit(self):
from certbot_apache._internal.augeasparser import AugeasDirectiveNode
servernames = self.config.parser_root.find_directives("servername")
setparam = "certbot_apache._internal.augeasparser.AugeasDirectiveNode.set_parameters"
with mock.patch(setparam) as mock_set:
AugeasDirectiveNode(
name=servernames[0].name,
parameters=["test", "setting", "these"],
ancestor=assertions.PASS,
metadata=servernames[0].metadata
)
self.assertTrue(mock_set.called)
self.assertEqual(
mock_set.call_args_list[0][0][0],
["test", "setting", "these"]
)
def test_set_parameters_delete(self):
# Set params
servername = self.config.parser_root.find_directives("servername")[0]
servername.set_parameters(["thisshouldnotexistpreviously", "another",
"third"])
# Delete params
servernames = self.config.parser_root.find_directives("servername")
found = False
for servername in servernames:
if "thisshouldnotexistpreviously" in servername.parameters:
self.assertEqual(len(servername.parameters), 3)
servername.set_parameters(["thisshouldnotexistpreviously"])
found = True
self.assertTrue(found)
# Verify params
servernames = self.config.parser_root.find_directives("servername")
found = False
for servername in servernames:
if "thisshouldnotexistpreviously" in servername.parameters:
self.assertEqual(len(servername.parameters), 1)
servername.set_parameters(["thisshouldnotexistpreviously"])
found = True
self.assertTrue(found)
def test_add_child_comment(self):
newc = self.config.parser_root.add_child_comment("The content")
comments = self.config.parser_root.find_comments("The content")
self.assertEqual(len(comments), 1)
self.assertEqual(
newc.metadata["augeaspath"],
comments[0].metadata["augeaspath"]
)
self.assertEqual(newc.comment, comments[0].comment)
def test_delete_child(self):
listens = self.config.parser_root.find_directives("Listen")
self.assertEqual(len(listens), 1)
self.config.parser_root.delete_child(listens[0])
listens = self.config.parser_root.find_directives("Listen")
self.assertEqual(len(listens), 0)
def test_delete_child_not_found(self):
listen = self.config.parser_root.find_directives("Listen")[0]
listen.metadata["augeaspath"] = "/files/something/nonexistent"
self.assertRaises(
errors.PluginError,
self.config.parser_root.delete_child,
listen
)
def test_add_child_block(self):
nb = self.config.parser_root.add_child_block(
"NewBlock",
["first", "second"]
)
rpath, _, directive = nb.metadata["augeaspath"].rpartition("/")
self.assertEqual(
rpath,
self.config.parser_root.metadata["augeaspath"]
)
self.assertTrue(directive.startswith("NewBlock"))
def test_add_child_block_beginning(self):
self.config.parser_root.add_child_block(
"Beginning",
position=0
)
parser = self.config.parser_root.parser
root_path = self.config.parser_root.metadata["augeaspath"]
# Get first child
first = parser.aug.match("{}/*[1]".format(root_path))
self.assertTrue(first[0].endswith("Beginning"))
def test_add_child_block_append(self):
self.config.parser_root.add_child_block(
"VeryLast",
)
parser = self.config.parser_root.parser
root_path = self.config.parser_root.metadata["augeaspath"]
# Get last child
last = parser.aug.match("{}/*[last()]".format(root_path))
self.assertTrue(last[0].endswith("VeryLast"))
def test_add_child_block_append_alt(self):
self.config.parser_root.add_child_block(
"VeryLastAlt",
position=99999
)
parser = self.config.parser_root.parser
root_path = self.config.parser_root.metadata["augeaspath"]
# Get last child
last = parser.aug.match("{}/*[last()]".format(root_path))
self.assertTrue(last[0].endswith("VeryLastAlt"))
def test_add_child_block_middle(self):
self.config.parser_root.add_child_block(
"Middle",
position=5
)
parser = self.config.parser_root.parser
root_path = self.config.parser_root.metadata["augeaspath"]
# Augeas indices start at 1 :(
middle = parser.aug.match("{}/*[6]".format(root_path))
self.assertTrue(middle[0].endswith("Middle"))
def test_add_child_block_existing_name(self):
parser = self.config.parser_root.parser
root_path = self.config.parser_root.metadata["augeaspath"]
# There already exists a single VirtualHost in the base config
new_block = parser.aug.match("{}/VirtualHost[2]".format(root_path))
self.assertEqual(len(new_block), 0)
vh = self.config.parser_root.add_child_block(
"VirtualHost",
)
new_block = parser.aug.match("{}/VirtualHost[2]".format(root_path))
self.assertEqual(len(new_block), 1)
self.assertTrue(vh.metadata["augeaspath"].endswith("VirtualHost[2]"))
def test_node_init_error_bad_augeaspath(self):
from certbot_apache._internal.augeasparser import AugeasBlockNode
parameters = {
"name": assertions.PASS,
"ancestor": None,
"filepath": assertions.PASS,
"metadata": {
"augeasparser": mock.Mock(),
"augeaspath": "/files/path/endswith/slash/"
}
}
self.assertRaises(
errors.PluginError,
AugeasBlockNode,
**parameters
)
def test_node_init_error_missing_augeaspath(self):
from certbot_apache._internal.augeasparser import AugeasBlockNode
parameters = {
"name": assertions.PASS,
"ancestor": None,
"filepath": assertions.PASS,
"metadata": {
"augeasparser": mock.Mock(),
}
}
self.assertRaises(
errors.PluginError,
AugeasBlockNode,
**parameters
)
def test_add_child_directive(self):
self.config.parser_root.add_child_directive(
"ThisWasAdded",
["with", "parameters"],
position=0
)
dirs = self.config.parser_root.find_directives("ThisWasAdded")
self.assertEqual(len(dirs), 1)
self.assertEqual(dirs[0].parameters, ("with", "parameters"))
# The new directive was added to the very first line of the config
self.assertTrue(dirs[0].metadata["augeaspath"].endswith("[1]"))
def test_add_child_directive_exception(self):
self.assertRaises(
errors.PluginError,
self.config.parser_root.add_child_directive,
"ThisRaisesErrorBecauseMissingParameters"
)
def test_parsed_paths(self):
paths = self.config.parser_root.parsed_paths()
self.assertEqual(len(paths), 6)
def test_find_ancestors(self):
vhsblocks = self.config.parser_root.find_blocks("VirtualHost")
macro_test = False
nonmacro_test = False
for vh in vhsblocks:
if "/macro/" in vh.metadata["augeaspath"].lower():
ancs = vh.find_ancestors("Macro")
self.assertEqual(len(ancs), 1)
macro_test = True
else:
ancs = vh.find_ancestors("Macro")
self.assertEqual(len(ancs), 0)
nonmacro_test = True
self.assertTrue(macro_test)
self.assertTrue(nonmacro_test)
def test_find_ancestors_bad_path(self):
self.config.parser_root.metadata["augeaspath"] = ""
ancs = self.config.parser_root.find_ancestors("Anything")
self.assertEqual(len(ancs), 0)
| 39.922156
| 123
| 0.636118
|
9407b0965d5f640b88dae5eb1c60ad57ba44eb75
| 954
|
py
|
Python
|
python/httpclient/http1.py
|
erisky/my_practices
|
10af209a163fb3e527b9c22c946365f1f317c07b
|
[
"MIT"
] | null | null | null |
python/httpclient/http1.py
|
erisky/my_practices
|
10af209a163fb3e527b9c22c946365f1f317c07b
|
[
"MIT"
] | null | null | null |
python/httpclient/http1.py
|
erisky/my_practices
|
10af209a163fb3e527b9c22c946365f1f317c07b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import httplib
import sys
import os
def GetStockPriceFromYahoo(stockid):
yahoo = httplib.HTTPConnection('tw.stock.yahoo.com')
req = "/q/q?s=" + str(stockid)
# print req
yahoo.request("GET", req)
resp1 = yahoo.getresponse()
#print resp1.status, resp1.reason
data1 = resp1.read()
if str(resp1.status) == '200':
idx = data1.find('13:')
if idx < 0:
idx = data1.find('14:')
if idx < 0:
return 0
tempstr1 = data1[idx:]
idx = tempstr1.find('<b>')
idx2 = tempstr1.find('</b>')
tempstr2 = tempstr1[(idx+3):idx2]
return float(tempstr2)
print GetStockPriceFromYahoo(6189)
print GetStockPriceFromYahoo(3211)
print GetStockPriceFromYahoo(1717)
print GetStockPriceFromYahoo(9904)
print GetStockPriceFromYahoo(1718)
print GetStockPriceFromYahoo(2605)
print GetStockPriceFromYahoo(2345)
print GetStockPriceFromYahoo(3027)
| 22.186047
| 56
| 0.658281
|
5d68ac42ee3f5fd17fc05cef3632173b9396681c
| 1,240
|
py
|
Python
|
tests/test_dataset/test_test_time_aug.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 2,261
|
2021-04-08T03:45:41.000Z
|
2022-03-31T23:37:46.000Z
|
tests/test_dataset/test_test_time_aug.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 789
|
2021-04-08T05:40:13.000Z
|
2022-03-31T09:42:39.000Z
|
tests/test_dataset/test_test_time_aug.py
|
yuexy/mmocr
|
82488024db159266e66ea6b0d6f84a5a18e87362
|
[
"Apache-2.0"
] | 432
|
2021-04-08T03:56:16.000Z
|
2022-03-30T18:44:43.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmocr.datasets.pipelines.test_time_aug import MultiRotateAugOCR
def test_resize_ocr():
input_img1 = np.ones((64, 256, 3), dtype=np.uint8)
input_img2 = np.ones((64, 32, 3), dtype=np.uint8)
rci = MultiRotateAugOCR(transforms=[], rotate_degrees=[0, 90, 270])
# test invalid arguments
with pytest.raises(AssertionError):
MultiRotateAugOCR(transforms=[], rotate_degrees=[45])
with pytest.raises(AssertionError):
MultiRotateAugOCR(transforms=[], rotate_degrees=[20.5])
# test call with input_img1
results = {'img_shape': input_img1.shape, 'img': input_img1}
results = rci(results)
assert np.allclose([64, 256, 3], results['img_shape'])
assert len(results['img']) == 1
assert len(results['img_shape']) == 1
assert np.allclose([64, 256, 3], results['img_shape'][0])
    # test call with input_img2: a tall, narrow image (h > w) should be expanded into all three rotated versions
results = {'img_shape': input_img2.shape, 'img': input_img2}
results = rci(results)
assert np.allclose([64, 32, 3], results['img_shape'])
assert len(results['img']) == 3
assert len(results['img_shape']) == 3
assert np.allclose([64, 32, 3], results['img_shape'][0])
| 35.428571
| 71
| 0.675
|
a317cb22f9e39dcd573ec56403f476a50335775d
| 1,652
|
py
|
Python
|
app/stopwatch/forms.py
|
zigellsn/JWConfStage
|
684060562a971b2dc33fe44b7f223babd4094786
|
[
"Apache-2.0"
] | null | null | null |
app/stopwatch/forms.py
|
zigellsn/JWConfStage
|
684060562a971b2dc33fe44b7f223babd4094786
|
[
"Apache-2.0"
] | 1
|
2019-02-20T21:15:08.000Z
|
2019-02-20T21:15:08.000Z
|
app/stopwatch/forms.py
|
zigellsn/JWConfStage
|
684060562a971b2dc33fe44b7f223babd4094786
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2022 Simon Zigelli
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import forms
from django.utils.translation import gettext_lazy as _
class StopwatchForm(forms.Form):
template_name = "stopwatch/form.html"
default_class = "dark:bg-gray-800 bg-white appearance-none outline-none ltr:mr-2 rtl:ml-2"
talk_name = forms.CharField(label=_("Aufgabe"), required=False, initial="",
widget=forms.TextInput(attrs={"class": default_class}))
talk_index = forms.IntegerField(required=False, initial=0, widget=forms.HiddenInput())
talk_user = forms.BooleanField(required=False, widget=forms.HiddenInput())
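    # Choice lists for the duration selects: hours 0-4, minutes/seconds 0-59.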
hours = []
for i in range(5):
hours.append((i, i))
minutes = []
for i in range(60):
minutes.append((i, i))
h = forms.ChoiceField(label=_("H"), choices=hours, widget=forms.Select(attrs={"class": default_class}))
m = forms.ChoiceField(label=_("M"), choices=minutes, widget=forms.Select(attrs={"class": default_class}))
s = forms.ChoiceField(label=_("S"), choices=minutes, widget=forms.Select(attrs={"class": default_class}))
| 44.648649
| 109
| 0.706416
|
a992d8cf9bd02c6031be3cc54ffc939d1247e2f2
| 2,835
|
py
|
Python
|
sdk/appplatform/azure-mgmt-appplatform/setup.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-06-02T08:01:35.000Z
|
2021-06-02T08:01:35.000Z
|
sdk/appplatform/azure-mgmt-appplatform/setup.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
sdk/appplatform/azure-mgmt-appplatform/setup.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-appplatform"
PACKAGE_PPRINT_NAME = "MyService Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.5.0',
'msrestazure>=0.4.32,<2.0.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-mgmt-nspkg'],
}
)
| 32.215909
| 91
| 0.603175
|
35b5e045a549908bda6489419c13c5906288f20b
| 8,332
|
py
|
Python
|
tests/python/pants_test/engine/legacy/test_address_mapper.py
|
copumpkin/pants
|
e8367cf62279eab98f4e9e4575b9a059eeb83d2f
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:24.000Z
|
2021-11-11T14:04:24.000Z
|
tests/python/pants_test/engine/legacy/test_address_mapper.py
|
copumpkin/pants
|
e8367cf62279eab98f4e9e4575b9a059eeb83d2f
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/engine/legacy/test_address_mapper.py
|
copumpkin/pants
|
e8367cf62279eab98f4e9e4575b9a059eeb83d2f
|
[
"Apache-2.0"
] | 1
|
2021-11-11T14:04:12.000Z
|
2021-11-11T14:04:12.000Z
|
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
import mock
from pants.base.specs import SiblingAddresses, SingleAddress
from pants.bin.engine_initializer import EngineInitializer
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.address_mapper import AddressMapper
from pants.engine.legacy.address_mapper import LegacyAddressMapper
from pants.engine.nodes import Throw
from pants.engine.scheduler import ExecutionResult
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump, safe_mkdir
from pants_test.engine.util import init_native
class LegacyAddressMapperTest(unittest.TestCase):
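  # Initialize the native engine once at class level so it is shared by all test cases.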
_native = init_native()
def create_build_files(self, build_root):
# Create BUILD files
# build_root:
# BUILD
# BUILD.other
# dir_a:
# BUILD
# BUILD.other
# subdir:
# BUILD
# dir_b:
# BUILD
dir_a = os.path.join(build_root, 'dir_a')
dir_b = os.path.join(build_root, 'dir_b')
dir_a_subdir = os.path.join(dir_a, 'subdir')
safe_mkdir(dir_a)
safe_mkdir(dir_b)
safe_mkdir(dir_a_subdir)
safe_file_dump(os.path.join(build_root, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(build_root, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_a, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(dir_a, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_b, 'BUILD'), 'target(name="a")')
safe_file_dump(os.path.join(dir_a_subdir, 'BUILD'), 'target(name="a")')
def create_address_mapper(self, build_root):
work_dir = os.path.join(build_root, '.pants.d')
scheduler, _, _ = EngineInitializer.setup_legacy_graph(
[],
work_dir,
build_root=build_root,
native=self._native
)
return LegacyAddressMapper(scheduler, build_root)
def test_is_valid_single_address(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
self.assertFalse(mapper.is_valid_single_address(SingleAddress('dir_a', 'foo')))
self.assertTrue(mapper.is_valid_single_address(SingleAddress('dir_a', 'a')))
with self.assertRaises(TypeError):
mapper.is_valid_single_address('foo')
def test_scan_build_files(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
build_files = mapper.scan_build_files('')
self.assertEqual(build_files,
{'BUILD', 'BUILD.other',
'dir_a/BUILD', 'dir_a/BUILD.other',
'dir_b/BUILD', 'dir_a/subdir/BUILD'})
build_files = mapper.scan_build_files('dir_a/subdir')
self.assertEqual(build_files, {'dir_a/subdir/BUILD'})
def test_scan_build_files_edge_cases(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
# A non-existent dir.
build_files = mapper.scan_build_files('foo')
self.assertEqual(build_files, set())
# A dir with no BUILD files.
safe_mkdir(os.path.join(build_root, 'empty'))
build_files = mapper.scan_build_files('empty')
self.assertEqual(build_files, set())
def test_is_declaring_file(self):
scheduler = mock.Mock()
mapper = LegacyAddressMapper(scheduler, '')
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD'))
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD.suffix'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'path/not_a_build_file'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'differing-path/BUILD'))
self.assertFalse(mapper.is_declaring_file(
BuildFileAddress(target_name='name', rel_path='path/BUILD.new'),
'path/BUILD'))
self.assertTrue(mapper.is_declaring_file(
BuildFileAddress(target_name='name', rel_path='path/BUILD'),
'path/BUILD'))
def test_addresses_in_spec_path(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.addresses_in_spec_path('dir_a')
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c')})
def test_addresses_in_spec_path_no_dir(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.addresses_in_spec_path('foo')
self.assertIn('does not match any targets.', str(cm.exception))
def test_addresses_in_spec_path_no_build_files(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
safe_mkdir(os.path.join(build_root, 'foo'))
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.addresses_in_spec_path('foo')
self.assertIn('does not match any targets.', str(cm.exception))
def test_scan_specs(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_specs([SingleAddress('dir_a', 'a'), SiblingAddresses('')])
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'), Address('dir_a', 'a')})
def test_scan_specs_bad_spec(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError) as cm:
mapper.scan_specs([SingleAddress('dir_a', 'd')])
self.assertIn('does not match any targets.', str(cm.exception))
def test_scan_addresses(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses()
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'),
Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_b', 'a'), Address('dir_a/subdir', 'a')})
def test_scan_addresses_with_root_specified(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses(os.path.join(build_root, 'dir_a'))
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_a/subdir', 'a')})
def test_scan_addresses_bad_dir(self):
# scan_addresses() should not raise an error.
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses(os.path.join(build_root, 'foo'))
self.assertEqual(addresses, set())
def test_other_throw_is_fail(self):
# scan_addresses() should raise an error if the scheduler returns an error it can't ignore.
class ThrowReturningScheduler(object):
def execution_request(self, *args):
pass
def execute(self, *args):
return ExecutionResult(None, [(('some-thing', None), Throw(Exception('just an exception')))])
with temporary_dir() as build_root:
mapper = LegacyAddressMapper(ThrowReturningScheduler(), build_root)
with self.assertRaises(LegacyAddressMapper.BuildFileScanError) as cm:
mapper.scan_addresses(os.path.join(build_root, 'foo'))
self.assertIn('just an exception', str(cm.exception))
| 41.044335
| 101
| 0.695151
|
833ddf8678643f8debeff3ea82eac15166922863
| 416
|
py
|
Python
|
data/data.py
|
NonStopEagle137/image2latex
|
6e5ace9357a6a059a0fe9ac853a579c15fd808ff
|
[
"MIT"
] | null | null | null |
data/data.py
|
NonStopEagle137/image2latex
|
6e5ace9357a6a059a0fe9ac853a579c15fd808ff
|
[
"MIT"
] | null | null | null |
data/data.py
|
NonStopEagle137/image2latex
|
6e5ace9357a6a059a0fe9ac853a579c15fd808ff
|
[
"MIT"
] | null | null | null |
import tarfile
#simple function to extract the train data
#tar_file : the path to the .tar file
#path : the path where it will be extracted
def extract(tar_file, path):
    # Check before opening: tarfile.open() would raise on a non-tar file,
    # so the error message below could never be reached otherwise.
    if tarfile.is_tarfile(tar_file):
        opened_tar = tarfile.open(tar_file)
        opened_tar.extractall(path)
        opened_tar.close()
    else:
        print("The tar file you entered is not a tar file")
# Raw string avoids the backslash escape sequences in the Windows path.
extract('formula_images.tar.gz', r'D:\FSDL\im2latex\data')
| 29.714286
| 59
| 0.709135
|
2269531fac8e07d5951561bda4f9e7b862855817
| 2,081
|
py
|
Python
|
FNN_skills/scripts/generate_data.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
FNN_skills/scripts/generate_data.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
FNN_skills/scripts/generate_data.py
|
SMZCC/TF-deep-learn
|
7517685d8b4fb51f1823d4595165538305739fc7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# date: 2019/1/14, 16:08
# name: smz
import numpy as np
from FNN_skills.utils.data import gen_data
from FNN_skills.utils.data import gen_one_class_data
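# Sample a small 4-class Gaussian-blob dataset and collapse the labels to two classes (Y % 2).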
def generate_data():
num_classes = 4
num_fields = 2
num_samples = 320
means = np.random.rand(num_fields)
covs = np.eye(num_fields, num_fields)
diffs = [[3.0, 0.], [3.0, 3.0], [0., 3.0]]
X, Y = gen_data(num_classes, num_samples, means, covs, diffs, False)
Y = Y % 2
np.save("../data/train_X.npy", X)
np.save("../data/train_Y.npy", Y)
def generate_four_classes():
num_fields = 2
means = np.random.rand(num_fields)
covs = np.eye(num_fields, num_fields)
num_samples_train = 10000
num_samples_test = 2500
classes = 4
for class_ in range(classes):
if class_ == 0:
X, Y = gen_one_class_data(num_samples_train, means, covs, class_, classes)
test_X, test_Y = gen_one_class_data(num_samples_test, means, covs, class_, classes)
elif class_ == 1:
X, Y = gen_one_class_data(num_samples_train, means + [10., 0.], covs, class_, classes)
test_X, test_Y = gen_one_class_data(num_samples_test, means + [10., 0.], covs, class_, classes)
elif class_ == 2:
X, Y = gen_one_class_data(num_samples_train, means + [10., 10.], covs, class_, classes)
test_X, test_Y = gen_one_class_data(num_samples_test, means + [10., 10.], covs, class_, classes)
elif class_ == 3:
X, Y = gen_one_class_data(num_samples_train, means + [0., 10.], covs, class_, classes)
test_X, test_Y = gen_one_class_data(num_samples_test, means + [0., 10.], covs, class_, classes)
np.save("../data/means.npy", np.asarray(means))
np.save("../data/class_%s_train_X.npy"%(class_), X)
np.save("../data/class_%s_train_Y.npy"%(class_), Y)
np.save("../data/class_%s_test_X.npy"%(class_), test_X)
np.save("../data/class_%s_test_Y.npy"%(class_), test_Y)
if __name__ == "__main__":
# generate_data()
generate_four_classes()
| 34.114754
| 108
| 0.632869
|