| blob_id<br>stringlengths 40-40 | language<br>stringclasses 1 value | repo_name<br>stringlengths 5-133 | path<br>stringlengths 2-333 | src_encoding<br>stringclasses 30 values | length_bytes<br>int64 18-5.47M | score<br>float64 2.52-5.81 | int_score<br>int64 3-5 | detected_licenses<br>listlengths 0-67 | license_type<br>stringclasses 2 values | text<br>stringlengths 12-5.47M | download_success<br>bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
123ca0d1ad25bc147b91176e98d2ebb5003c020d
|
Python
|
vstarman/python_codes
|
/5day/02.process的使用.py
|
UTF-8
| 813
| 3.1875
| 3
|
[] |
no_license
|
import multiprocessing, time
def show(title, name, age):
for i in range(10):
print(i, " %s; %s; %d" % (title, name, age))
time.sleep(0.1)
if __name__ == '__main__':
    # Create the process; pass positional args as a tuple and keyword args as a dict (dict keys must match the parameter names)
sub_process = multiprocessing.Process(target=show, args=("hello",), kwargs={"name": "Samuel", "age": 25})
    # Daemon flag: the child process lives and dies with the main process
sub_process.daemon = True
    # Start the process
sub_process.start()
# time.sleep(0.5)
# print("over")
time.sleep(0.5)
    # sub_process.terminate()  # terminate the child immediately, whether or not its task has finished
print("over")
exit()
    # sub_process.join()  # wait for the child to finish, optionally with a timeout in seconds
# print("over")
    # pool.apply_async()  # stray call: `pool` is never defined and this line is unreachable after exit()
| true
|
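The stray `pool.apply_async()` at the end hints at a `multiprocessing.Pool` variant; a minimal sketch of that pattern (the worker function is simplified, and the names here are illustrative rather than taken from the original file):

```python
import multiprocessing, time

def show(title, name, age):
    print("%s; %s; %d" % (title, name, age))
    time.sleep(0.1)

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=2)
    # apply_async schedules the call on a worker process and returns an AsyncResult
    result = pool.apply_async(show, args=("hello", "Samuel", 25))
    result.get(timeout=5)   # block until the task finishes (show returns None)
    pool.close()
    pool.join()
```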
a2b5d76b2377cb73e68be6b751891fda59b38cc2
|
Python
|
perplexes/whysaurus
|
/archive/wikiParser.py
|
UTF-8
| 9,591
| 2.859375
| 3
|
[] |
no_license
|
import sys, os, re, cgi, glob, time
class Parser(object):
EOF = 0
def __init__(self, write=None, error=None):
self.text = None
self.pos = 0
if write is None:
write = sys.stdout.write
self.write = write
if error is None:
# example: sys.stderr.write("%s: %s" % (e, msg))
error = lambda e, msg: None
self.error = error
def find(self, tokens):
"""For each token in tokens, if the current position matches one of
those tokens, return True. Return False otherwise."""
for token in tokens:
if token == self.EOF:
if self.pos == len(self.text):
return True
elif self.text[self.pos:].startswith(token):
return True
return False
def eat(self, token):
"""Eat the length of token if token's an int, or the token itself."""
if type(token) is int:
if (self.pos + token) > len(self.text):
self.error("UnexpectedEOF", "Reached end of file")
return None
s = self.text[self.pos:self.pos+token]
self.pos += token
return s
else:
assert self.find([token])
self.pos += len(token)
if self.pos > len(self.text):
self.error("UnexpectedEOF", "Reached end of file")
return None
return token
def get(self, tokens, start=None, finish=None):
if start is not None:
self.eat(start)
content = ''
while not self.find(tokens):
s = self.eat(1)
if s is not None:
content += s
else: return content # reached EOF
if finish is not None:
self.eat(finish)
return content
r_tag = re.compile(r'(?<!\{)\{(?!\{)([^}]+)\}')
r_name = re.compile(r'^[A-Za-z0-9-]+$')
r_uri = re.compile(r'^[A-Za-z][A-Za-z0-9+.-]*:[^<>"]+$')
r_emdash = re.compile(r'[A-Za-z0-9"]--(?=[A-Za-z0-9"{])')
r_alpha = re.compile(r'[A-Za-z]+')
def makeID(s, current):
s = (''.join(r_alpha.findall(s)) or 'id') + str(len(s))
while s in current:
s += 'n'
return s
class TextParser(Parser):
LIST = 0
HEADING = 1
PRE = 2
QUOT = 3
PARAGRAPH = 4
LI_START = '* '
LI_OPEN = '\n* '
PRE_START = '{{{\n'
PRE_END = '\n}}}'
QUOT_START = '[[[\n'
QUOT_END = '\n]]]'
H_START = '@ '
SEPERATOR = '\n\n'
def __init__(self, write=None, error=None, exists=None):
Parser.__init__(self, write=write, error=error)
if exists is None:
exists = lambda: True
self.exists = exists
self.rawlinks = []
self.ids = []
def __call__(self, s):
self.text = s
self.normalize()
self.parse()
def normalize(self):
self.text = self.text.strip() # ('\t\r\n ')
self.text = self.text.replace('\r\n', '\n')
self.text = self.text.replace('\r', '\n')
self.text = re.sub(r'(?sm)\n[ \t]*\n', '\n\n', self.text)
def parse(self):
blocks = []
while 1:
blocks.append(self.blockElement())
if self.find([Parser.EOF]): break
for block in blocks:
blocktype, values = block[0], block[1:]
{self.LIST: self.listElement,
self.HEADING: self.headingElement,
self.PRE: self.preElement,
self.QUOT: self.quotElement,
self.PARAGRAPH: self.paragraphElement
}[blocktype](*values)
def blockElement(self):
self.whitespace()
if self.find([self.LI_START]):
content = self.get([self.SEPERATOR, Parser.EOF], self.LI_START)
content = tuple(content.split('\n* '))
return (self.LIST,) + content
elif self.find([self.H_START]):
content = self.get(['\n', Parser.EOF], self.H_START)
return (self.HEADING, content)
elif self.find([self.PRE_START]):
content = self.get([self.PRE_END], self.PRE_START, self.PRE_END)
return (self.PRE, content)
elif self.find([self.QUOT_START]):
content = self.get([self.QUOT_END], self.QUOT_START, self.QUOT_END)
if self.find([' - ']):
citation = self.get(['\n', Parser.EOF], ' - ')
if not (r_uri.match(citation) and citation):
self.error('CitationURIError', # @@ allow other stuff?
'Citation (%s) must be a URI.' % citation)
else: citation = None
return (self.QUOT, content, citation)
else: return (self.PARAGRAPH, self.get([self.SEPERATOR, Parser.EOF]))
def whitespace(self):
while self.find(' \t\n'):
self.eat(1)
def listElement(self, *items):
self.write('<ul>')
self.write('\n')
for item in items:
self.write('<li>')
self.write(self.wikiParse(item))
self.write('</li>')
self.write('\n')
self.write('</ul>')
self.write('\n')
def headingElement(self, content):
content = self.wikiParse(content)
newid = makeID(content, self.ids)
self.ids.append(newid)
self.write('<h2 id="%s">' % newid)
self.write(content)
self.write('</h2>')
self.write('\n')
def preElement(self, content):
self.write('<pre>')
self.write('\n')
self.write(self.wikiParse(content, level=0))
self.write('\n')
self.write('</pre>')
self.write('\n')
def quotElement(self, content, cite):
self.write('<blockquote')
if cite:
cite = self.iriParse(cite)
cite = cgi.escape(cite, quote=1) # @@
self.write(' cite="%s"' % cite)
self.write('>')
self.write('\n')
self.write('<pre class="quote">') # @@
self.write('\n')
self.write(self.wikiParse(content, level=0))
self.write('\n')
self.write('</pre>')
self.write('\n')
self.write('</blockquote>')
self.write('\n')
def paragraphElement(self, content):
self.write('<p>')
self.write(self.wikiParse(content))
self.write('</p>')
self.write('\n')
def wikiParse(self, s, level=None):
if level is None:
level = 1
# @@ use a proper parser, or catch the matches
pos, result = 0, ''
while pos < len(s):
m = r_tag.match(s[pos:])
if m:
span = m.span()
result += self.tag(s[pos:pos+span[1]], level=level)
pos += span[1] - span[0]
else:
m = r_emdash.match(s[pos:])
if m and (level > 0): # unicode must be explicit in <pre>
result += s[pos] + '—' # u'\u2014'.encode('utf-8')
pos += 3
elif (s[pos] == '{') and (s[pos+1:pos+2] != '{') and (level > 0):
                    if (pos < 10): area = s[0:pos+10]
else: area = s[pos-10:pos+10]
msg = "The '{' must be escaped as '{{' in %r" % area
raise "WikiParseError", msg
elif (s[pos:pos+2] == '{{'): # d8uv bug "and (level > 0): "
result += '{'
pos += 2
                elif s[pos] == '&':
                    result += '&amp;'  # escape for HTML output
                    pos += 1
                elif s[pos] == '<':
                    result += '&lt;'   # escape for HTML output
                    pos += 1
else:
result += s[pos]
pos += 1
return result
def iriParse(self, uri):
r_unicode = re.compile(r'\{U\+([1-9A-F][0-9A-F]{1,5})\}')
def escape(m):
bytes = unichr(int(m.group(1), 16)).encode('utf-8')
return ''.join(['%%%02X' % ord(s) for s in bytes])
return r_unicode.sub(escape, uri)
def unicodeify(self, s):
if len(s) not in (2, 4, 6):
raise ValueError, 'Must be of length 2, 4, or 6'
for letter in 'abcdef':
if letter in s:
raise ValueError, 'Unicode escapes must be lower-case'
i = int(s.lstrip('0'), 16)
raw = [0x9, 0xA, 0xD] + list(xrange(0x20, 0x7E))
del raw[raw.index(0x2D)], raw[raw.index(0x5D)], raw[raw.index(0x7D)]
if i in raw: return chr(i) # printable - '-]}'
elif i > 0x10FFFF:
raise ValueError, 'Codepoint is out of range'
return '&#x%s;' % s
def tag(self, s, level=None):
if level is None:
level = 1 # @@ { {U+..}?
s = s[1:-1] # @@ or s.strip('{}')
if s.startswith('U+'):
try: result = self.unicodeify(s[2:])
except ValueError: result = cgi.escape('{%s}' % s)
elif s == '$timenow':
result = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
elif s == '$datenow':
result = time.strftime('%Y-%m-%d', time.gmtime())
elif level < 1:
result = '{' + self.wikiParse('%s}' % s)
elif s.startswith('* '):
result = '<strong>%s</strong>' % s[2:]
elif s.startswith('#'):
i = s.find(' ')
href, title = s[:i], s[i+1:]
result = '<a href="%s">%s</a>' % (href, title)
elif not re.compile(r'[A-Za-z0-9_.-]').match(s):
result = cgi.escape('{%s}' % s)
else:
self.rawlinks.append(s)
words = s.split(' ')
words = [word.strip() for word in words if word.strip()]
if ('/' not in words[0]) and (':' not in words[0]): # @@!
wn = ''.join(words)
uri = './%s' % wn
if not self.exists(wn):
cls = ' class="nonexistent"'
else: cls = ''
else: uri, s, cls = words[0], ' '.join(words[1:]), ''
uri, s = cgi.escape(uri, quote=1), cgi.escape(s)
result = '<a href="%s"%s>%s</a>' % (uri, cls, s)
return result
| true
|
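The `find`/`eat`/`get` trio above is a small cursor-based scanner (the module itself is Python 2 era code). A stripped-down Python 3 sketch of the same pattern, not taken from the original:

```python
class MiniScanner:
    """Cursor-based scanner: find() peeks, eat() consumes, get() collects."""
    EOF = 0

    def __init__(self, text):
        self.text = text
        self.pos = 0

    def find(self, tokens):
        # True if any token (or EOF) starts at the current position.
        for token in tokens:
            if token == self.EOF:
                if self.pos == len(self.text):
                    return True
            elif self.text[self.pos:].startswith(token):
                return True
        return False

    def eat(self, n=1):
        s = self.text[self.pos:self.pos + n]
        self.pos += n
        return s

    def get(self, stop_tokens):
        # Collect characters until one of stop_tokens (or EOF) is reached.
        out = ''
        while not self.find(stop_tokens):
            out += self.eat()
        return out

s = MiniScanner("title\nbody text")
print(s.get(['\n', MiniScanner.EOF]))   # -> 'title'
```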
3493e974018bb57ba17c9eb86228e4314b56baa7
|
Python
|
refresh6724/APS
|
/Jungol/Lv1_LCoder_Python/pyb0_리스트2/Main_JO_906_리스트2_자가진단5.py
|
UTF-8
| 505
| 3.546875
| 4
|
[] |
no_license
|
# Read 10 integers, then print the largest of those less than 100
# and the smallest of those greater than or equal to 100.
# (Each integer is at least 1 and less than 10000; in either case, if no such number exists, print 100.)
a = list(map(int, input().split()))
lt100 = list(filter(lambda x: x < 100, a))
gte100 = list(filter(lambda x: x>= 100, a))
print(max(lt100) if lt100 else 100, min(gte100) if gte100 else 100)
| true
|
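A worked example of the logic above (the exercise expects exactly ten integers; fewer are used here just to illustrate):

```python
a = [50, 120, 99, 300, 1]
lt100 = list(filter(lambda x: x < 100, a))    # [50, 99, 1]
gte100 = list(filter(lambda x: x >= 100, a))  # [120, 300]
print(max(lt100) if lt100 else 100,
      min(gte100) if gte100 else 100)         # -> 99 120
```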
dc347924d3fb737055550098015a41dfde6343c3
|
Python
|
byu-dml/d3m-dynamic-neural-architecture
|
/dna/data.py
|
UTF-8
| 23,907
| 2.84375
| 3
|
[] |
no_license
|
import json
import os
import random
import tarfile
import typing
from collections import defaultdict
import itertools
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from dna.utils import get_values_by_path
def group_json_objects(json_objects: typing.List[typing.Dict], group_key: str) -> dict:
"""
Groups JSON data by group_key.
Parameters:
-----------
json_objects: List[Dict], JSON compatible list of objects.
group_key: str, json_objects is grouped by group_key. group_key must be a
key into each object in json_objects and the corresponding value must
be hashable. group_key can be a '.' delimited string to access deeply
nested fields.
Returns:
--------
    A dict mapping each group to the list of indices into json_objects.
"""
grouped_objects = {}
for i, obj in enumerate(json_objects):
group = obj
for key_part in group_key.split('.'):
group = group[key_part]
if not group in grouped_objects:
grouped_objects[group] = []
grouped_objects[group].append(i)
return grouped_objects
def group_data_using_grouped_indices(
data: typing.List[typing.Dict], grouped_data_indices: dict, groups: list
) -> dict:
"""
Takes `data`, `grouped_data_indices` (a mapping of group names to data indices),
and `groups` (the names of the groups of data we want to keep), and returns a
mapping of group names to the actual data instances (not just the indices), to
make the data more easily useable.
"""
grouped_data = defaultdict(list)
for group in groups:
for i in grouped_data_indices[group]:
grouped_data[group].append(data[i])
return grouped_data
def flatten_grouped_data(
grouped_data: dict,
) -> typing.List[typing.Dict]:
"""
Flattens a mapping of group names to group members to just
a list of members.
"""
# Concatenate all the group lists into one list.
return list(itertools.chain.from_iterable(grouped_data.values()))
def get_coverage(
data: list,
coverage_key: str
) -> set:
"""
Gets a set of all unique values found under `coverage_key`
for all the data within `data`.
"""
coverage_path = coverage_key.split(".")
return set(get_values_by_path(data, coverage_path))
def ensure_coverage(
train_data_grouped: dict,
test_data_grouped: dict,
coverage_key: str,
seed: int
) -> tuple:
"""
Takes the group split `split_data_by_group` has found and ensures that
all unique values of `coverage_key` are found at least once in the training
set. Mutates the train and test sets to ensure coverage.
"""
rng = random.Random()
rng.seed(seed)
train_coverage = get_coverage(flatten_grouped_data(train_data_grouped), coverage_key)
test_coverage = get_coverage(flatten_grouped_data(test_data_grouped), coverage_key)
while len(test_coverage - train_coverage) > 0:
# The test set has unique values for the coverage key that are not
# found inside the training set.
# 1. Find groups in the test set that have primitives not used in
# the training set.
test_data_grouped_not_covered = {
group_name: instances
for group_name, instances
in test_data_grouped.items()
if len(get_coverage(instances, coverage_key) - train_coverage) > 0
}
# 2. Randomly select one of those groups, and randomly select a training
# set group, and swap them.
test_group_name, test_group_instances = rng.choice(list(test_data_grouped_not_covered.items()))
train_group_name, train_group_instances = rng.choice(list(train_data_grouped.items()))
del train_data_grouped[train_group_name]
train_data_grouped[test_group_name] = test_group_instances
del test_data_grouped[test_group_name]
test_data_grouped[train_group_name] = train_group_instances
# 3. Repeat that process until there are no primitives in the test set
# that are not also present in the training set.
train_coverage = get_coverage(flatten_grouped_data(train_data_grouped), coverage_key)
test_coverage = get_coverage(flatten_grouped_data(test_data_grouped), coverage_key)
return train_data_grouped, test_data_grouped
def split_data_by_group(
data: typing.List[typing.Dict],
group_by_key: str,
coverage_key: str,
test_size: typing.Union[int, float],
seed: int,
):
grouped_data_indices = group_json_objects(data, group_by_key)
groups = list(grouped_data_indices.keys())
if 0 < test_size < 1:
test_size = int(round(test_size * len(groups)))
if test_size <= 0 or len(groups) <= test_size:
raise ValueError('invalid test size: {}'.format(test_size))
rng = random.Random()
rng.seed(seed)
rng.shuffle(groups)
train_groups = groups[test_size:]
assert len(train_groups) == len(groups) - test_size
train_data_grouped = group_data_using_grouped_indices(data, grouped_data_indices, train_groups)
test_groups = groups[:test_size]
assert len(test_groups) == test_size
test_data_grouped = group_data_using_grouped_indices(data, grouped_data_indices, test_groups)
train_data_grouped, test_data_grouped = ensure_coverage(
train_data_grouped, test_data_grouped, coverage_key, seed
)
train_data = flatten_grouped_data(train_data_grouped)
test_data = flatten_grouped_data(test_data_grouped)
return train_data, test_data
def _extract_tarfile(path):
assert tarfile.is_tarfile(path)
dirname = os.path.dirname(path)
with tarfile.open(path, 'r:*') as tar:
members = tar.getmembers()
if len(members) != 1:
raise ValueError('Expected tar file with 1 member, but got {}'.format(len(members)))
tar.extractall(os.path.dirname(path))
extracted_path = os.path.join(dirname, tar.getmembers()[0].name)
return extracted_path
def get_data(path):
if tarfile.is_tarfile(path):
path = _extract_tarfile(path)
with open(path, 'r') as f:
data = json.load(f)
return data
class DropMissingValues:
    def __init__(self, values_to_drop=None):
        # avoid a shared mutable default argument across instances
        self.values_to_drop = list(values_to_drop) if values_to_drop else []
def fit(
self, data: typing.List[typing.Dict[str, typing.Union[int, float]]]
):
        for key, is_missing in pd.DataFrame(data).isna().any().items():
if is_missing:
self.values_to_drop.append(key)
def predict(
self, data: typing.List[typing.Dict[str, typing.Union[int, float]]]
):
for instance in data:
for key in self.values_to_drop:
instance.pop(key, None)
return data
class StandardScaler:
"""
Transforms data by subtracting the mean and scaling by the standard
deviation. Drops columns that have 0 standard deviation. Clips values to
numpy resolution, min, and max.
"""
def __init__(self):
self.means = None
self.stds = None
def fit(
self, data: typing.List[typing.Dict[str, typing.Union[int, float]]]
):
values_map = {}
for instance in data:
for key, value in instance.items():
if key not in values_map:
values_map[key] = []
values_map[key].append(value)
self.means = {}
self.stds = {}
for key, values in values_map.items():
self.means[key] = np.mean(values)
self.stds[key] = np.std(values, ddof=1)
def predict(
self, data: typing.List[typing.Dict[str, typing.Union[int, float]]]
):
if self.means is None or self.stds is None:
raise Exception('StandardScaler not fit')
transformed_data = []
for instance in data:
transformed_instance = {}
for key, value in instance.items():
if self.stds[key] != 0: # drop columns with 0 std dev
transformed_instance[key] = (value - self.means[key]) / self.stds[key]
transformed_data.append(transformed_instance)
return transformed_data
def encode_dag(dag: typing.Sequence[typing.Sequence[typing.Any]]):
"""
    Converts a directed acyclic graph (DAG) to a string. If two DAGs have the same encoding string, then they are equal.
However, two isomorphic DAGs may have different encoding strings.
Parameters
----------
dag: typing.List[typing.List[typing.Any]]
        A representation of a DAG. Each element in the outer list represents a vertex. Each inner list (a vertex)
        contains references to other vertices in the outer list, representing edges.
"""
return ''.join(''.join(str(edge) for edge in vertex) for vertex in dag)
def filter_metafeatures(metafeatures: dict, metafeature_subset: str):
landmarker_key_part1 = 'ErrRate'
landmarker_key_part2 = 'Kappa'
metafeature_keys = list(metafeatures.keys())
if metafeature_subset == 'landmarkers':
for metafeature_key in metafeature_keys:
if landmarker_key_part1 not in metafeature_key and landmarker_key_part2 not in metafeature_key:
metafeatures.pop(metafeature_key)
elif metafeature_subset == 'non-landmarkers':
for metafeature_key in metafeature_keys:
if landmarker_key_part1 in metafeature_key or landmarker_key_part2 in metafeature_key:
metafeatures.pop(metafeature_key)
return metafeatures
def preprocess_data(train_data, test_data, metafeature_subset: str):
for instance in train_data:
instance['pipeline_id'] = instance['pipeline']['id']
for instance in test_data:
instance['pipeline_id'] = instance['pipeline']['id']
train_metafeatures = []
for instance in train_data:
metafeatures = filter_metafeatures(instance['metafeatures'], metafeature_subset)
train_metafeatures.append(metafeatures)
for step in instance['pipeline']['steps']:
step['name'] = step['name'].replace('.', '_')
test_metafeatures = []
for instance in test_data:
metafeatures = filter_metafeatures(instance['metafeatures'], metafeature_subset)
test_metafeatures.append(metafeatures)
for step in instance['pipeline']['steps']:
step['name'] = step['name'].replace('.', '_')
# drop metafeature if missing for any instance
dropper = DropMissingValues()
dropper.fit(train_metafeatures)
train_metafeatures = dropper.predict(train_metafeatures)
test_metafeatures = dropper.predict(test_metafeatures)
    # scale data to zero mean and unit standard deviation
scaler = StandardScaler()
scaler.fit(train_metafeatures)
train_metafeatures = scaler.predict(train_metafeatures)
test_metafeatures = scaler.predict(test_metafeatures)
# convert from dict to list
for instance, mf_instance in zip(train_data, train_metafeatures):
instance['metafeatures'] = [value for key, value in sorted(mf_instance.items())]
pipeline_dag = (step['inputs'] for step in instance['pipeline']['steps'])
instance['pipeline_structure'] = encode_dag(pipeline_dag)
for instance, mf_instance in zip(test_data, test_metafeatures):
instance['metafeatures'] = [value for key, value in sorted(mf_instance.items())]
pipeline_dag = (step['inputs'] for step in instance['pipeline']['steps'])
instance['pipeline_structure'] = encode_dag(pipeline_dag)
return train_data, test_data
class Dataset(torch.utils.data.Dataset):
"""
    A subclass of torch.utils.data.Dataset for handling simple JSON structured data.
Parameters:
-----------
    data: List[Dict], JSON structured data.
features_key: str, the key into each element of data whose value is a list
of features used for input to a PyTorch network.
target_key: str, the key into each element of data whose value is the
target used for a PyTorch network.
device": str, the device onto which the data will be loaded
"""
def __init__(
self, data: typing.List[typing.Dict], features_key: str,
target_key: str, y_dtype: typing.Any, device: str
):
self.data = data
self.features_key = features_key
self.target_key = target_key
self.y_dtype = y_dtype
self.device = device
def __getitem__(self, item: int):
x = torch.tensor(self.data[item][self.features_key], dtype=torch.float32, device=self.device)
y = torch.tensor(self.data[item][self.target_key], dtype=self.y_dtype, device=self.device)
return x, y
def __len__(self):
return len(self.data)
class RandomSampler(torch.utils.data.Sampler):
"""
Samples indices uniformly without replacement.
Parameters
----------
n: int
the number of indices to sample
seed: int
used to reproduce randomization
"""
def __init__(self, n, seed):
self.n = n
self._indices = list(range(n))
self._random = random.Random()
self._random.seed(seed)
def __iter__(self):
self._random.shuffle(self._indices)
return iter(self._indices)
def __len__(self):
return self.n
class GroupDataLoader(object):
"""
Batches a dataset for PyTorch Neural Network training. Partitions the
dataset so that batches belong to the same group.
Parameters:
-----------
data: List[Dict], JSON compatible list of objects representing a dataset.
dataset_class must know how to parse the data given dataset_params.
group_key: str, pipeline run data is grouped by group_key and each
batch of data comes from only one group. group_key must be a key into
each element of the pipeline run data. the value of group_key must be
hashable.
dataset_class: Type[torch.utils.data.Dataset], the class used to make
dataset instances after the dataset is partitioned.
dataset_params: dict, extra parameters needed to instantiate dataset_class
batch_size: int, the number of data points in each batch
drop_last: bool, default False. whether to drop the last incomplete batch.
shuffle: bool, default True. whether to randomize the batches.
"""
def __init__(
self, data: typing.List[typing.Dict], group_key: str,
dataset_class: typing.Type[torch.utils.data.Dataset], dataset_params: dict,
batch_size: int, drop_last: bool, shuffle: bool, seed: int
):
self.data = data
self.group_key = group_key
self.dataset_class = dataset_class
self.dataset_params = dataset_params
self.batch_size = batch_size
self.drop_last = drop_last
self.shuffle = shuffle
self.seed = seed
self._random = random.Random()
self._random.seed(seed)
self.old_indices = []
self._init_dataloaders()
self._init_group_metadataloader()
def _init_dataloaders(self):
"""
Groups self.data based on group_key. Creates a
torch.utils.data.DataLoader for each group, using self.dataset_class.
"""
# group the data
grouped_data = group_json_objects(self.data, self.group_key)
# create dataloaders
self._group_dataloaders = {}
for group, group_indices in grouped_data.items():
self.old_indices += group_indices
group_data = [self.data[i] for i in group_indices]
group_dataset = self.dataset_class(group_data, **self.dataset_params)
new_dataloader = self._get_data_loader(
group_dataset
)
self._group_dataloaders[group] = new_dataloader
def _get_data_loader(self, data):
if self.shuffle:
sampler = RandomSampler(len(data), self._randint())
else:
sampler = None
dataloader = torch.utils.data.DataLoader(
dataset = data,
sampler = sampler,
batch_size = self.batch_size,
drop_last = self.drop_last
)
return dataloader
def _randint(self):
return self._random.randint(0,2**32-1)
def _init_group_metadataloader(self):
"""
Creates a dataloader which randomizes the batches over the groups. This
allows the order of the batches to be independent of the groups.
"""
self._group_batches = []
for group, group_dataloader in self._group_dataloaders.items():
self._group_batches += [group] * len(group_dataloader)
if self.shuffle:
self._random.shuffle(self._group_batches)
def get_group_ordering(self):
"""
Returns the indices needed to invert the ordering on the input data generated by the grouping mechanism. This
method does not work if shuffle or drop last has been set to true.
"""
if self.shuffle or self.drop_last:
raise NotImplementedError('cannot ungroup data when shuffle is true or drop_last is true')
return np.argsort(np.array(self.old_indices))
def __iter__(self):
return iter(self._iter())
def _iter(self):
group_dataloader_iters = {}
for group in self._group_batches:
if not group in group_dataloader_iters:
group_dataloader_iters[group] = iter(
self._group_dataloaders[group]
)
x_batch, y_batch = next(group_dataloader_iters[group])
            # since all pipelines in this group are identical, just grab one of them
            pipeline = self._group_dataloaders[group].dataset.data[0]["pipeline"]
            yield (group, pipeline, x_batch), y_batch
        # no explicit StopIteration: under PEP 479, raising it inside a generator becomes a RuntimeError
def __len__(self):
return len(self._group_batches)
class PMFDataLoader(object):
def __init__(
self, data, n_x, n_y, pipeline_encoder, dataset_encoder, pipeline_id_mapper, dataset_id_mapper, device="cuda:0"
):
# assign functions for mapping
self.pipeline_id_mapper = pipeline_id_mapper
self.dataset_id_mapper = dataset_id_mapper
self.encode_pipeline = pipeline_encoder
self.dataset_encoder = dataset_encoder
# encode the pipeline dataset mapping
x_data = self.encode_pipeline_dataset(data)
y_data = [instance["test_f1_macro"] for instance in data]
# Build the matrix using the x and y data
self.matrix = torch.zeros([n_x, n_y], device=device)
for index, value in enumerate(y_data):
self.matrix[x_data[index]["pipeline_id_embedding"]][x_data[index]["dataset_id_embedding"]] = value
self.used = False
self.n = len(y_data)
def __len__(self):
return 1
def __iter__(self):
# only return one object: the matrix
if not self.used:
            yield (None, self.matrix)
        # no explicit StopIteration: the generator simply ends (see PEP 479)
def encode_pipeline_dataset(self, data):
"""
Creates the embeddings for the dataset
"""
try:
x_data = []
for instance in data:
x_data.append({"pipeline_id_embedding": self.encode_pipeline(instance["pipeline"]["id"]),
"dataset_id_embedding": self.dataset_id_mapper[instance["dataset_id"]]})
return x_data
except KeyError as e:
raise KeyError("Pipeline/Dataset ID was not in the mapper. Perhaps the pipeline/dataset id was not in the training set? Error: {}".format(e))
def get_predictions_from_matrix(self, x_data, matrix):
predictions = []
for index, item in enumerate(x_data):
predict_value = matrix[self.encode_pipeline(item["pipeline_id"])][self.dataset_id_mapper[item["dataset_id"]]].item()
predictions.append(predict_value)
return predictions
class PMFDataset(Dataset):
# needed to encode the pipelines and datasets for the embeded layers. Used with GroupDataLoader.
def __init__(
self, data: typing.List[typing.Dict], features_key: str,
target_key: str, y_dtype: typing.Any, device: str, encoding_function
):
super().__init__(
data, features_key, target_key, y_dtype, device
)
self.dataset_encoding_function = encoding_function
def __getitem__(self, item: int):
x = self.dataset_encoding_function(self.data[item][self.features_key]).to(self.device)
y = torch.tensor(self.data[item][self.target_key], dtype=self.y_dtype, device=self.device)
return x, y
class RNNDataset(Dataset):
def __init__(self, data: dict, features_key: str, target_key: str, y_dtype, device: str):
super().__init__(data, features_key, target_key, y_dtype, device)
self.pipeline_key = 'pipeline'
self.steps_key = 'steps'
def __getitem__(self, index):
(x, y) = super().__getitem__(index)
item = self.data[index]
encoded_pipeline = torch.tensor(
item[self.pipeline_key][self.steps_key], dtype=torch.float32, device=self.device
)
return (encoded_pipeline, x, y)
class RNNDataLoader(GroupDataLoader):
def __init__(
self, data: dict, group_key: str, dataset_params: dict, batch_size: int, drop_last: bool, shuffle: bool,
seed: int, primitive_to_enc: dict, pipeline_key: str, steps_key: str, prim_name_key: str,
pipeline_structures: dict = None
):
super().__init__(data, group_key, RNNDataset, dataset_params, batch_size, drop_last, shuffle, seed)
self.pipeline_structures = pipeline_structures
if not self._pipelines_encoded(data, pipeline_key, steps_key):
self._encode_pipelines(data, primitive_to_enc, pipeline_key, steps_key, prim_name_key)
def _encode_pipelines(self, data, primitive_name_to_enc, pipeline_key, steps_key, prim_name_key):
for instance in data:
pipeline = instance[pipeline_key][steps_key]
encoded_pipeline = self._encode_pipeline(pipeline, primitive_name_to_enc, prim_name_key)
instance[pipeline_key][steps_key] = encoded_pipeline
@staticmethod
def _encode_pipeline(pipeline, primitive_to_enc, prim_name_key):
# Create a tensor of encoded primitives
encoding = []
for primitive in pipeline:
primitive_name = primitive[prim_name_key]
try:
encoded_primitive = primitive_to_enc[primitive_name]
            except KeyError:
raise KeyError('A primitive in this data set is not in the primitive encoding')
encoding.append(encoded_primitive)
return encoding
@staticmethod
def _pipelines_encoded(data, pipeline_key, steps_key):
primitive_in_pipeline = data[0][pipeline_key][steps_key][0]
return type(primitive_in_pipeline) == np.ndarray
def _iter(self):
group_dataloader_iters = {}
for group in self._group_batches:
if not group in group_dataloader_iters:
group_dataloader_iters[group] = iter(self._group_dataloaders[group])
# Get a batch of encoded pipelines, metafeatures, and targets
(pipeline_batch, x_batch, y_batch) = next(group_dataloader_iters[group])
if self.pipeline_structures is not None:
# Get the structure of the pipelines in this group so the RNN can parse the pipeline
group_structure = self.pipeline_structures[group]
yield ((group_structure, pipeline_batch, x_batch), y_batch)
else:
# Don't return a pipeline structure and the RNN will have to treat it like a straight pipeline
yield((pipeline_batch, x_batch), y_batch)
        # no explicit StopIteration: the generator simply ends (see PEP 479)
| true
|
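A toy sketch of how `group_json_objects` and `StandardScaler` above behave, assuming the module is importable as `dna.data` (the records themselves are invented for illustration):

```python
from dna.data import group_json_objects, StandardScaler

records = [
    {'dataset_id': 'd1', 'mf': {'f1': 1.0}},
    {'dataset_id': 'd1', 'mf': {'f1': 2.0}},
    {'dataset_id': 'd2', 'mf': {'f1': 4.0}},
]

# Indices grouped by a (possibly '.'-delimited) key.
print(group_json_objects(records, 'dataset_id'))   # {'d1': [0, 1], 'd2': [2]}

# Standardize metafeature dicts: zero mean, unit (sample) standard deviation.
scaler = StandardScaler()
scaler.fit([r['mf'] for r in records])
print(scaler.predict([{'f1': 4.0}]))               # [{'f1': (4.0 - mean) / std}]
```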
5e745c581185ba0debf88046f3f3ef06748eaec9
|
Python
|
Donnyvdm/dojo19
|
/team_9/cocos/test/test_multiplex_layer.py
|
UTF-8
| 1,816
| 2.546875
| 3
|
[
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] |
permissive
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 1.1, s, q"
tags = "MultiplexLayer"
autotest = 0
import pyglet
from cocos.director import *
from cocos.menu import *
from cocos.scene import *
from cocos.layer import *
from cocos.actions import Delay, CallFunc
class MainMenu(Menu):
def __init__( self ):
# call superclass with the title
super( MainMenu, self ).__init__("MultiplexLayer")
l = []
l.append( MenuItem('Options', self.on_new_game ) )
l.append( MenuItem('Quit', self.on_quit ) )
self.create_menu( l )
if autotest:
self.do( Delay(1) + CallFunc(self.on_new_game))
# Callbacks
def on_new_game( self ):
self.parent.switch_to( 1 )
def on_quit( self ):
pyglet.app.exit()
class OptionMenu(Menu):
def __init__( self ):
super( OptionMenu, self ).__init__("MultiplexLayer")
l = []
l.append( MenuItem('Fullscreen', self.on_fullscreen) )
l.append( MenuItem('OK', self.on_quit) )
self.create_menu( l )
# Callbacks
def on_fullscreen( self ):
pass
def on_quit( self ):
self.parent.switch_to( 0 )
description = """
Demonstrates MultiplexLayer, a layer which can hold many layers, showing
one of them at a time and handling navigation between layers.
Activate 'Options' to switch to the 'options' layer.
"""
def main():
print(description)
director.init( resizable=True)
scene =Scene(
MultiplexLayer( MainMenu(), OptionMenu() )
)
director.run( scene )
if __name__ == '__main__':
main()
| true
|
6d6c2da12c2ade11b00087f0ac32c145e1a11468
|
Python
|
sportwang/convex-optimization
|
/l1-hw-王协盼-1601214718/代码/l1_cvx_mosek.py
|
UTF-8
| 645
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 25 04:42:26 2016
@author: hadoop
"""
from datetime import datetime
import numpy as np
from scipy.sparse import random
import cvxpy as cvx
def l1_cvx_mosek(n,A,b,mu) :
x = cvx.Variable(n,1)
exp = 0.5* cvx.square(cvx.norm((A*x-b),2)) + mu * cvx.norm(x,1)
obj = cvx.Minimize(exp)
pro = cvx.Problem(obj)
start_time = datetime.now()
pro.solve(solver='MOSEK')
end_time = datetime.now()
print"used time :",(end_time-start_time).seconds
print"prob status :" ,pro.status
print"optimal value :", pro.value
#print "optimal var :" ,x.value
| true
|
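A hypothetical driver for `l1_cvx_mosek`, assuming cvxpy with a licensed MOSEK solver is available and the function above is in scope; the problem sizes and sparsity level are arbitrary:

```python
import numpy as np

np.random.seed(0)
m, n = 256, 512
A = np.random.randn(m, n)                     # measurement matrix
u = np.zeros((n, 1))                          # sparse ground-truth signal
idx = np.random.choice(n, 20, replace=False)
u[idx] = np.random.randn(20, 1)
b = A.dot(u)                                  # observations
mu = 1e-2                                     # l1 regularization weight

l1_cvx_mosek(n, A, b, mu)                     # prints solve time, status, optimal value
```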
9235dbb19af34c1b3890fbd0aaad14bea407f24e
|
Python
|
minhthe/practice-algorithms-and-data-structures
|
/Dequeue SlidingWindow 2Pointers/3sumSmaller.py
|
UTF-8
| 786
| 3.578125
| 4
|
[] |
no_license
|
'''
https://www.lintcode.com/problem/3sum-smaller/description
'''
class Solution:
"""
@param nums: an array of n integers
@param target: a target
@return: the number of index triplets satisfy the condition nums[i] + nums[j] + nums[k] < target
"""
def threeSumSmaller(self, nums, target):
# Write your code here
n = len(nums)
cnt = 0
nums.sort()
for i in range(2, n):
left , right = 0, i - 1
while(left < right) :
if nums[left] + nums[right] + nums[i] < target:
cnt += right - left
left += 1
else:
right -= 1
return cnt
| true
|
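A quick check of the two-pointer counting above, continuing the snippet:

```python
sol = Solution()
# Triplets summing below 2: (-2, 0, 1) and (-2, 0, 3)
print(sol.threeSumSmaller([-2, 0, 1, 3], 2))   # -> 2
```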
bd52c6b81e0e169541abe676cb2a8ea939d229e3
|
Python
|
JonathanRaiman/rsm
|
/daichi_rsm/utils/utils.py
|
UTF-8
| 273
| 2.984375
| 3
|
[] |
no_license
|
def convert_lexicon_file_to_lexicon(path):
reverse_lexicon = []
lexicon = {}
with open(path, 'r') as lexicon_file:
for index, line in enumerate(lexicon_file):
reverse_lexicon.append(line.rstrip())
lexicon[line.rstrip()] = index
return (lexicon, reverse_lexicon)
| true
|
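A small usage sketch for the loader above, writing a throw-away lexicon file with one token per line:

```python
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('the\ncat\nsat\n')
    tmp_path = f.name

lexicon, reverse_lexicon = convert_lexicon_file_to_lexicon(tmp_path)
print(lexicon)           # {'the': 0, 'cat': 1, 'sat': 2}
print(reverse_lexicon)   # ['the', 'cat', 'sat']
os.remove(tmp_path)
```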
5c4af949931245aa1659dfee8649c7e8d5c521ee
|
Python
|
aakarshgupta97/106B-Research-Project
|
/src/Particle.py
|
UTF-8
| 2,532
| 3.265625
| 3
|
[] |
no_license
|
__author__ = 'Aakarsh Gupta'
from graphics import Circle
from graphics import Point
from random import Random
import numpy as np
class Particle:
def __init__(self, window, p=Point(0, 0), isSheep=False):
self.particle = None
self.drawn = False
self.color = "RED"
self.position = p
self.x = p.getX()
self.y = p.getY()
self.isSheep = isSheep
self.K = 0.5
self.size = 3
self.dX = 0
self.dY = 0
self.win = window
self.particleTurnCount = 0
def setCoord(self, x, y):
self.x = x
self.y = y
def setColor(self, color):
self.color = color
if self.particle:
self.particle.setFill(color)
def setSize(self, size):
self.size = size
if self.drawn:
self.undraw()
self.draw()
def draw(self):
self.particle = Circle(Point(self.x, self.y), self.size)
self.particle.setFill(self.color)
self.particle.draw(self.win)
self.drawn = True
def undraw(self):
self.particle.undraw()
self.drawn = False
def setParticleMovement(self, dogs):
if dogs is None:
r = Random()
self.dX = 0.5 * r.randrange(-2.0, 2.0)
self.dY = 0.5 * r.randrange(-2.0, 2.0)
else:
vel = np.zeros(2)
sheep_pos = np.array([self.position.getX(), self.position.getY()])
for dog in dogs:
xy = np.array([dog.position.getX(), dog.position.getY()])
vel += ((sheep_pos - xy) * 1e5 / np.linalg.norm(sheep_pos - xy) ** 3)
vel *= self.K
# print(np.linalg.norm(vel))
if np.linalg.norm(vel) <= 0.015:
vel = np.array([0.0, 0.0])
self.dX = vel[0]
self.dY = vel[1]
self.move()
def potential(self, dogs):
vel = 0.0
sheep_pos = np.array([self.position.getX(), self.position.getY()])
for dog in dogs:
xy = np.array([dog.position.getX(), dog.position.getY()])
vel += (1.0/np.linalg.norm(sheep_pos-xy)**2)
return self.K * vel
def move_with_vel(self, vel_x, vel_y):
self.dX = vel_x
self.dY = vel_y
self.move()
def move(self):
self.particle.move(self.dX, self.dY)
self.particle.undraw()
self.position = Point(self.position.getX()+self.dX, self.position.getY()+self.dY)
self.particle.draw(self.win)
| true
|
0607760b08b094bfe0c05824bd4888025b3de0e1
|
Python
|
Yuuki-Yoda/Normal-Distribution
|
/Log-normal.py
|
UTF-8
| 1,556
| 3.40625
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# Standard normal CDF and PDF helpers
def cdf(x):
return norm.cdf(x, loc=0, scale=1)
def pdf(x):
return norm.pdf(x, loc=0, scale=1)
# Plotting function (mu: float, sigma: float)
def lognormal(mu, sig):
if sig <= 0:
print("Scale Error")
return
if sig < 1.5:
x = np.arange(0.0000001, np.exp(mu + 2 * sig), np.exp(mu + 2 * sig) / 1000)
else:
        x = np.arange(0.0000001, np.exp(mu) + 3, (np.exp(mu) + 3) / 1000)  # x range: 1000 points by default; the interval depends on sigma
    y1 = norm.pdf(np.log(x), loc=mu, scale=sig) / x  # compute the four curves, starting with the PDF
y2 = []
for t in x:
y_2 = cdf((np.log(t) - mu) / sig)
y2.append(y_2)
y3 = []
for t in x:
y_3 = (1 - cdf((np.log(t) - mu) / sig))
y3.append(y_3)
y4 = []
for t in x:
y_4 = pdf((np.log(t) - mu) / sig) / t / (sig * (1 - cdf((np.log(t) - mu) / sig)))
y4.append(y_4)
    plt.figure(figsize=(10, 8))  # plotting
plt.subplot(2, 2, 1)
plt.plot(x, y1, label="PDF", color='b')
plt.title("PDF")
plt.subplot(2, 2, 2)
plt.plot(x, y2, label="CDF", color='g')
plt.title("CDF")
plt.subplot(2, 2, 3)
plt.plot(x, y3, label="R", color='y')
plt.title("R")
plt.subplot(2, 2, 4)
plt.plot(x, y4, label="λ", color='r')
plt.title("λ")
plt.suptitle("Log-normal Distribution")
plt.show()
return
# Example run
sig = float(input("sigma:"))
mu = float(input("mu:"))
lognormal(mu, sig)
| true
|
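The four panels plotted above are the standard log-normal quantities; writing Phi and phi for the standard normal CDF and PDF, the code computes:

```latex
f(x) = \frac{1}{x\,\sigma}\,\varphi\!\left(\frac{\ln x-\mu}{\sigma}\right),\qquad
F(x) = \Phi\!\left(\frac{\ln x-\mu}{\sigma}\right),\qquad
R(x) = 1-F(x),\qquad
\lambda(x) = \frac{f(x)}{R(x)}
```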
38bf0277ddb4ef56e3671ec30ac72ee7cd0ae35e
|
Python
|
aimendez/Udemy_Certification_Dashboards_Plotly
|
/Excercise_Solutions/Ex_5.py
|
UTF-8
| 1,897
| 3.5625
| 4
|
[] |
no_license
|
#######################################################################################
# EXCERCISE 5: Boxplots Excercise
# Udemy Online Course: "Python Visualization Dashboards with Plotly's Dash Library"
# https://www.udemy.com/course/interactive-python-dashboards-with-plotly-and-dash/
# @Author: AIMendez
# Created on 16-06-2020
#######################################################################################
# Objective: Make a DataFrame using the Abalone dataset (../data/abalone.csv).
# Take two independent random samples of different sizes from the 'rings' field.
# HINT: np.random.choice(df['rings'],10,replace=False) takes 10 random values
# Use box plots to show that the samples do derive from the same population.
#######################################################################################
# Perform imports here:
import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
import os
cwd = os.getcwd()
# create a DataFrame from the .csv file:
df = pd.read_csv( cwd+'/data/abalone.csv')
print(df.head())
# take two random samples of different sizes:
sample_0 = np.random.choice(df['rings'],10,replace=False)
sample_1 = np.random.choice(df['rings'],25,replace=False)  # a different sample size, per the exercise statement
# create a data variable with two Box plots:
fig = go.Figure()
trace0 = go.Box(
y = sample_0,
boxpoints = 'all', #'outliers' as second option
jitter = 0.1, #how spread the points are
pointpos = 0 #off set of points respect to box
)
trace1 = go.Box(
y = sample_1,
boxpoints = 'all',
jitter = 0.3,
pointpos = 0
)
fig.add_traces([trace0, trace1])
# add a layout
layout = go.Layout(
title = 'Excercise 5 Solution',
xaxis_title = 'rings',
)
fig.update_layout(layout)
# create a fig from data & layout, and plot the fig
pyo.plot(fig, filename='Ex_5.html')
| true
|
5fc1339beac0107c891e87b82435911044225ae4
|
Python
|
Aasthaengg/IBMdataset
|
/Python_codes/p02779/s996292507.py
|
UTF-8
| 139
| 2.6875
| 3
|
[] |
no_license
|
import sys
input = sys.stdin.readline
n, s = int(input()), list(map(int, input().split()))
print('YES' if len(set(s)) == len(s) else 'NO')
| true
|
0d6d54d74407071e939b7f4ff2b97713c5cd710d
|
Python
|
aewens/database
|
/aes.py
|
UTF-8
| 941
| 2.671875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
from os import urandom
from hmac import new as HMAC
from hashlib import sha256
from base64 import urlsafe_b64encode as ub64e, urlsafe_b64decode as ub64d
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
def encrypt(message, key=None, nonce=None):
if key is None:
key = AESGCM.generate_key(bit_length=256)
if nonce is None:
nonce = urandom(12)
data = message.encode()
mac = HMAC(key, data + nonce, digestmod=sha256).digest()
aesgcm = AESGCM(key)
encrypted = aesgcm.encrypt(nonce, data, mac)
splitter = "%%%".encode()
return key, splitter.join([nonce, encrypted, mac])
def decrypt(key, source):
splitter = "%%%".encode()
nonce, encrypted, mac = source.split(splitter)
aesgcm = AESGCM(key)
data = aesgcm.decrypt(nonce, encrypted, mac)
verify = HMAC(key, data + nonce, digestmod=sha256).digest()
if mac != verify:
return None
return data.decode()
| true
|
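A round-trip sketch for the helpers above (requires the `cryptography` package; note that splitting on the literal `%%%` marker could fail in the unlikely case that the ciphertext or MAC happens to contain that byte sequence):

```python
from cryptography.exceptions import InvalidTag

key, token = encrypt("attack at dawn")
print(decrypt(key, token))                  # -> 'attack at dawn'

try:
    decrypt(AESGCM.generate_key(bit_length=256), token)   # wrong key
except InvalidTag:
    print("wrong key rejected")
```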
a29d1fabb9803d1747d55b7e0608d7acbdd12aa5
|
Python
|
JamesMensah/blablamower
|
/Mower.py
|
UTF-8
| 2,275
| 3.375
| 3
|
[] |
no_license
|
import logging
from Exceptions import WrongFormatInputFileException
class Mower(object):
    def __init__(self, x: int, y: int, orientation: str):
self.x = x
self.y = y
self.orientation = orientation
def __str__(self):
return "Mower(" + str(self.x) + ";" + str(self.y) + ";" + self.orientation + ")"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if self.x == other.x and self.y == other.y and self.orientation == other.orientation:
return True
else:
return False
def __copy__(self):
return type(self)(self.x, self.y, self.orientation)
def move(self, instruction=None):
if instruction == "F":
if self.orientation == "N":
self.y = self.y + 1
elif self.orientation == "S":
self.y = self.y - 1
elif self.orientation == "E":
self.x = self.x + 1
elif self.orientation == "W":
self.x = self.x - 1
else:
logging.error("This orientation is not valid")
raise WrongFormatInputFileException
elif instruction == "L":
if self.orientation == "N":
self.orientation = "W"
elif self.orientation == "S":
self.orientation = "E"
elif self.orientation == "W":
self.orientation = "S"
elif self.orientation == "E":
self.orientation = "N"
else:
logging.error("This orientation is not valid")
raise WrongFormatInputFileException
elif instruction == "R":
if self.orientation == "N":
self.orientation = "E"
elif self.orientation == "S":
self.orientation = "W"
elif self.orientation == "W":
self.orientation = "N"
elif self.orientation == "E":
self.orientation = "S"
else:
logging.error("This orientation is not valid")
raise WrongFormatInputFileException
else:
logging.error("This instruction is not valid")
raise WrongFormatInputFileException
| true
|
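A usage sketch (the classic mower-kata walk), assuming the repo's `Exceptions` module imported at the top of the file is on the path:

```python
mower = Mower(1, 2, "N")
for instruction in "LFLFLFLFF":
    mower.move(instruction)
print(mower)                       # -> Mower(1;3;N)
print(mower == Mower(1, 3, "N"))   # -> True
```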
a53e3f5fe807968bf041f0b5c0a075ab9dce38b4
|
Python
|
MrHamdulay/csc3-capstone
|
/examples/data/Assignment_8/llyjam001/question3.py
|
UTF-8
| 960
| 4.375
| 4
|
[] |
no_license
|
"""Assignment 8 Question 3
James Lloyd
4 May 2014"""
#Retrieving message
message = input ("Enter a message:\n")
def encrypt (message):
"""Function to shift letters by plus one"""
#Setting the base case
if message == '':
return ''
#Changing the first character to code the adding to the code of the rest of the string.
else:
#Setting to keep spaces
if message [0] == ' ':
return ' ' + encrypt (message [1:])
#setting z to a
elif message [0] == "z":
return "a" + encrypt (message [1:])
else:
if 97 <= ord (message [0]) <= 121:
ASCII = (ord (message [0])) + 1
code = chr (ASCII)
return code + encrypt (message [1:])
else:
return message [0] + encrypt (message [1:])
#Printing encrypted message
print ("Encrypted message:")
print (encrypt (message))
| true
|
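A worked example of the shift above, appended to the script: each lowercase letter maps to its successor, 'z' wraps to 'a', and spaces and other characters pass through unchanged.

```python
print(encrypt("xyz abc!"))   # -> 'yza bcd!'
```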
e1dd6941e01fac1529084f539f25ddd0eb88e8df
|
Python
|
Nenphys/SofiControl
|
/connection_db.py
|
UTF-8
| 6,206
| 2.53125
| 3
|
[] |
no_license
|
__author__ = 'Chapo'
import MySQLdb
import serial
import time
import sys
import bitState
messageToSend = ''
respuesta = ''
data_toPrint = ''
cmd =''
def SendCommand(cmd_cfg):
global tenSec, Rx, messageToSend, errorCounter, respuesta,data_toPrint,cmd
data_toPrint = ""
print("TxST: SendCommand Thread Running ...")
port.flushOutput()
command = cmd_cfg
Rx = True
data_toPrint = command[:-1]
print("[{}]TxST: Tx Data->[{}]".format(time.clock(), data_toPrint))
port.write(command)
while Rx:
try:
MessageFromSerial = port.readline()
# Remove last 3 chars (CR LF)
data_toPrint = MessageFromSerial[:-2]
if data_toPrint[3] != 'X':
respuesta = data_toPrint
print("[{}]RxST: Rx Data->[{}]".format(time.clock(), data_toPrint))
                cmd = ""  # reset the pending command (was a no-op '==' comparison)
Rx = False
except serial.SerialException as e:
print("Error: ...{0}".format(e))
respuesta = "Error de Comunicacion"
Rx = False
except IndexError as i:
print("Error: ...{0}".format(i))
respuesta = "Error de Comunicacion"
Rx = False
except TimeoutError as t:
print("Error: ...{0}".format(t))
respuesta = "Error de Comunicacion"
Rx = False
global port
port = serial.Serial("/dev/ttyAMA0", baudrate=19200, timeout=1)
print("Python-MySQL connection: OK")
while True:
try:
db = MySQLdb.connect(host="localhost", user="admin", passwd="petrolog", db="sofi")
cursor = db.cursor(MySQLdb.cursors.DictCursor)
time.sleep(.5)
cursor.execute("SELECT * FROM Eventos")
rowEvent = cursor.fetchone()
tx_db = rowEvent["tx"]
refresh_db= rowEvent ["refresh"]
if refresh_db == 1:
            # fetch the SP (setpoint) values
cmd = "01S?2\x0D"
SendCommand(cmd)
if respuesta !="Error de Comunicacion":
resp_s2 = respuesta
print (resp_s2[5:9])
print (resp_s2[9:13])
print (resp_s2[41:45])
print (resp_s2[37:41])
cursor.execute("UPDATE Eventos SET dia_de_paro={0}, hora_paro={1}, dia_de_arranque={2}, hora_arranque={3}, refresh=0".format(resp_s2[9:13],resp_s2[41:45],resp_s2[5:9],resp_s2[37:41]))
db.commit()
respuesta = ""
            # fetch the motor state
cmd= "01E\x0D"
SendCommand(cmd)
cmd_e = respuesta
if respuesta !="Error de Comunicacion":
print (cmd_e[18:20])
print(bitState.getBitState(cmd_e[18:20],7))
if bitState.getBitState(cmd_e[18:20],7) == "true":
print ("prendido")
cursor.execute("UPDATE Estado SET estado=1")
db.commit()
respuesta = ""
else:
print ("Apagado")
cursor.execute("UPDATE Estado SET estado=0")
db.commit()
respuesta = ""
if tx_db == 1:
id_evento = rowEvent["id_evento"]
dia_de_paro = rowEvent["dia_de_paro"]
cmd = "01SX2000{0}\x0D".format(dia_de_paro)
SendCommand(cmd)
hora_paro = str(rowEvent["hora_paro"])
print("HORA STRING {0}".format(hora_paro))
if len(hora_paro) > 3:
cmd = "01SXA{0}\x0D".format(hora_paro)
SendCommand(cmd)
elif len(hora_paro) == 1:
cmd = "01SXA000{0}\x0D".format(hora_paro)
SendCommand(cmd)
elif len(hora_paro) == 2:
cmd = "01SXA00{0}\x0D".format(hora_paro)
SendCommand(cmd)
else:
cmd = "01SXA0{0}\x0D".format(hora_paro)
SendCommand(cmd)
dia_de_arranque = rowEvent["dia_de_arranque"]
cmd = "01SX1000{0}\x0D".format(dia_de_arranque)
SendCommand(cmd)
hora_arranque = str(rowEvent["hora_arranque"])
print("HORA STRING {0}".format(hora_arranque))
if len(hora_arranque) > 3:
cmd = "01SX9{0}\x0D".format(hora_arranque)
SendCommand(cmd)
elif len(hora_arranque) == 1:
cmd = "01SX9000{}\x0D".format(hora_arranque)
SendCommand(cmd)
elif len(hora_arranque) == 2:
cmd = "01SX900{0}\x0D".format(hora_arranque)
SendCommand(cmd)
else:
cmd = "01SX90{0}\x0D".format(hora_arranque)
SendCommand(cmd)
if respuesta != "Error de Comunicacion":
cursor.execute("UPDATE Eventos SET tx=0 WHERE id_evento={0}".format(id_evento))
db.commit()
print(
"Eventos Guardados: DiaP: {0} Hora: {1} DiaE {2} HoraE: {3}".format(dia_de_paro, hora_paro, dia_de_arranque,
hora_arranque))
else:
cursor.execute("SELECT * FROM Comandos ")
rowCmd = cursor.fetchone()
print("Sin Eventos Nuevos")
try:
resp_cmd = rowCmd['resp_cmd']
except TypeError:
print ("Tabla de Comandos Vacia")
else:
print(resp_cmd)
if resp_cmd == '0':
cmd_enviar = rowCmd['cmd']
cmd = "{0}\x0D".format(cmd_enviar)
SendCommand(cmd)
cursor.execute("UPDATE Comandos SET resp_cmd=\"{0}\"".format(respuesta))
db.commit()
cursor.execute("SELECT * FROM Comandos ")
temp= cursor.fetchone()
print ("YA CALLESE!!!!!!!!!!!!!!!! {0}".format(temp['resp_cmd']))
respuesta = ""
cursor.close()
db.close()
    except:
        print("Unexpected error:", sys.exc_info()[0])
        try:
            cursor.close()
            db.close()
        except Exception:
            print("Error closing the DB")
        break
| true
|
11649412a5ecaed2e15d3bd5bfd6f206de1cd5f5
|
Python
|
zeusone/machine
|
/pro/test.py
|
UTF-8
| 564
| 3.40625
| 3
|
[] |
no_license
|
#i=1
#print (type(i))
#from random import randrange
#num = randrange(1,9);
#print (num)
#a = input('X : ')
#if int(a) > 0:
# print (a)
#else:
# print (-int(a))
#for x in range(1, 11):
# print (x)
#for ind in 'python':
# if ind == 'h':
# continue
# else:
# print (ind)
tuple_1 = ('shanghai', 'beijing')
tuple_2 = ('nanjing', 'wuhan', 'xiamen')
#tuple_3 = (tuple_1, tuple_2)
#print (tuple_3[1][2])
#a, b, c = tuple_2
#print (a, b)
def addFunc(x, y):
return x + y
x = int(input())
y = int(input())
print (addFunc(x, y))
| true
|
851efce6e752533d4b784ca7e2fb86555b25de27
|
Python
|
aruna09/practice-datasets
|
/Electric-Meters/hsv.py
|
UTF-8
| 1,521
| 2.84375
| 3
|
[] |
no_license
|
import cv2
import numpy as np
#reading the image
img = cv2.imread('/home/icts/practice-datasets/Electric-Meters/Electric-meters/MAN_5001816631_20170819_OK.jpg')
#median blur to remove the noise and other small characters
median = cv2.medianBlur(img, 3)
#converting to hsv
hsv = cv2.cvtColor(median, cv2.COLOR_BGR2HSV)
#defining the green mask and extracting the screen
mask = cv2.inRange(hsv, (65,60,60), (80, 255,255))
res = cv2.bitwise_and(img, img, mask=mask)
#converting to grayscale and thresholding
gray_img = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
flag, thresh = cv2.threshold(gray_img, 128, 255, cv2.THRESH_BINARY_INV)
#doing the final morphological transformations
kernel = np.ones((3,3), np.uint8)
erosion = cv2.erode(thresh,kernel,iterations = 1)
dilation = cv2.dilate(erosion,kernel,iterations = 4)
final_image = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)
#cv2.imshow("Image", final_image)
#using canny edge detector to detect edges
edges = cv2.Canny(final_image, 0, 200, 255)
#cv2.imshow("Image", edges)
#finding contours
im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
idx=0
final_contours = []
# final computation of bounding rects and calculation of RoIs
for c in contours:
x,y,w,h = cv2.boundingRect(c)
if h>55 and w<64:
cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0),2)
cv2.imshow("Image", img)
final_contours.append(c)
roi=img[y:y+h, x:x+w]
cv2.imwrite(str(idx) + '.jpg', roi)
idx=idx+1
cv2.waitKey(0)
cv2.destroyAllWindows()
| true
|
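One portability note on the contour step above: `cv2.findContours` returns three values in OpenCV 3.x but only two in OpenCV 2.4 and 4.x. A version-agnostic form of that call:

```python
# contours is always the second-to-last returned item across OpenCV versions
result = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = result[-2], result[-1]
```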
41f05a3335b1ffcfa4e037a71605fd4bc9f917c0
|
Python
|
rdranch/scripts
|
/afkPy.py
|
UTF-8
| 7,198
| 2.625
| 3
|
[] |
no_license
|
import win32api, keyboard, multiprocessing, ctypes
from json import dumps, loads
from time import sleep
from os import path, getcwd, startfile
from random import randint, uniform, shuffle
from win32gui import GetWindowText, GetForegroundWindow
from psutil import process_iter
# TODO add randomization to looking around
# Knifing is not dealt with
default_config = {
"toggle":"f2",
"reload":"r",
"up":"w",
"lean_left":"q",
"lean_right":"e",
"left":"a",
"down":"s",
"right":"d",
"crouch":"z",
"prone":"left ctrl",
"interact":"f",
"ping":"v",
"gadget":"`",
}
cx = win32api.GetSystemMetrics(0) # Get screen width
cy = win32api.GetSystemMetrics(1) // 2 # Get screen height
def macro(config, path):
'''Main macro script which will run a series of actions found in the config file to automatically move.
Will infinitely run until forced shut or process is terminated.
Arguments
---------
Config : Dictionary
Key/action mapping
Path : string
The path of R6 executable
'''
# Checks if Rainbow Six is open, will restart it if not.
while True:
while "RainbowSix.exe" not in [p.name() for p in process_iter()]:
print("[!] Siege crashed or closed. Stopping bot while it restarts.")
startfile(path)
sleep(120)
print("[*] Continuing...")
keyboard.press('enter')
sleep(0.03)
keyboard.release('enter')
# Shuffles order in which actions are done
random_list = ["reload", "up", "left", "down", "right", "crouch"]
shuffle(random_list)
# Main activity section
keyboard.press(config[random_list[0]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[0]])
sleep(uniform(0.3, 1))
keyboard.press(config[random_list[1]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[1]])
sleep(uniform(0.3, 1))
keyboard.press(config[random_list[2]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[2]])
sleep(uniform(0.3, 1))
keyboard.press(config[random_list[3]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[3]])
sleep(uniform(0.3, 1))
keyboard.press(config[random_list[4]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[4]])
sleep(uniform(0.3, 1))
keyboard.press(config[random_list[5]])
sleep(uniform(0.3, 1))
keyboard.release(config[random_list[5]])
sleep(uniform(0.3, 1))
keyboard.press(config["lean_left"])
sleep(uniform(0.3, 1))
keyboard.release(config["lean_left"])
sleep(uniform(0.3, 1))
keyboard.press(config["lean_right"])
sleep(uniform(0.3, 1))
keyboard.release(config["lean_right"])
sleep(uniform(0.3, 1))
keyboard.press(config["lean_right"])
sleep(uniform(0.3, 1))
keyboard.release(config["lean_right"])
sleep(uniform(0.3, 1))
# ADS/UNAds
for _ in range(2):
win32api.mouse_event(0x0008, 0, 0, 0, 0) # Right click press
sleep(uniform(0.3, 1))
win32api.mouse_event(0x0010, 0, 0, 0, 0) # Right click release
sleep(uniform(0.3, 1))
# Sit down / stand up
for _ in range(2):
keyboard.press(config["prone"])
sleep(uniform(0.3, 1))
keyboard.release(config["prone"])
sleep(uniform(0.5, 1))
# Drop/pickup bomb, ping
keyboard.press(config["interact"])
sleep(uniform(0.3, 1))
keyboard.release(config["interact"])
sleep(uniform(0.3, 1))
keyboard.press(config["ping"])
sleep(uniform(0.3, 1))
keyboard.release(config["ping"])
sleep(uniform(0.3, 1))
# Pull/put out gadget
keyboard.press(config["gadget"])
sleep(uniform(0.6, 1))
win32api.mouse_event(0x0800, 0, 0, 120, 0) # SCROLL UP - ONE WHEEL CLICK IS 120
keyboard.release(config["gadget"])
sleep(1.5)
win32api.mouse_event(0x0001, 0, -1000, 0, 0) # Moves cursor to top of screen
sleep(0.5)
win32api.mouse_event(0x0002, 0, 0, 0, 0) # Left click press
sleep(uniform(0.3, 1))
win32api.mouse_event(0x0004, 0, 0, 0, 0) # Left click release
sleep(uniform(0.3, 1))
win32api.mouse_event(0x0020, 0, 0, 0, 0) # Middle click press
sleep(uniform(0.3, 1))
win32api.mouse_event(0x0040, 0, 0, 0, 0) # Middle click release
sleep(uniform(0.3, 1))
win32api.mouse_event(0x0001, 0, cy, 0 ,0) # Centers crosshair
keyboard.press('enter')
sleep(uniform(0.3, 1))
keyboard.press('enter')
sleep(0.03)
keyboard.release('enter')
sleep(uniform(0.3, 1))
keyboard.release('enter')
sleep(0.03)
# Spin 4x to the right
for _ in range(4):
win32api.mouse_event(0x0001, cx//4, 0, 0, 0)
sleep(uniform(0.3, 1))
keyboard.press('esc')
sleep(uniform(0.3, 1))
keyboard.release('esc')
sleep(0.03)
keyboard.press('esc')
sleep(0.03)
keyboard.release('esc')
sleep(uniform(0.3, 1))
def check_open():
'''Checks all process names to see if RainbowSix is open
If R6 is found, it will return the path.
Returns
-------
String : executable path
'''
print("[*] Waiting for Rainbow Six to be opened.")
while "RainbowSix.exe" not in [p.name() for p in process_iter()]:
sleep(2)
for proc in process_iter():
if proc.name() == "RainbowSix.exe":
return proc.exe()
if __name__ == "__main__":
# Create title for exe
ctypes.windll.kernel32.SetConsoleTitleW("Bot coded by Darkon (Darkinator#3932) AKA The.Don_ (v.1.0)")
multiprocessing.freeze_support()
# Check if config file is created
if not path.exists("botSettings.ini"):
with open("botSettings.ini", "w") as f:
f.write(dumps(default_config, indent=4))
print(f"[*] botSettings.ini created in {getcwd()}")
config = default_config
else:
with open("botSettings.ini", "r") as f:
config = loads(f.read())
toggle = config['toggle']
# Run bot
try:
path = check_open()
print(f"[*] Press {toggle} to activate/deactive bot. CTRL+C to EXIT.")
while True:
if keyboard.is_pressed(toggle):
print("[*] BOT ACTIVATED")
proc = multiprocessing.Process(target=macro, args=(config, path))
proc.start()
keyboard.wait(toggle)
print("[*] BOT DEACTIVATED")
proc.terminate()
sleep(0.5)
except KeyboardInterrupt:
print("[*] EXITING")
exit(0)
| true
|
a3ff108e8cd3b49abc11923636ae3460dee4f397
|
Python
|
AustinAmannVaughan/betAPP
|
/betApp.py
|
UTF-8
| 4,699
| 3.171875
| 3
|
[] |
no_license
|
import csv
import tkinter as tk
class Team:
name = ""
wins = 0
losses = 0
spreadW = 0
spreadL = 0
oppW = []
oppL = []
def __init__(self,name,wins,losses,spreadW,spreadL, recW, recL):
self.name = name
self.wins = wins
self.losses = losses
self.spreadW = spreadW
self.spreadL = spreadL
self.oppW = recW
self.oppL = recL
window = tk.Tk()
button = tk.Button(
text="Click me!",
width=25,
height=5,
bg="blue",
fg="yellow",
)
t = []
w = []
l = []
spr = []
sprW = []
sprL = []
path = 'txt.csv'
file=open( path, "r")
reader = csv.reader(file)
for line in reader:
t.append(line[0])
w.append(line[1])
l.append(line[2])
sprW.append(line[4])
sprL.append(line[5])
teams =[]
num1 =-1
num2 = -1
while(num1 < 0 or num2 < 0):  # keep prompting until both the team and the opponent are recognized
value = input("Please enter a team name: \n")
opp = input("Please enter an opponent team name: \n")
if(value== 'Arizona Cardinals'):
path = 'az.csv'
num1 = 1
elif(value=='Atlanta Falcons'):
path = 'atl.csv'
num1 = 2
elif(value=='Baltimore Ravens'):
path = 'bal.csv'
num1 = 3
elif(value=='Buffalo Bills'):
path = 'buf.csv'
num1 = 4
    # TODO: the remaining teams (Carolina Panthers through Washington Redskins)
    # still need their own elif branches here with the matching csv path and num1 value
if(opp== 'Arizona Cardinals'):
num2 = 1
elif(opp=='Atlanta Falcons'):
num2 = 2
elif(opp=='Baltimore Ravens'):
num2 = 3
elif(opp=='Buffalo Bills'):
num2 = 4
elif(opp=='Carolina Panthers'):
num2 = 5
elif(opp=='Chicago Bears'):
num2 = 6
elif(opp=='Cincinnati Bengals'):
num2 = 7
elif(opp=='Cleveland Browns'):
num2 = 8
elif(opp=='Dallas Cowboys'):
num2 = 9
elif(opp=='Denver Broncos'):
num2 = 10
elif(opp=='Detroit Lions'):
num2 = 11
elif(opp=='Green Bay Packers'):
num2 = 12
elif(opp=='Houston Texans'):
num2 = 13
elif(opp=='Indiannapolis Colts'):
num2 = 14
elif(opp=='Jacksonville Jaguars'):
num2 = 15
elif(opp=='Kansas City Chiefs'):
num2 = 16
elif(opp=='Oakland Raiders'):
num2 = 17
elif(opp=='Los Angeles Chargers'):
num2 = 18
elif(opp=='Los Angeles Rams'):
num2 = 19
elif(opp=='Miami Dolphins'):
num2 = 20
elif(opp=='Minnesota Vikings'):
num2 = 21
elif(opp=='New England Patriots'):
num2 = 22
elif(opp=='New Orleans Saints'):
num2 = 23
elif(opp=='New York Giants'):
num2 = 24
elif(opp=='New York Jets'):
num2 = 25
elif(opp=='Philadelphia Eagles'):
num2 = 26
elif(opp=='Pittsburgh Steelers'):
num2 = 27
elif(opp=='San Francisco 49ers'):
num2 = 28
elif(opp=='Seattle Seahawks'):
num2 = 29
elif(opp=='Tampa Bay Buccaneers'):
num2 = 30
elif(opp=='Tennesee Titans'):
num2 = 31
elif(opp=='Washington Redskins'):
num2 = 32
else:
print("Please Enter a Valid Team")
recW = []
recL = []
file=open( path, "r")
reader = csv.reader(file)
for line in reader:
recW.append(line[2])
recL.append(line[3])
for i in range(1,len(t)):
s = t[i]
team1 = Team(s,w[i],l[i],sprW[i], sprL[i], recW[num2], recL[num2])
teams.append(team1)
for i in range(0,len(teams)):
if(teams[i].name==value):
if(int(teams[i].spreadW) >2):
print("Team good against spread\n")
else:
print("Team bad against spread\n")
print("Wins:"+teams[i].wins)
print("Losses: "+teams[i].losses)
print("Spread Wins: "+teams[i].spreadW)
print("Spread Losses: "+teams[i].spreadL)
print("Wins against " +opp+" "+teams[i].oppW)
print("Losses against " +opp+" "+teams[i].oppL)
| true
|
abe7c5be532e6afcdb6f4ddd43b94dc8f1de7db0
|
Python
|
globalista/sudoku_solver
|
/methods.py
|
UTF-8
| 1,465
| 3.234375
| 3
|
[] |
no_license
|
def input_to_matrix(vstup):
matrix = []
with open(vstup) as f:
for line in f:
line1 = line.strip().split()
if line1:
final_line = []
for j in line1:
if j in {'1', '2', '3', '4', '5', '6', '7', '8', '9'}:
final_line.append(int(j))
else:
final_line.append(0)
matrix.append(final_line)
return matrix
def areas_containing_the_box(box, list_of_all_areas):
'''
make list of areas containing the box
:param box:
:param list_of_all_areas:
:return: list of areas containing the box
'''
areas_containing = []
for area in list_of_all_areas:
if box in area.field:
areas_containing.append(area)
return areas_containing
def set_value_and_delete_it_from_areas(box, list_of_all_areas, value):
'''
:param box:
:param list_of_all_areas:
:param value:
:return: set value to the box and delete the value in all other areas containing
'''
if box.set_value(value):
for area in areas_containing_the_box(box, list_of_all_areas):
area.remove_from_possible_values(value)
def load_input(field, matrix, areas):
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if matrix[i][j]:
set_value_and_delete_it_from_areas(field.field[i*9+j], areas, matrix[i][j])
| true
|
dfdd717ad0f875cea581a20d53c02ef7cd7b3a4c
|
Python
|
rexkwong-py/2022-calendar.py
|
/2022calendar.py
|
UTF-8
| 378
| 2.71875
| 3
|
[] |
no_license
|
import calendar
print(calendar.month(2022,1))
print(calendar.month(2022,2))
print(calendar.month(2022,3))
print(calendar.month(2022,4))
print(calendar.month(2022,5))
print(calendar.month(2022,6))
print(calendar.month(2022,7))
print(calendar.month(2022,8))
print(calendar.month(2022,9))
print(calendar.month(2022,10))
print(calendar.month(2022,11))
print(calendar.month(2022,12))
| true
|
3d0d39359e574f8d0ee3c027e0c44450af038025
|
Python
|
rdguerrerom/MolecularModelling
|
/For_Naaman/Polarizability_analysis.py
|
UTF-8
| 1,982
| 3.390625
| 3
|
[] |
no_license
|
import numpy as np
from numpy import linalg as LA
HS_25_polarizability = np.array([[ -616.06509371, -338.23860565, 168.61275949],
[ -339.53209953, -3271.1258288, 49.97796199],
[ 169.52823124, 49.92681813, -3248.46416257]])
HS_13_polarizability = np.array( [[ -632.12055991, 361.8850252, -105.08752708],
[ 362.5104567, -2561.1274773, -86.79063915],
[ -105.24760956, -86.84916529, -2747.83281691]])
HS_4_polarizability = np.array( [[ -835.28272287, 9.73158473, 5.52674842],
[ 9.73998582, -1875.59484557, 2.56536624],
[ 5.51324309, 2.56795261, -1894.69617352]])
HS_40_polarizability = np.array( [[ -862.9462555, -51.67449766, 13.62415435],
[ -51.70552815, -2339.69793078, 6.70147548],
[ 13.61809558, 6.69942881, -2349.18932917]])
def normalize_rows(x: np.ndarray):
"""
function that normalizes each row of the matrix x to have unit length.
Args:
``x``: A numpy matrix of shape (n, m)
Returns:
``x``: The normalized (by row) numpy matrix.
"""
return x/LA.norm(x, ord=2, axis=1, keepdims=True)
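# Added illustrative sketch (not part of the original analysis; the numbers are
# made up): row-normalisation divides each row by its L2 norm, e.g.
#   normalize_rows(np.array([[3.0, 4.0], [0.0, 2.0]]))
#   -> [[0.6, 0.8], [0.0, 1.0]]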
def polarizability_analisys_norm(Pol):
_Pol = normalize_rows(Pol)
xx, yy, zz = _Pol.diagonal()
isotropy = (xx+yy+zz)/3
anisotropy = (0.5 * ((xx-yy)**2 + (yy-zz)**2 + (zz-xx)**2))**0.5
print('Isotropic polarizability: {:}'.format(isotropy))
print('Polarizability anisotropy: {:}'.format(anisotropy))
    print('Normalized dipole polarizability tensor: {:}'.format(_Pol))
if __name__ == '__main__':
print("RESULTS IN DECREASING ANISOTROPY")
print("================================")
print("HS_25")
print("----")
polarizability_analisys_norm(HS_25_polarizability)
print("HS_13")
print("-----")
polarizability_analisys_norm(HS_13_polarizability)
print("HS_40")
print("-----")
polarizability_analisys_norm(HS_40_polarizability)
print("HS_4")
print("----")
polarizability_analisys_norm(HS_4_polarizability)
| true
|
55235514a1c5fe2ac4d4fd9aea14d728ea8cc8f2
|
Python
|
timtim1342/HSE-Programming
|
/hw2/homework2.py
|
UTF-8
| 140
| 3.796875
| 4
|
[] |
no_license
|
a = str(input('Input word:')).replace("з","").replace("З","").replace("я","").replace("Я","")
for i in reversed(a):
print(i,end='')
| true
|
0f3f4fb5203478eaf9d6760a9e36af200b259bcd
|
Python
|
steph-mcd/udacity-data-lake-with-spark
|
/etl.py
|
UTF-8
| 6,553
| 2.5625
| 3
|
[] |
no_license
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.types import StructType as R, StructField as Fld, DoubleType as Dbl, StringType as Str, IntegerType as Int, DateType as Date
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config.get('AWS','AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY']=config.get('AWS','AWS_SECRET_ACCESS_KEY')
def create_spark_session():
"""
- Establishes Spark Session
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_song_data(spark, input_data, output_data):
"""
- Reads in song data files
- Creates songs table by transforming log files and writing to s3 as parquet
- Creates artists table by transforming log files and writing to s3 as parquet
"""
# get filepath to song data file
song_data =input_data + 'song_data/A/A/A/*.json'
# read song data file
df = spark.read.json(song_data)
df.createOrReplaceTempView('df')
# extract columns to create songs table
songs_table = spark.sql("""
SELECT DISTINCT song_id,
artist_id,
title,
year,
duration
FROM df
WHERE song_id IS NOT NULL
and artist_id is not null
and year is not null
""")
songs_table.head()
# write songs table to parquet files partitioned by year and artist
songs_table.write.mode('overwrite').partitionBy("year", "artist_id").parquet(output_data+'songs_table/')
df.createOrReplaceTempView('df')
# extract columns to create artists table
artists_table = spark.sql("""
SELECT DISTINCT artist_id,
artist_name,
artist_location,
artist_latitude,
artist_longitude
FROM df
WHERE artist_id IS NOT NULL
""")
# write artists table to parquet files
artists_table.write.mode('overwrite').parquet(output_data+'artists_table/')
def process_log_data(spark, input_data, output_data):
"""
- Reads in log data files and song data files
- Creates users table by transforming log files and writing to s3 as parquet
- Creates time table by transforming log files and writing to s3 as parquet
- Creates songplays table by transforming log and song files and writing to s3 as parquet
"""
# get filepath to log data file
log_data = input_data + 'log_data/*/*/*.json'
# read log data file
df = spark.read.json(log_data)
# filter by actions for song plays
df = df.filter(df.page=='NextSong')
df.select(col("ts").cast("timestamp"));
df.createOrReplaceTempView('df')
# extract columns for users table
users_table = spark.sql("""
SELECT userId as user_id,
firstName as first_name,
lastName as last_name,
gender,
level
FROM df
WHERE userId IS NOT NULL
GROUP BY userId, level, gender, firstName, lastName
""")
# write users table to parquet files
users_table.write.mode('overwrite').parquet(output_data + 'users_table/')
# extract columns to create time table
time_table = spark.sql("""
SELECT to_timestamp(ts/1000) as start_time,
hour(to_timestamp(ts/1000)) as hour,
day(to_timestamp(ts/1000)) as day,
weekofyear(to_timestamp(ts/1000)) as week,
month(to_timestamp(ts/1000)) as month,
year(to_timestamp(ts/1000)) as year,
dayofweek(to_timestamp(ts/1000)) as weekday
FROM df
WHERE ts IS NOT NULL
""")
# write time table to parquet files partitioned by year and month
time_table.write.mode('overwrite').parquet(output_data + 'time_table/')
# read in song data to use for songplays table
song_data =input_data + 'song_data/A/A/A/*.json'
# read song data file
song_df = spark.read.json(song_data)
song_df.createOrReplaceTempView('song_df')
print(song_df.schema)
# extract columns from joined song and log datasets to create songplays table
songplays_table = spark.sql("""
SELECT monotonically_increasing_id() as songplay_id,
to_timestamp(log.ts/1000) as start_time,
month(to_timestamp(log.ts/1000)) as month,
year(to_timestamp(log.ts/1000)) as year,
log.userId as user_id,
log.level as level,
song.song_id,
song.artist_id,
log.sessionId as session_id,
log.location,
log.userAgent as user_agent
FROM df log
JOIN song_df song on log.song = song.title and log.artist = song.artist_name
""")
# write songplays table to parquet files partitioned by year and month
songplays_table.write.mode('overwrite').parquet(output_data + 'songplays_table/')
def main():
"""
- calls function to establish spark session
- Calls function to process song files
- Calls function to process log files
"""
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://udacity-datalake-steph-m/"
process_log_data(spark, input_data, output_data)
process_song_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
| true
|
0e15bb314749b3c4fd95447cf896171a1d315d9e
|
Python
|
mysqlbin/python_note
|
/2020-08-01-Python-ZST-4200/01-字符串与正则/2020-08-03-regular.py
|
UTF-8
| 1,731
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/local/bin/python3
#coding=utf-8
""" a|b """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'1(32|33)\d')
print(pattern.findall(str))
""" 输出:['32','33'] """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'(1(32|33)\d{8})')
print(pattern.findall(str))
""" 输出:[('13202095158', '32'), ('13302095158', '33')] """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'((32|33)\d{8})')
print(pattern.findall(str))
""" 输出:[('3202095158', '32'), ('3302095158', '33')] """
"""[158] 匹配1或者5或者8"""
import re
str = "13202095158,133020988"
pattern = re.compile(r'[158]')
print(pattern.findall(str))
""" 输出:['1', '5', '1', '5', '8', '1', '8', '8'] """
""" [^0-8] 匹配非1到8 """
import re
str = "13202095158,133020988"
pattern = re.compile(r'[^0-8]')
print(pattern.findall(str))
""" 输出:['9', ',', '9'] """
""" () 正则分组:匹配规则有里几个()号,就有几组 """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'(32|33)\d')
print(pattern.findall(str))
""" 输出:['32','33'] """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'(1(32|33)\d)')
print(pattern.findall(str))
""" 输出:[('1320', '32'), ('1330', '33')] """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'(1(32|33)\d+)')
print(pattern.findall(str))
""" 输出:[('13202095158', '32'), ('13302095158', '33')] """
import re
str = "13202095158,13302095158,13402095158"
pattern = re.compile(r'(1(32|33)(\d+))')
print(pattern.findall(str))
""" 输出:[('13202095158', '32', '02095158'), ('13302095158', '33', '02095158')] """
| true
|
fc45353f41a8410f38c75957bca830636fe82bfc
|
Python
|
RobertRelyea/adv_homework4
|
/scripts/world.py
|
UTF-8
| 1,956
| 3.421875
| 3
|
[] |
no_license
|
import numpy as np
import matplotlib.pyplot as plt
from obstacles import Square, Circle
from robot import solve_coeffs, generate_path
from plot_utils import *
# Populate world with obstacles
A = Square(center=(-1.25,0.625), length=0.4)
B = Circle(center=(-1.625,-0.3), radius=0.25)
C = Circle(center=(0.75,0), radius=0.125)
D = Circle(center=(1.125,0), radius=0.125)
# Define knot points
knots = [[30, 250],
[50, 230],
[90, 250],
[235, 235],
[180, 360]]
# Start and end times
ts = 0
tf = len(knots) - 1
# Generate paths between all knot points with a time interval of 1
path = []
for t in range(tf):
# Calculate coefficients for 3rd order joint trajectories
c1 = solve_coeffs(t, t+1, knots[t][0], knots[t+1][0])
c2 = solve_coeffs(t, t+1, knots[t][1], knots[t+1][1])
# Calculate 100 points along trajectory during time interval
path += generate_path(t, t+1, c1, c2)
# Plot everything in cartesian space
plt.figure(1)
plot_obstacles([A,B,C,D])
plot_path_fk(path)
plot_knots_fk(knots)
plt.title('Cartesian Space')
plt.xlabel('X')
plt.ylabel('Y')
plt.grid(True)
plt.axis('equal')
plt.savefig("../figures/cart.png")
# Plot everything in joint space
plt.figure(2)
plot_obstacles_ik([A,B,C,D])
plot_path(path)
plot_knots(knots)
plt.title('Joint Space')
plt.xlabel('Theta 1 (Degrees)')
plt.ylabel('Theta 2 (Degrees)')
plt.grid(True)
plt.axis('equal')
plt.savefig("../figures/joint.png")
# Plot theta 1 as a function of time
t = np.linspace(ts, tf, len(path))
plt.figure(3)
path = np.array(path)
plt.scatter(t, path[:,0])
plt.title('Theta 1 vs Time')
plt.xlabel('Time')
plt.ylabel('Theta 1 (Degrees)')
plt.grid(True)
plt.savefig("../figures/theta1.png")
# Plot theta 2 as a function of time
plt.figure(4)
path = np.array(path)
plt.scatter(t, path[:,1])
plt.title('Theta 2 vs Time')
plt.xlabel('Time')
plt.ylabel('Theta 2 (Degrees)')
plt.grid(True)
plt.savefig("../figures/theta2.png")
plt.show()
| true
|
b921986b11576101abfe2aacc3c0568a5f30059a
|
Python
|
JaiRaga/FreeCodeCamp-Data-Structures-and-Algorithms-in-Python
|
/Basic_Algorithm_Scripting/cToF.py
|
UTF-8
| 236
| 4.21875
| 4
|
[] |
no_license
|
# Algorithm to convert temperature in celcius to fahrenheit
def convertToF(celcius):
fahrenheit = celcius * (9 / 5) + 32
return fahrenheit
print(convertToF(-30))
print(convertToF(30))
print(convertToF(20))
print(convertToF(0))
| true
|
399f22d3d704178ea68cfd50946294c7ca771dcd
|
Python
|
LifeLaboratory/Task_platform_backend
|
/task_lesson/api/task/pass_task.py
|
UTF-8
| 2,466
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
import json
from django.http import HttpResponse
from task_lesson.api.helpers import names
from task_lesson.models import Task as TaskModel
from task_lesson.models import Solution as SolutionModel
from task_lesson.api.helpers.checkers import set_types
from task_lesson.api.task.task import Task
from task_lesson.api.helpers.database import get_event_task
from task_lesson.api.helpers.database import get_team_user_in_event
from datetime import datetime as dt
class PassTask(Task):
@staticmethod
def check_flag(data):
"""
        Compares the submitted flag with the flag stored in the database for the given task within the event
:param data:
:return:
"""
try:
task = TaskModel.objects.get(task=data[names.TASK], eventtask__task=data[names.TASK],
flag=data[names.TASK_FLAG], eventtask__event=data[names.EVENT])
except TaskModel.DoesNotExist:
print("Task not found")
            return False, 400
if task is not None:
return True, 200
else:
return False, 400
@staticmethod
def insert_solution(data, status):
"""
        Inserts a record into the solution table
        status = True - the flag was submitted successfully
        status = False - a failed attempt
:param data:
:param status:
:return:
"""
event = data.get(names.EVENT)
user = data.get(names.USER)
event_task = get_event_task(event, data[names.TASK])
team_user = get_team_user_in_event(user, event)
if event_task is not None and team_user is not None:
event_task = SolutionModel(teamuser_id=team_user, eventtask_id=event_task, status=status, date=dt.now())
event_task.save()
def pass_task(self, responce):
"""
        Function for submitting flags:
        gathers the calls to all the methods required to submit a flag
:param responce:
:return:
"""
self.row_data = json.loads(responce.body.decode('utf-8'))
data = self.parse_data(self.row_data, names.PASS_TASK_FIELDS)
set_types(data)
checker, status = self.check_flag(data)
self.insert_solution(data, checker)
return HttpResponse(status=status)
| true
|
0cc90ffcb380f388bb70745c3451aa05babe89bb
|
Python
|
Jigar710/Python_Programs
|
/Decorator/test4.py
|
UTF-8
| 138
| 2.5625
| 3
|
[] |
no_license
|
a = 10
b = 20
print(locals())
print("=================================")
print(locals.__doc__)
print("=================================")
| true
|
14706e2046281a91f7ca610cf7f14fe71f24158b
|
Python
|
AlbertoCastelo/Neuro-Evolution-BNN
|
/tests_non_automated/probabilistic_programming/create_network.py
|
UTF-8
| 5,132
| 3.015625
| 3
|
[] |
no_license
|
import theano
import numpy as np
import pymc3 as pm
def construct_nn(x, y, config):
'''
Follows Twiecki post: https://twiecki.io/blog/2016/06/01/bayesian-deep-learning/
'''
n_hidden = 3
# Initialize random weights between each layer
w_1_init = np.random.randn(config.n_input, n_hidden).astype(theano.config.floatX)
b_1_init = np.random.randn(n_hidden).astype(theano.config.floatX)
w_2_init = np.random.randn(n_hidden, config.n_output).astype(theano.config.floatX)
b_2_init = np.random.randn(config.n_output).astype(theano.config.floatX)
# init_out = np.random.randn(n_hidden).astype(floatX)
with pm.Model() as neural_network:
# Weights from input to hidden layer
weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
shape=(config.n_input, n_hidden),
testval=w_1_init)
# Bias from input to hidden layer
bias_1 = pm.Normal('b_in_1', 0, sd=1,
shape=n_hidden,
testval=b_1_init)
# Weights from 1st to 2nd layer
weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
shape=(n_hidden, config.n_output),
testval=w_2_init)
# Bias from 1st to 2nd layer
bias_2 = pm.Normal('b_in_2', 0, sd=1,
shape=config.n_output,
testval=b_2_init)
# # Weights from hidden layer to output
# weights_2_out = pm.Normal('w_2_out', 0, sd=1,
# shape=(n_hidden,),
# testval=init_out)
# Build neural-network using tanh activation function
x_1 = pm.math.dot(x, weights_in_1) + bias_1
act_1 = pm.math.tanh(x_1)
x_2 = pm.math.dot(act_1, weights_1_2) + bias_2
act_2 = pm.math.tanh(x_2)
# Regression -> Normal likelihood
out = pm.Normal('out', act_2, observed=y,
total_size=x.shape[0] # IMPORTANT for minibatches
)
return neural_network
# Trick: Turn inputs and outputs into shared variables.
# It's still the same thing, but we can later change the values of the shared variable
# (to switch in the test-data later) and pymc3 will just use the new data.
# Kind-of like a pointer we can redirect.
# For more info, see: http://deeplearning.net/software/theano/library/compile/shared.html
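# Added sketch of that trick (illustrative only; x_train/y_train/x_test and
# `config` are hypothetical names, not defined in this script):
#
#   x_shared = theano.shared(x_train.astype(theano.config.floatX))
#   y_shared = theano.shared(y_train.astype(theano.config.floatX))
#   neural_network = construct_nn(x_shared, y_shared, config)
#   ...                                                      # run inference on the training data
#   x_shared.set_value(x_test.astype(theano.config.floatX))  # swap in the test data later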
def construct_2_hidden_nn(x, y, config):
'''
Follows Twiecki post: https://twiecki.io/blog/2016/06/01/bayesian-deep-learning/
'''
n_hidden = 10
# Initialize random weights between each layer
w_1_init = np.random.randn(config.n_input, n_hidden).astype(theano.config.floatX)
b_1_init = np.random.randn(n_hidden).astype(theano.config.floatX)
w_2_init = np.random.randn(n_hidden, n_hidden).astype(theano.config.floatX)
b_2_init = np.random.randn(n_hidden).astype(theano.config.floatX)
# w_3_init = np.random.randn(n_hidden, config.n_output).astype(theano.config.floatX)
# b_3_init = np.random.randn(config.n_output).astype(theano.config.floatX)
with pm.Model() as neural_network:
# Weights from input to hidden layer
weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
shape=(config.n_input, n_hidden),
testval=w_1_init)
# Bias from input to hidden layer
bias_1 = pm.Normal('b_in_1', 0, sd=1,
shape=n_hidden,
testval=b_1_init)
# Weights from 1st to 2nd layer
weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
shape=(n_hidden, n_hidden),
testval=w_2_init)
# Bias from 1st to 2nd layer
bias_2 = pm.Normal('b_in_2', 0, sd=1,
shape=n_hidden,
testval=b_2_init)
# weights_2_3 = pm.Normal('w_2_3', 0, sd=1,
# shape=(n_hidden, config.n_output),
# testval=w_3_init)
#
# # Bias from 1st to 2nd layer
# bias_3 = pm.Normal('b_3', 0, sd=1,
# shape=config.n_output,
# testval=b_3_init)
# # Weights from hidden layer to output
# weights_2_out = pm.Normal('w_2_out', 0, sd=1,
# shape=(n_hidden,),
# testval=init_out)
# Build neural-network using tanh activation function
x_1 = pm.math.dot(x, weights_in_1) + bias_1
act_1 = pm.math.tanh(x_1)
x_2 = pm.math.dot(act_1, weights_1_2) + bias_2
act_2 = pm.math.tanh(x_2)
# x_3 = pm.math.dot(act_2, weights_2_3) + bias_3
# act_3 = pm.math.tanh(x_3)
# Regression -> Normal likelihood
out = pm.Normal('out', act_2, observed=y,
total_size=x.shape[0] # IMPORTANT for minibatches
)
return neural_network
| true
|
921b64da3df3c25b25de18d78711c9395f9b0115
|
Python
|
sean1792/socket-http
|
/client.py
|
UTF-8
| 758
| 2.703125
| 3
|
[] |
no_license
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#addr = input("Please enter the Server IP:")
addr ='192.168.1.9'
h=socket.gethostbyname(addr)
print(h)
port = 8000
#f = input("enter file name:")
f='index.html'
header = 'GET /'
header += f
header+=' HTTP/1.1\r\nHost: '+addr+'\r\nConnection: close\r\n\r\n'
print(header)
s.connect((addr,port))
#print(s.recv(1024).decode())
while True:
s.send(header.encode())
data = s.recv(1024)
file=open('example.html','w')
print (data.decode())
print(data)
g= data.decode().split('\r\n\r\n')[1]
file.write(g)
print('\n\n\n\n\n\n')
print(g)
#print (data.decode().split()[5])
#a=input('stop(y/n)?')
a='y'
if a=='y':
break
s.close()
| true
|
e8e9af0807742e5c52e187ffeaba47f655ad016b
|
Python
|
timcera/tsgettoolbox
|
/src/tsgettoolbox/functions/rivergages.py
|
UTF-8
| 2,117
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""
rivergages US station:USACE river gages
"""
import pandas as pd
from toolbox_utils import tsutils
from tsgettoolbox.ulmo.usace.rivergages.core import (
get_station_data,
get_station_parameters,
get_stations,
)
__all__ = ["rivergages"]
# def get_station_data(station_code, parameter, start=None, end=None,
# min_value=None, max_value=None):
def rivergages(station_code, parameter, start_date=None, end_date=None):
"""US:station:::USACE river gages
Stage and flow from systems managed by the U.S. Army Corps of Engineers.
Parameters
----------
station_code : str
The station code for the station.
parameter : str
Parameter code.
start_date
The start date of the desired time-series.
end_date
The end data of the desired time-series.
"""
tstations = get_stations()
if station_code not in tstations:
raise ValueError(
tsutils.error_wrapper(
f"""
Station code {station_code} not in available stations:
                {list(tstations.keys())}
"""
)
)
tparameters = get_station_parameters(station_code)
if parameter not in tparameters:
raise ValueError(
tsutils.error_wrapper(
f"""
Parameter code {parameter} not in available parameters at
station {station_code}: {tparameters}
"""
)
)
df = get_station_data(
station_code,
parameter,
start=pd.to_datetime(start_date),
end=pd.to_datetime(end_date),
)
df = pd.DataFrame.from_dict(df, orient="index")
df.sort_index(inplace=True)
df.index.name = "Datetime"
df.columns = [f"{station_code}_{parameter}"]
return df
if __name__ == "__main__":
# import time
#
# r = ulmo_df('blah',
# 'upperbasin')
#
# print('BIVOI_HL')
# print(r)
#
r = rivergages("BIVO1", "HL", start_date="2015-11-04", end_date="2015-12-05")
print("BIVOI HL")
print(r)
| true
|
375a9a7f8283ada1d75b93972a24b3a70e792b86
|
Python
|
gabriellaec/desoft-analise-exercicios
|
/backup/user_274/ch160_2020_06_22_17_46_52_535308.py
|
UTF-8
| 231
| 3.3125
| 3
|
[] |
no_license
|
import math
a = 0
b = 0
for i in range(91):
c = math.radians(i)
x = math.sin(c)
x = math.degrees(x)
y = (4*i*(180-x))/(40500 - x*(180-x))
e = abs(x-y)
if e > a:
a = e
b = i
print(b)
| true
|
947d3bcf9fadc3bf2a9a1650d24a30d91d69e894
|
Python
|
hoggard/stepik---auto-tests-course
|
/Lessons/Part2_lesson2_step6.py
|
UTF-8
| 751
| 2.71875
| 3
|
[] |
no_license
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import math
link = "http://SunInJuly.github.io/execute_script.html"
def calc(x):
return str(math.log(abs(12*math.sin(x))))
try:
browser = webdriver.Chrome()
browser.get(link)
x = browser.find_element_by_id("input_value").text
y = calc(int(x))
browser.find_element_by_id("answer").send_keys(y)
button = browser.find_element_by_css_selector("body div.container form button")
browser.execute_script("return arguments[0].scrollIntoView(true);", button)
browser.find_element_by_id("robotCheckbox").click()
browser.find_element_by_id("robotsRule").click()
button.click()
finally:
time.sleep(10)
browser.quit()
| true
|
959e2de5c726988c5262763169aa25884e717937
|
Python
|
ktzhao/hoppy
|
/hoppy/nicer/cli/niget_yyyymm.py
|
UTF-8
| 1,265
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import os
import argparse
from argparse import ArgumentParser
import pandas as pd
__author__ = 'Teruaki Enoto'
__version__ = '0.01'
# v0.01 : 2020-08-01 : original version
def get_parser():
"""
Creates a new argument parser.
"""
parser = argparse.ArgumentParser('niget_yyyymm.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
help description
"""
)
version = '%(prog)s ' + __version__
parser.add_argument('--version', '-v', action='version', version=version,
help='show version of this command')
parser.add_argument('--csvfile', '-c', type=str, default="nicer_target_segment_table.csv",
help='csvfile')
parser.add_argument('--obsid', '-o', type=str, default=None,
help='target ObsID (default=None)')
return parser
def get_yyyymm(csvfile,obsid):
print("--input csvfile: {}".format(csvfile))
print("--obsid: {}".format(obsid))
df = pd.read_csv(csvfile,comment='#')
df['Observation ID'] = df['Observation ID'].astype(str).str.zfill(10)
str_start_timeutc = df['Start TimeUTC'][df['Observation ID'] == obsid]
print(str_start_timeutc)
def main(args=None):
parser = get_parser()
args = parser.parse_args(args)
get_yyyymm(args.csvfile,obsid=args.obsid)
if __name__=="__main__":
main()
| true
|
4134e930f0745d1af6d271d02e5b16cfa58dc06c
|
Python
|
waltercoan/ALPC1ano2018
|
/fatorial.py
|
UTF-8
| 157
| 3.71875
| 4
|
[] |
no_license
|
print("Digite o numero base")
base = int(input())
fat = 1
while base >= 1:
print(base)
fat = fat * base
base = base - 1
print("Resultado", fat)
| true
|
618ca87dd8efc0a0bbd00fca92426800f4069f59
|
Python
|
nicksavers/raiden
|
/raiden/encoding/format.py
|
UTF-8
| 4,032
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
from collections import namedtuple, Counter
try: # py3k
from functools import lru_cache
except ImportError:
from repoze.lru import lru_cache
__all__ = ('Field', 'BYTE', 'namedbuffer', 'buffer_for',)
Field = namedtuple(
'Field',
('name', 'size_bytes', 'format_string', 'encoder'),
)
BYTE = 2 ** 8
@lru_cache(10) # caching so the factory returns the same object
def pad(size_bytes):
name = 'pad_{}'.format(size_bytes)
format_string = '{}x'.format(size_bytes)
return Field(name, size_bytes, format_string, None)
def make_field(name, size_bytes, format_string, encoder=None):
if size_bytes < 0:
raise ValueError('negative size_bytes')
return Field(
name,
size_bytes,
format_string,
encoder,
)
def buffer_for(klass):
''' Returns a new buffer of the appropriate size for klass. '''
return bytearray(klass.size)
def namedbuffer(buffer_name, fields_spec): # noqa (ignore ciclomatic complexity)
''' Wraps a buffer instance using the field spec.
The field spec specifies how many bytes should be used for a field and what
is the encoding / decoding function.
'''
if not len(buffer_name):
raise ValueError('buffer_name is empty')
if not len(fields_spec):
raise ValueError('fields_spec is empty')
if any(field.size_bytes < 0 for field in fields_spec):
raise ValueError('negative size_bytes')
    if any(len(field.name) == 0 for field in fields_spec):
raise ValueError('field missing name')
if any(count > 1 for count in Counter(field.name for field in fields_spec).values()):
raise ValueError('repeated field name')
size = sum(field.size_bytes for field in fields_spec)
fields = list()
name_slice = dict()
name_field = dict()
start = 0
for field in fields_spec:
end = start + field.size_bytes
# don't create a slices and attributes for paddings
if not field.name.startswith('pad_'):
name_slice[field.name] = slice(start, end)
name_field[field.name] = field
fields.append(field.name)
start = end
# big endian format
fields_format = '>' + ''.join(field.format_string for field in fields_spec)
def __init__(self, data):
if len(data) < size:
raise ValueError('data buffer is too small')
# XXX: validate or initialize the buffer?
self.data = data
def __getattr__(self, name):
if name in name_slice:
slice_ = name_slice[name]
field = name_field[name]
value = self.data[slice_]
if field.encoder:
value = field.encoder.decode(value)
return value
raise AttributeError
def __setattr__(self, name, value):
if name in name_slice:
slice_ = name_slice[name]
field = name_field[name]
if field.encoder:
field.encoder.validate(value)
value = field.encoder.encode(value, field.size_bytes)
length = len(value)
if length > field.size_bytes:
msg = 'value with length {length} for {attr} is too big'.format(
length=length,
attr=name,
)
raise ValueError(msg)
elif length < field.size_bytes:
pad_size = field.size_bytes - length
pad_value = b'\x00' * pad_size
value = pad_value + value
self.data[slice_] = value
else:
super(self.__class__, self).__setattr__(name, value)
attributes = {
'__init__': __init__,
'__slots__': ('data',),
'__getattr__': __getattr__,
'__setattr__': __setattr__,
'fields': fields,
'fields_spec': fields_spec,
'name': buffer_name,
'format': fields_format,
'size': size,
}
return type(buffer_name, (), attributes)
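# Usage sketch (added for illustration; the 'Message' layout below is
# hypothetical and not one of the real message formats):
#
#   Message = namedbuffer('Message', [
#       make_field('nonce', 8, '8s'),
#       pad(2),
#       make_field('amount', 32, '32s'),
#   ])
#   msg = Message(buffer_for(Message))  # zeroed 42-byte bytearray
#   msg.nonce = b'\x01' * 8             # attribute writes go into the buffer slice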
| true
|
68c6a765a158ae565cd310609e5f969d7834c096
|
Python
|
couchbaselabs/mobile-testkit
|
/libraries/testkit/parallelize.py
|
UTF-8
| 2,166
| 2.578125
| 3
|
[] |
no_license
|
import concurrent.futures
from libraries.testkit import settings
import copyreg
import types
from threading import Thread
from keywords.utils import log_info
from keywords.utils import log_debug
# This function is added to use ProcessExecutor
# concurrent.futures.
#
def _pickle_method(m):
if m.__self__ is None:
return getattr, (m.__self__.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
copyreg.pickle(types.MethodType, _pickle_method)
# Using Process Pool
def parallel_process(objects, method, *args):
with concurrent.futures.ProcessPoolExecutor(max_workers=settings.MAX_REQUEST_WORKERS) as executor:
futures = {executor.submit(getattr(obj, method), *args): obj for obj in objects}
for future in concurrent.futures.as_completed(futures):
if concurrent.futures.as_completed(futures):
obj = futures[future]
try:
log_debug("Object {} method {} output {}".format(obj, method, future.result()))
except Exception as exception:
log_info('Generated an exception : {} : {}'.format(obj, exception))
# Using Thread Pool
def in_parallel(objects, method, *args):
result = {}
with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_REQUEST_WORKERS) as executor:
futures = {executor.submit(getattr(obj, method), *args): obj for obj in objects}
for future in concurrent.futures.as_completed(futures):
if concurrent.futures.as_completed(futures):
obj = futures[future]
try:
result[obj] = future.result()
log_debug("Object {} method {} output {}".format(obj, method, result[obj]))
except Exception as exception:
log_info('Generated an exception : {} : {}'.format(obj, exception))
raise ValueError('in_parallel: got exception', exception, obj)
return result
def run_async(function, *args, **kwargs):
thread = Thread(target=function, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
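# Usage sketch (added; `servers`, `restart` and `timeout` are hypothetical names,
# not defined in this module):
#
#   results = in_parallel(servers, 'restart', timeout)            # calls obj.restart(timeout) on a thread pool
#   run_async(log_info, 'restart kicked off in the background')   # fire-and-forget daemon thread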
| true
|
5aa5c58612743d7afe30d7930680b6cac456aec5
|
Python
|
sahana/SAMBRO
|
/languages/th.py
|
UTF-8
| 2,630
| 2.65625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# -*- coding: utf-8 -*-
{
'Add New Organization': 'เพิ่มองค์กรใหม่',
'Add Organization': 'เพิ่มองค์กร',
'agree': 'เห็นด้วย',
'all of it': 'ทั้งหมด',
'Dear %(person_name)s': 'เรียน %(person_name)s',
'Delete Catalog': 'ลบแคตตาล็อก',
'Delete Catalog Item': 'ลบรายการแคตตาล็อก',
'Delete Organization': 'ลบองค์กร',
'Department Catalog': 'Department แคตตาล็อก',
'disagree': 'ไม่เห็นด้วย',
'Edit Catalog': 'Edit แคตตาล็อก',
'Edit Catalog Item': 'Edit แคตตาล็อก Item',
'most of it': 'ส่วนใหญ่',
'no': 'ไม่ใช่',
'no change': 'ไม่มีความเปลี่ยนแปลง',
'not at all': 'ไม่เลย',
'part of it': 'บางส่วน',
'significant change': 'เปลี่ยนแปลงไปอย่างมาก',
'some change': 'เปลี่ยนแปลงไปบางส่วน',
'strongly agree': 'เห็นด้วยอย่างยิ่ง',
'strongly disagree': 'ไม่เห็นด้วยอย่างยิ่ง',
'Thai': 'ภาษาไทย',
'Thank you for taking this survey and helping us to increase the quality of our trainings.': 'ขอขอบคุณที่เข้าร่วมการสำรวจครั้งนี้และช่วยเราในการเพิ่มคุณภาพการฝึกอบรมของเรา',
'Thank you for your participation.': 'ขอบคุณสำหรับการร่วมตอบแบบสอบถามนี้',
'The information collected through this questionnaire will be treated as confidential.': 'ข้อมูลที่ถูกรวบรวมจากแบบสอบถามนี้จะถูกเก็บเป็นความลับ',
'We have 8 simple questions for you, this should not take more than 15mn of your time.': 'เรามีคำถามง่ายๆจำนวน 8 ข้อให้คุณตอบ ซึ่งใช้เวลาไม่เกิน 15 นาที',
'yes': 'ใช่',
'You are receiving this email as a participant of %(event_name)s held in %(location)s on %(date)s.': 'คุณได้รับอีเมล์ฉบับนี้ในฐานะผู้มีส่วนร่วมใน %(event_name)s ซึ่งจัดขึ้นที่ %(location)s ในวันที่ %(date)s.',
}
| true
|
fa7bb2e37bd4339a2dbc4ab761d64b08bbca8caf
|
Python
|
pfuntner/toys
|
/bin/procchain
|
UTF-8
| 1,288
| 2.609375
| 3
|
[] |
no_license
|
#! /usr/bin/env python3
import re
import os
import logging
import argparse
import subprocess
parser = argparse.ArgumentParser(description='Show process chain from current process')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Enable debugging')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s')
log = logging.getLogger()
log.setLevel(logging.DEBUG if args.verbose else logging.WARNING)
cmd = ['ps', '-eo', 'pid,ppid,etime,args']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
rc = p.wait()
log.debug(f'{cmd}: {rc}, {stdout!r}, {stderr!r}')
regexp = re.compile(r'^\s*(\d+)\s+(\d+)')
processes = []
lines = stdout.splitlines()
headings = lines[0] if lines else ''
curr = os.getpid()
while True:
for line in lines[1:]:
log.debug(f'processing: {line!r}')
match = regexp.search(line)
if match:
(pid, ppid) = (int(match.group(1)), int(match.group(2)))
if curr == pid:
processes.insert(0, line)
curr = ppid
break
else:
break
if processes:
print(headings)
print('\n'.join(processes))
| true
|
051b6cf31779c316082ce6d217f8e6a91d5328df
|
Python
|
sandyskim/bioinfo-algos
|
/hp2/basic_hasher.py
|
UTF-8
| 9,896
| 2.703125
| 3
|
[] |
no_license
|
import sys
import argparse
import numpy as np
import time
import zipfile
from collections import defaultdict
import pickle
from multiprocessing import Pool
import os.path
from os import path
start_time = time.time()
def parse_reads_file(reads_fn):
"""
:param reads_fn: the file containing all of the reads
:return: outputs a list of all paired-end reads
HINT: This might not work well if the number of reads is too large to handle in memory
"""
try:
with open(reads_fn, 'r') as rFile:
print("Parsing Reads")
first_line = True
count = 0
all_reads = []
for line in rFile:
count += 1
if count > 3000000:
break
if count % 1000 == 0:
print(count, " reads done")
if first_line:
first_line = False
continue
ends = line.strip().split(',')
all_reads.append(ends)
return all_reads
except IOError:
print("Could not read file: ", reads_fn)
return None
def parse_ref_file(ref_fn):
"""
:param ref_fn: the file containing the reference genome
:return: a string containing the reference genome
"""
try:
with open(ref_fn, 'r') as gFile:
print("Parsing Ref")
first_line = True
ref_genome = ''
for line in gFile:
if first_line:
first_line = False
continue
ref_genome += line.strip()
return ref_genome
except IOError:
print("Could not read file: ", ref_fn)
return None
"""
TODO: Use this space to implement any additional functions you might need
"""
def generate_kmers(k, read):
return [read[i:i+k] for i in range(0, len(read), k)]
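# Worked example (added comment): generate_kmers(3, "ABCDEFG") -> ["ABC", "DEF", "G"];
# the read is cut into consecutive non-overlapping chunks of length k (the last
# chunk may be shorter), rather than sliding-window k-mers.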
def kmers_dict(k, input_reads):
dkmers = {}
for read in input_reads:
dkmers[read] = generate_kmers(k, read)
return dkmers
def index_genome(k, genome):
indices = defaultdict(list)
for i in range(len(genome)-k):
sub = genome[i:i+k]
indices[sub].append(i)
return indices
def calc_diff(seq1, seq2):
return sum(1 for s1, s2 in zip(seq1, seq2) if s1 != s2)
def correct_snps(snps):
print('correcting!')
freq = {}
for snp in snps:
if tuple(snp) not in freq:
freq[tuple(snp)] = 1
else:
freq[tuple(snp)] += 1
corrected_snps = []
for key, value in freq.items():
if value > 26:
corrected_snps.append(list(key))
return corrected_snps
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))
def is_noncontig_substr(needle, haystack, ref_index):
i = 0
l = len(needle)
inserted = ""
first_index = None
for j in range(len(haystack)):
if i < l and haystack[j] == needle[i]:
i += 1
else:
if first_index == None:
first_index = ref_index + j
inserted += haystack[j]
if i == l:
return [inserted, first_index]
return None
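# Worked example (added comment): is_noncontig_substr("ACG", "ACTG", 100) matches
# A, C, G of the needle in order, collects the leftover "T" as the inserted
# sequence and returns ["T", 102] (102 = 100 + offset of the extra base).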
def find_snps(input_reads, reference, indices, dkmers):
print('finding snps!')
snps = {}
corrected_snps= []
for read in input_reads:
kmers = dkmers[read]
for kmer in kmers:
if kmer in indices:
index = indices[kmer]
j = read.find(kmer)
for ind in index:
if(ind-j < 0 or ind-j+len(read) > len(reference)):
continue
difference = calc_diff(reference[ind-j:ind-j+len(read)], read)
if difference < 4:
for i in range(len(read)):
if read[i] != reference[ind+i-j]:
if (reference[ind+i-j], read[i], ind+i-j) not in snps:
snps[(reference[ind+i-j], read[i], ind+i-j)] = 1
else:
snps[(reference[ind+i-j], read[i], ind+i-j)] += 1
if snps[(reference[ind+i-j], read[i], ind+i-j)] == 26:
corrected_snps.append([reference[ind+i-j], read[i], ind+i-j])
return corrected_snps
def find_indels(k, input_reads, reference, indices, dkmers):
print('finding indels!')
ins = []
dels = []
for read in input_reads:
kmers = dkmers[read]
        if kmers[0] in indices and kmers[2] in indices:
index1 = indices[kmers[0]]
index3 = indices[kmers[2]]
for ind1 in index1:
if ind1+32 not in index3:
for ind3 in index3:
if 2*k < ind3-ind1 < 2*k+4:
deleted_candidate = is_noncontig_substr(kmers[1], reference[ind1+k:ind3], ind1+k)
if deleted_candidate != None and deleted_candidate not in dels:
dels.append(deleted_candidate)
elif 0 < ind3-ind1 < 2*k:
inserted_candidate = is_noncontig_substr(reference[ind1+k:ind3], kmers[1], ind1+k)
if inserted_candidate != None and inserted_candidate not in ins:
ins.append(inserted_candidate)
        if kmers[0] in indices and kmers[1] in indices:
index1 = indices[kmers[0]]
index2 = indices[kmers[1]]
for ind1 in index1:
if ind1+16 not in index2:
for ind2 in index2:
if k < ind2-ind1 < k+4:
deleted_candidate = is_noncontig_substr("", reference[ind1+k:ind2], ind1+k)
if deleted_candidate != None and deleted_candidate not in dels:
dels.append(deleted_candidate)
        if kmers[1] in indices and kmers[2] in indices:
index2 = indices[kmers[1]]
index3 = indices[kmers[2]]
for ind2 in index2:
if ind2+16 not in index3:
for ind3 in index3:
if k < ind3-ind2 < k+4:
deleted_candidate = is_noncontig_substr("", reference[ind2+k:ind3], ind2+k)
if deleted_candidate != None and deleted_candidate not in dels:
dels.append(deleted_candidate)
return ins, dels
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='basic_hasher.py takes in data for homework assignment 2 consisting '
'of a genome and a set of reads and aligns the reads to the reference genome, '
'then calls SNPS and indels based on this alignment.')
parser.add_argument('-g', '--referenceGenome', required=True, dest='reference_file',
help='File containing a reference genome.')
parser.add_argument('-r', '--reads', required=True, dest='reads_file',
help='File containg sequencing reads.')
parser.add_argument('-o', '--outputFile', required=True, dest='output_file',
help='Output file name.')
parser.add_argument('-t', '--outputHeader', required=True, dest='output_header',
help='String that needs to be outputted on the first line of the output file so that the\n'
'online submission system recognizes which leaderboard this file should be submitted to.\n'
'This HAS to be one of the following:\n'
'1) practice_W_3_chr_1 for 10K length genome practice data\n'
'2) practice_E_1_chr_1 for 1 million length genome practice data\n'
'3) hw2undergrad_E_2_chr_1 for project 2 undergrad for-credit data\n'
'4) hw2grad_M_1_chr_1 for project 2 grad for-credit data\n')
args = parser.parse_args()
reference_fn = args.reference_file
reads_fn = args.reads_file
input_reads = parse_reads_file(reads_fn)
if input_reads is None:
sys.exit(1)
reference = parse_ref_file(reference_fn)
if reference is None:
sys.exit(1)
"""
TODO: Call functions to do the actual read alignment here
"""
reads = []
for pairs in input_reads:
for read in pairs:
reads.append(read)
dkmers = kmers_dict(10, reads)
indices = index_genome(10, reference)
snps = find_snps(reads, reference, indices, dkmers)
dkmers = kmers_dict(16, reads)
indices = index_genome(16, reference)
indels = find_indels(16, reads, reference, indices, dkmers)
insertions = indels[0]
deletions = indels[1]
output_fn = args.output_file
zip_fn = output_fn + '.zip'
with open(output_fn, 'w') as output_file:
output_file.write('>' + args.output_header + '\n>SNP\n')
for x in snps:
output_file.write(','.join([str(u) for u in x]) + '\n')
output_file.write('>INS\n')
for x in insertions:
output_file.write(','.join([str(u) for u in x]) + '\n')
output_file.write('>DEL\n')
for x in deletions:
output_file.write(','.join([str(u) for u in x]) + '\n')
with zipfile.ZipFile(zip_fn, 'w') as myzip:
myzip.write(output_fn)
print("--- %s seconds ---" % (time.time() - start_time))
| true
|
f0381972dec40230f1513243abb75f63ee126110
|
Python
|
shhuan/algorithms
|
/py/google/cj2015/round1C/__init__.py
|
UTF-8
| 825
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
created by huash06 at 2015-05-26 10:51
"""
__author__ = 'huash06'
import datetime
import sys
sys.stdin = open('input/A-small-practice.in', 'r')
sys.stdout = open('output/A-small-practice.out', 'w')
# sys.stdin = open('input/A-large-practice.in', 'r')
# sys.stdout = open('output/A-large-practice.out', 'w')
class Solution:
def __init__(self):
pass
def test(self):
pass
def readInput(self):
pass
def readMockInput(self):
pass
def solve(self):
pass
startTime = datetime.datetime.now()
T = int(input())
for ti in range(1, T + 1):
solution = Solution()
solution.readInput()
res = solution.solve()
print('Case #{}: {}'.format(ti, res))
sys.stderr.write('Time Cost:{}\n'.format(datetime.datetime.now() - startTime))
| true
|
05a6badccfab16946579f40cf3e8c968f33d8b3e
|
Python
|
Hironobu-Kawaguchi/atcoder
|
/Codeforces/Codeforces1466_e_TLE.py
|
UTF-8
| 1,227
| 2.875
| 3
|
[] |
no_license
|
# https://codeforces.com/contest/1466/problem/E
MOD = 10**9+7
P = 60 # bit positions 0..59 (values up to 2**60)
def main():
    n = int(input())
    x = list(map(int, input().split()))
    cnt = [0]*P # per-bit totals: sum of f(x, c) over the whole array
for i in range(n):
for j in range(P):
cnt[j] += (x[i] >> j) & 1
ans = 0
for i in range(n):
exp_or, exp_and = 0, 0
for j in range(P):
if (x[i] >> j) & 1: # f(xj,c)==1
exp_or += (1 << j) % MOD * n
exp_and += (1 << j) % MOD * cnt[j]
else: # f(xj,c)==0
exp_or += (1 << j) % MOD * cnt[j]
exp_or %= MOD
exp_and %= MOD
ans += (exp_and * exp_or) % MOD
ans %= MOD
print(ans)
return
t = int(input())
for i in range(t):
main()
# TLE (brute force) solution, kept for reference
# MOD = 10**9+7
# def main():
# n = int(input())
# x = list(map(int, input().split()))
# ans = 0
# for i in range(n):
# for j in range(n):
# for k in range(n):
# ans += (x[i]&x[j]) * (x[j]|x[k])
# ans %= MOD
# print(ans)
# return
# t = int(input())
# for i in range(t):
# main()
| true
|
39554e170380e34831a4204e87d108ba009c33d0
|
Python
|
havenshi/leetcode
|
/141. Linked List Cycle.py
|
UTF-8
| 863
| 3.515625
| 4
|
[] |
no_license
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if not head or not head.next: # 0 or 1 item
return False
slow = head
fast = head
# >2 item, omit the first time that slow == fast
slow = slow.next
fast = fast.next.next
while fast and fast.next: # do not judge slow, since fast ahead of slow is true
slow = slow.next # slow cursor move one step
fast = fast.next.next # fast cursor move two steps
if fast == slow:
return True # fast cross circle twice and reaches the slow
return False # fast reaches the end
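# Added usage sketch (assumes the ListNode definition commented out above):
#   a, b, c = ListNode(1), ListNode(2), ListNode(3)
#   a.next, b.next, c.next = b, c, b   # 1 -> 2 -> 3 -> back to 2
#   Solution().hasCycle(a)             # True
#   c.next = None
#   Solution().hasCycle(a)             # False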
| true
|
26e4122eafb87981861cc958e74b03b43f1a40ca
|
Python
|
kaurrachneet6/ShowAndTell-neural-captioning
|
/Flickr/NIC_Test_5GRU.py
|
UTF-8
| 22,137
| 2.5625
| 3
|
[] |
no_license
|
'''
CS 598 Final Project: Show and Tell
Neural Image Captioning
Team Members: Avinash, Ankit Kumar, Daksh Saraf, Rachneet Kaur
Script to test the Encoder and Decoder model for Flickr Dataset
'''
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from torch.autograd import Variable
import h5py
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
import random
from collections import Counter
import nltk
import pickle
from PIL import Image
import torch.utils.data as data
import os
import pandas as pd
from datetime import datetime
import time
from nltk.translate.bleu_score import sentence_bleu
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
#Defining the dictionary for the needed paths and parameters
parameters = {'batch_size':32,
#Larger Batch sizes give out of memory errors on Blue Waters, hence choose 32
'shuffle':True,
'num_workers':32,
'data_dir':'/u/training/tra402/flickr30k_images',
'output_dir': '/u/training/tra402/scratch/Project/Flickr30_5GRU/Output',
'train_ann_path': '/u/training/tra402/train_annotations.tsv',
'test_ann_path': '/u/training/tra402/test_annotations.tsv',
'val_ann_path':'/u/training/tra402/val_annotations.tsv',
'vocab_path':'vocab.pkl',
'train_img_dir':'/u/training/tra402/flickr30k_images',
'test_img_dir': '/u/training/tra402/flickr30k_images',
'vocab_threshold':5}
#Class to build the vocabulary and assign start and end token whenever necessary
class DatasetVocabulary(object):
def __init__(self):
self.word_to_index = {}
self.index_to_word = {}
self.index = 0
def adding_new_word(self, word):
#Adding a new word to the vocabulary, if it already doesn't exist
if not word in self.word_to_index:
self.word_to_index[word] = self.index
self.index_to_word[self.index] = word
self.index += 1
def __call__(self, word):
if not word in self.word_to_index:
#If word does not exist in vocabulary, then return unknown token
return self.word_to_index['<unk>']
return self.word_to_index[word]
def __len__(self):
#Returns the length of the vocabulary
return len(self.word_to_index)
def start_token(self):
#Returns the start token
return '<start>'
def end_token(self):
#Returns the end token
return '<end>'
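#Added illustrative sketch: the vocabulary maps words to integer indices and
#falls back to the <unk> index for unseen words, e.g.
#   vocab = DatasetVocabulary()
#   for w in ['<pad>', '<start>', '<end>', '<unk>', 'dog']:
#       vocab.adding_new_word(w)
#   vocab('dog')    # -> 4
#   vocab('zebra')  # -> 3, the index of <unk>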
#Function for creating the vocabulary from the Flickr annotation file
def creating_vocabulary(json, threshold):
    annotations = pd.read_table(json, sep='\t', header=None, names=['image', 'caption'])
    #Counting how often every token occurs across all the captions
    counter = Counter()
    for i in range(annotations.shape[0]):
        caption = str(annotations['caption'][i])
        #Converting all the words to lower case and tokenizing them
        tokens = nltk.tokenize.word_tokenize(caption.lower())
        counter.update(tokens)
    #We only consider the words which appear more than a particular threshold
    vocabulary_words = []
    for vocab_word, vocab_word_count in counter.items():
        if vocab_word_count >= threshold:
            vocabulary_words.append(vocab_word)
    vocabulary_dataset = DatasetVocabulary()
    vocabulary_dataset.adding_new_word('<pad>')
    vocabulary_dataset.adding_new_word('<start>')
    vocabulary_dataset.adding_new_word('<end>')
    vocabulary_dataset.adding_new_word('<unk>')
    for index, vocab_word in enumerate(vocabulary_words):
        vocabulary_dataset.adding_new_word(vocab_word)
    return vocabulary_dataset
vocab_path = os.path.join(parameters['output_dir'], parameters['vocab_path'])
#Loading the vocabulary from the vocabulary file
def get_vocab(vocab_path):
if(os.path.isfile(vocab_path)):
        #If the file has already been created and exists
with open(vocab_path, 'rb') as f:
vocab = pickle.load(f)
print('vocab loaded from pickle file')
else:
#Else creating the vocabulary file
vocab = creating_vocabulary(json=os.path.join(parameters['train_ann_path']), threshold=parameters['vocab_threshold'])
with open(vocab_path, 'wb') as f:
pickle.dump(vocab, f)
return vocab
class FlickrDataset(data.Dataset):
def __init__(self, data_path, ann_path, vocab, transform=None):
self.data_path = data_path
self.annotation_path = ann_path
self.vocab = vocab
self.transform = transform
self.annotations = pd.read_table(self.annotation_path , sep='\t', header=None, names=['image', 'caption'])
self.annotations['image_num'] = self.annotations['image'].map(lambda x: x.split('#')[1])
self.annotations['image'] = self.annotations['image'].map(lambda x: x.split('#')[0])
def __getitem__(self, index):
annotations = self.annotations
vocab = self.vocab
caption = annotations['caption'][index]
img_id = annotations['image'][index]
image = Image.open(os.path.join(self.data_path, img_id)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
tokens = nltk.tokenize.word_tokenize(str(caption).lower())
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return img_id, image, target
def __len__(self):
return self.annotations.shape[0]
def create_batch(data):
'''
Function to create batches from images and it's corresponding real captions
'''
#Sorting
data.sort(key=lambda x: len(x[2]), reverse=True)
#Retrieving the images and their corresponding captions
dataset_images, dataset_captions = zip(*data)
#Stacking the images together
dataset_images = torch.stack(dataset_images, 0)
#Writing the lengths of the image captions to a list
caption_lengths = []
for caption in dataset_captions:
caption_lengths.append(len(caption))
target_captions = torch.zeros(len(dataset_captions), max(caption_lengths)).long()
for index, image_caption in enumerate(dataset_captions):
caption_end = caption_lengths[index]
#Computing the length of the particular caption for the index
target_captions[index, :caption_end] = image_caption[:caption_end]
#Returns the images, captions and lengths of captions according to the batches
return dataset_images, target_captions, caption_lengths
def get_data_transforms():
'''
Function to apply data Transformations on the dataset
'''
data_trans = transforms.Compose([transforms.Resize((224, 224)),
#Resize to 224 because we are using pretrained Imagenet
transforms.RandomHorizontalFlip(),
#Random horizontal flipping of images
transforms.RandomVerticalFlip(),
#Random vertical flipping of images
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
#Normalizing the images
#Returning the transformed images
return data_trans
def get_data_loader(annotations_path, data_path, vocab, data_transforms, parameters):
''' Function to load the required dataset in batches '''
dataset = FlickrDataset(data_path, annotations_path, vocab, data_transforms)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=parameters['batch_size'],
shuffle=parameters['shuffle'],
num_workers=parameters['num_workers'],
                                              collate_fn=create_batch)
return data_loader
class Resnet(nn.Module):
    ''' Class for defining the CNN architecture implementation'''
    def __init__(self, resultant_features):
        super(Resnet, self).__init__()
        #Size of the image feature vector, kept equal to the RNN embedding length
        self.resultant_features = resultant_features
        #Loading the pretrained Resnet model on ImageNet dataset
        #We tried Resnet50/101/152 as architectures
        resnet_model = models.resnet101(pretrained=True)
        self.model = nn.Sequential(*list(resnet_model.children())[:-1])
        #Training only the last 2 layers for the Resnet model i.e. linear and batchnorm layer
        self.linear = nn.Linear(resnet_model.fc.in_features, self.resultant_features)
        #Last layer is the 1D batch norm layer
        self.batchnorm = nn.BatchNorm1d(self.resultant_features, momentum=0.01)
        #Initializing the weights using normal distribution
        self.linear.weight.data.normal_(0, 0.05)
        self.linear.bias.data.fill_(0)
    def forward(self, input_x):
        ''' Defining the forward pass of the CNN architecture model'''
        input_x = self.model(input_x)
        #Converting to a pytorch variable
        input_x = Variable(input_x.data)
        #Flattening the output of the CNN model
        input_x = input_x.view(input_x.size(0), -1)
        #Applying the linear layer followed by batch normalization
        input_x = self.batchnorm(self.linear(input_x))
        return input_x
from itertools import takewhile
def create_caption_word_format(tokenized_version, dataset_vocabulary):
    ''' Function to convert the tokenized version of sentence
    to a sentence with words from the vocabulary'''
    #Defining the start token
    start_word = [dataset_vocabulary.word_to_index[word] for word in [dataset_vocabulary.start_token()]]
    #Defining the end token
    end_word = lambda index: dataset_vocabulary.index_to_word[index] != dataset_vocabulary.end_token()
    #Creating the sentence in list format from the tokenized version of the result
    caption_word_format_list = []
    for index in takewhile(end_word, tokenized_version):
        if index not in start_word:
            caption_word_format_list.append(dataset_vocabulary.index_to_word[index])
    #Returns the sentence with words from the vocabulary
    return ' '.join(caption_word_format_list)
class RNN(torch.nn.Module):
''' Class to define the RNN implementation '''
def __init__(self, embedding_length, hidden_units, vocabulary_size, layer_count):
super(RNN, self).__init__()
#Defining the word embeddings based on the embedding length = 512 and vocabulary size
self.embeddings = nn.Embedding(vocabulary_size, embedding_length)
#Defining the hidden unit to be LSTM unit or GRU unit with hidden_units no. of units
self.unit = nn.GRU(embedding_length, hidden_units, layer_count, batch_first=True)
#Defining the last linear layer converting to the vocabulary_size
self.linear = nn.Linear(hidden_units, vocabulary_size)
def forward(self, CNN_feature, image_caption, caption_size):
''' Defining the forward pass of the RNN architecture model'''
#Creating the embeddings for the image captions
caption_embedding = self.embeddings(image_caption)
torch_raw_embeddings = torch.cat((CNN_feature.unsqueeze(1), caption_embedding), 1)
torch_packed_embeddings = nn.utils.rnn.pack_padded_sequence(torch_raw_embeddings, caption_size, batch_first=True)
torch_packed_embeddings_unit= self.unit(torch_packed_embeddings)[0]
tokenized_predicted_sentence = self.linear(torch_packed_embeddings_unit[0])
#Return the predicted sentence in the tokenized version which need to be converted to words
return tokenized_predicted_sentence
def sentence_index(self, CNN_feature):
#Defining the maximum caption length
caption_max_size = 25
#Defining the RNN hidden state to be None in the beginning
RNN_hidden_state = None
#Defining the input for the RNN based on the CNN features
RNN_data = CNN_feature.unsqueeze(1)
#To return the predicted sentence tokenized version
predicted_sentence_index = []
for i in range(caption_max_size):
#Predicting each next hidden state and word based on the RNN model
next_state, RNN_hidden_state = self.unit(RNN_data, RNN_hidden_state)
#Linear layer
result_state = self.linear(next_state.squeeze(1))
#Predicted word based on the model
predicted_tokenized_word = result_state.max(1)[1]
#Appending the index for the word
predicted_sentence_index.append(predicted_tokenized_word)
#Applying the embeddings to the predicted word in tokenized version
RNN_data = self.embeddings(predicted_tokenized_word)
RNN_data = RNN_data.unsqueeze(1)
#Stacking all the predicted tokenized words
predicted_sentence_index = torch.stack(predicted_sentence_index, 1).squeeze()
#Returning the tokenized version of the predicted sentence
return predicted_sentence_index
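#Illustrative sketch only: ties the encoder, the decoder and the word conversion above together
#for a single image. The vocabulary_obj argument is assumed to behave like the vocabulary used
#elsewhere in this script (supports len(), index_to_word and the start/end tokens). With untrained
#weights the returned caption is meaningless; the sketch only shows the data flow of greedy
#decoding, where sentence_index picks the most probable word at every step.
def _demo_greedy_captioning(image_tensor, vocabulary_obj):
    ''' image_tensor: a (1, 3, H, W) image batch; returns a caption string.'''
    encoder = Resnet(resultant_features=512)
    decoder = RNN(embedding_length=512, hidden_units=512,
                  vocabulary_size=len(vocabulary_obj), layer_count=1)
    encoder.eval()
    decoder.eval()
    #Encode the image into a single feature vector
    feature = encoder(image_tensor)
    #Greedily decode a token sequence and convert it back to words
    token_ids = decoder.sentence_index(feature)
    return create_caption_word_format(token_ids.cpu().data.numpy(), vocabulary_obj)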
def create_checkpoint(CNN_encoder, RNN_decoder, optimizer, epoch, step, losses_train, parameter_dict):
''' Function to create a checkpoint for the trained models and their corresponding
evaluated metrics '''
#Saving the .ckpt model file
model_file = 'model_'+str(epoch+1)+'.ckpt'
#Saving the .ckpt file for the metrics of the trained model
metrics_file = 'model_'+str(epoch+1)+'_metrics.ckpt'
    #Saving the dictionary corresponding to the trained model in order to retrain again
torch.save({'encoder_state_dict': CNN_encoder.state_dict(),
'decoder_state_dict': RNN_decoder.state_dict(),
'optimizer_state_dict':optimizer.state_dict(),
'epoch':epoch,
'step':step},
os.path.join(parameter_dict['output_dir'], model_file))
#Saving the loss files in an output directory to analyse for hyperparameter exploration
torch.save({'losses_train': losses_train},
os.path.join(parameter_dict['output_dir'], metrics_file))
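#Illustrative sketch only: shows how a checkpoint written by create_checkpoint above could be
#loaded again to resume training or evaluation. The file name 'model_1.ckpt' is just an example;
#any checkpoint produced by create_checkpoint would work the same way.
def _demo_resume_from_checkpoint(CNN_encoder, RNN_decoder, optimizer, output_dir):
    import os
    import torch
    state_dict = torch.load(os.path.join(output_dir, 'model_1.ckpt'))
    CNN_encoder.load_state_dict(state_dict['encoder_state_dict'])
    RNN_decoder.load_state_dict(state_dict['decoder_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    #Training would continue from the saved epoch and step
    return state_dict['epoch'], state_dict['step']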
#Defining the dictionary for the performing hyperparameter exploration on the parameters
training_parameters = {'embedding_length':512,
#Selecting the embedding length
'num_hiddens':512,
#Setting the number of hidden units in hidden layers
'learning_rate':1e-3,
#Setting the initial learning rate
'num_epochs':100,
#Running the model for num_epochs
'num_layers':5}
#Defining the number of layers for the RNN architecture
#Defining the vocabulary
vocabulary = get_vocab(vocabulary_path)
#Defining the CNN architecture model
CNN_encoder = Resnet(training_parameters['embedding_length'])
#Defining the RNN architecture model
RNN_decoder = RNN(training_parameters['embedding_length'],
training_parameters['num_hiddens'],
len(vocabulary),
training_parameters['num_layers'])
#Defining the loss function as cross entropy
loss_function = nn.CrossEntropyLoss()
#Collecting the RNN_decoder parameters and the trainable CNN_encoder layers (linear and batch norm) together
collected_params = list(RNN_decoder.parameters()) + list(CNN_encoder.linear_secondlast_layer.parameters()) + list(CNN_encoder.last_layer.parameters())
#Defining the optimizer (ADAM/SGD with momentum)
optimizer = torch.optim.SGD(collected_params, lr = 0.01, momentum = 0.9)
#Transferring the models to the GPU (Blue Waters)
CNN_encoder.cuda()
RNN_decoder.cuda()
max_bleu_greedy = np.array([0.0, 0.0, 0.0])
min_bleu_greedy = np.array([1.0, 1.0, 1.0])
max_bleu_beam = np.array([0.0, 0.0, 0.0])
min_bleu_beam = np.array([1.0, 1.0, 1.0])
max_img = np.array([None, None, None])
min_img = np.array([None, None, None])
max_captions_pred_greedy = np.array([None, None, None])
min_captions_pred_greedy = np.array([None, None, None])
max_captions_target_greedy = np.array([None, None, None])
min_captions_target_greedy = np.array([None, None, None])
max_captions_pred_beam = np.array([None, None, None])
min_captions_pred_beam = np.array([None, None, None])
max_captions_target_beam = np.array([None, None, None])
min_captions_target_beam = np.array([None, None, None])
#Function to test the models
def test_model(test_data_loader, parameter_dict):
#To evaluate the models, loading the pretrained models, loading the CNN, RNN and optimizer states
state_dict = torch.load(os.path.join(parameter_dict['output_dir'], 'model_24.ckpt'))
#Loading the model from one of the last few epochs
CNN_encoder.load_state_dict(state_dict['encoder_state_dict'])
RNN_decoder.load_state_dict(state_dict['decoder_state_dict'])
    optimizer.load_state_dict(state_dict['optimizer_state_dict'])
    #Retrieving the epoch and step stored in the checkpoint (used for reporting below)
    epoch = state_dict['epoch']
    step = state_dict['step']
    #Setting the models in the testing mode
    CNN_encoder.eval()
    RNN_decoder.eval()
    #print(epoch)
#Defining the list of the testing losses over steps
test_loss_list = []
#Defining the list of BLEU 1 and BLEU 4 scores calculates using the nltk library
BLEU1 = []
BLEU4 = []
for index, (dataset_image, image_caption, caption_length) in enumerate(test_data_loader, start = 0):
#print(index)
#Converting the image and the corresponding caption to Pytorch Variables
#and sending to Blue Waters
dataset_image = Variable(dataset_image).cuda()
image_caption = Variable(image_caption).cuda()
#print("Data done")
target_image_caption = nn.utils.rnn.pack_padded_sequence(image_caption, caption_length, batch_first=True)[0]
        #Forward pass of the encoder model to retrieve the CNN features
CNN_feature = CNN_encoder(dataset_image)
#print("Encoded")
#Forward pass of the RNN_decoder model to retrieve the tokenized sentence
RNN_tokenized_sentence = RNN_decoder(CNN_feature, image_caption, caption_length)
#print("Decoded")
loss_value = loss_function(RNN_tokenized_sentence, target_image_caption)
#Appending the training loss to the list
test_loss_list.append(loss_value.data[0])
#print(captions)
#print("captions done")
        #Tokenized version of the predicted sentence
RNN_tokenized_sentence_prediction = RNN_decoder.sentence_index(CNN_feature)
RNN_tokenized_sentence_prediction = RNN_tokenized_sentence_prediction.cpu().data.numpy()
predicted_words = create_caption_word_format(RNN_tokenized_sentence_prediction,vocabulary, False)
#print(predicted_words)
        #Tokenized version of the original caption
original_sentence_tokenized = image_caption.cpu().data.numpy()
original_sentence = create_caption_word_format(original_sentence_tokenized,vocabulary, True)
#print("Target wordss")
#print(target_words)
        #Defining the BLEU 1 and BLEU 4 scores based on the nltk library
        sf = SmoothingFunction()
        bleu4 = corpus_bleu(original_sentence, predicted_words, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function = sf.method4)
        bleu1 = corpus_bleu(original_sentence, predicted_words, weights=(1, 0, 0, 0), smoothing_function = sf.method4)
BLEU4.append(bleu4)
BLEU1.append(bleu1)
#print(bleu4)
#print(bleu1)
        if((bleu4 > max_bleu_greedy).astype(int).sum() > 0):
            idx = max_bleu_greedy.argmin()
            max_bleu_greedy[idx] = bleu4
            max_img[idx] = dataset_image.cpu()
            max_captions_pred_greedy[idx] = predicted_words
            max_captions_target_greedy[idx] = original_sentence
        if((bleu4 < min_bleu_greedy).astype(int).sum() > 0):
            idx = min_bleu_greedy.argmax()
            min_bleu_greedy[idx] = bleu4
            min_img[idx] = dataset_image.cpu()
            min_captions_pred_greedy[idx] = predicted_words
            min_captions_target_greedy[idx] = original_sentence
    #Reporting the mean loss and BLEU scores over the test set
    print('[Epoch: %d, %d] Loss: %0.2f BLEU1: %0.2f BLEU4: %0.2f'% (epoch+1, step, np.mean(test_loss_list), np.mean(BLEU1)*100, np.mean(BLEU4)*100))
    #Saving the step-wise test losses and BLEU scores for later analysis
    torch.save({'step_wise_test_loss': test_loss_list,
                'epoch': epoch,
                'epoch_wise_test_loss': [np.mean(test_loss_list)],
                'BLEU1': BLEU1,
                'BLEU4': BLEU4},
               'validation_loss_'+str(epoch+1)+'.ckpt')
torch.save({'max_bleu_greedy': max_bleu_greedy,
'min_bleu_greedy': min_bleu_greedy,
'max_img': max_img,
'min_img': min_img,
'max_captions_pred_greedy': max_captions_pred_greedy,
'min_captions_pred_greedy': min_captions_pred_greedy,
'max_captions_target_greedy': max_captions_target_greedy,
'min_captions_target_greedy': min_captions_target_greedy},
os.path.join('test_res_'+str(epoch+1)+'.ckpt'))
test_data_loader = get_data_loader(annotations_path=os.path.join(parameters['val_ann_path']),
data_path=os.path.join(parameters['test_img_dir']),
data_transforms=get_data_transforms(),
parameters=parameters,
vocab=vocab)
test_model(test_data_loader=test_data_loader, parameter_dict=parameters)
| true
|
24fbd248f7dc79de1c55b51b69f7202af23dfac7
|
Python
|
oljikeboost/Tracking
|
/data_utils.py
|
UTF-8
| 13,676
| 2.53125
| 3
|
[] |
no_license
|
import os
import json
import glob
import shutil
import numpy as np
from sklearn.cluster import KMeans
import cv2
from collections import Counter
from tqdm import tqdm
def get_color(lbl):
if lbl==0:
return (0,0,255)
elif lbl==1:
return (0,255,0)
else:
return None
def post_process_cls(all_cls, all_tracks):
### First, we need to get the set of all the tracks
    ### After which, to find its corresponding classes
### And transform/interpolate the classes list
from collections import Counter
id_to_cls_list = {}
for en, (cls, track_id) in enumerate(zip(all_cls, all_tracks)):
if track_id in id_to_cls_list:
id_to_cls_list[track_id].append(cls)
else:
id_to_cls_list[track_id] = [cls]
id_to_cls_val = {}
for track_id, cls_lst in id_to_cls_list.items():
cls_lst = np.array(cls_lst).flatten().tolist()
cnt = Counter(cls_lst)
mst_cmn = cnt.most_common()[0][0]
id_to_cls_val[track_id] = int(mst_cmn)
output = []
for en, track_id in enumerate(all_tracks):
output.append(id_to_cls_val[track_id])
return output, id_to_cls_val
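### Illustrative sketch only (not used by the pipeline): post_process_cls smooths the per-detection
### KMeans labels by majority vote within each track, so a track labelled [0, 0, 1, 0] across four
### frames ends up with class 0 everywhere. The ids and labels below are made up for the example.
def _demo_post_process_cls():
    frame_classes = [0, 0, 1, 0, 1, 1]   # raw per-detection cluster labels
    frame_tracks = [7, 7, 7, 7, 9, 9]    # track id of each detection
    smoothed, per_track = post_process_cls(frame_classes, frame_tracks)
    # smoothed -> [0, 0, 0, 0, 1, 1], per_track -> {7: 0, 9: 1}
    print(smoothed, per_track)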
def get_all_team_classes(id_dict, anno_dirs):
print("Clustering all teams in progress...")
### Create global dict which maps global player track to its new global team class
global_id_to_cls_val = {}
all_cls = list(range(0, 2 * len(anno_dirs)))
def chunks(l, n):
n = max(1, n)
return [l[i:i + n] for i in range(0, len(l), n)]
cls_chunks = chunks(all_cls, 2)
for anno_en, anno_dir in enumerate(tqdm(anno_dirs)):
### Process a new game
all_jsons = sorted(glob.glob(anno_dir + '/*.json'))
orig_dir = os.path.join('../../data/playerTrackingFrames', os.path.basename(anno_dir))
### Create the corresponding history of labels and histograms
all_hists = []
all_labels = []
anno_error = 0
box_cnt = 0
for en, single_json in enumerate(all_jsons):
data = json.load(open(single_json))
img_path = os.path.join(orig_dir, os.path.basename(single_json).replace('.json', '.jpg'))
img0 = cv2.imread(img_path)
h, w, _ = img0.shape
for i in range(len(data['shapes'])):
box_cnt += 1
label = data['shapes'][i]['label']
pts = np.array(data['shapes'][i]['points']).astype(int)
if pts[0][1] > pts[1][1] or pts[0][0] > pts[1][0]:
anno_error += 1
continue
player_label = id_dict[os.path.basename(anno_dir)][label]
center_y = int((pts[1][1] + pts[0][1]) / 2)
center_x = int((pts[1][0] + pts[0][0]) / 2)
img_box = img0[max(0, center_y - 30): min(h, center_y + 30),
max(0, center_x - 10): min(w, center_x + 10), :]
img_box = cv2.cvtColor(img_box, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([img_box], [0], None, [24],
[0, 300])
hist = cv2.normalize(hist, hist).flatten()
all_hists.append(hist)
all_labels.append(player_label)
concat_hists = np.concatenate(all_hists)
km = KMeans(n_clusters=2, init="k-means++", max_iter=10000).fit(all_hists)
proc_cls, id_to_cls_val = post_process_cls(km.labels_, all_labels)
print(anno_en, anno_dir, Counter(proc_cls), 100 * (anno_error / box_cnt))
for player_id, color_cls in id_to_cls_val.items():
curr_cls_subset = cls_chunks[anno_en]
global_id_to_cls_val[player_id] = curr_cls_subset[color_cls]
print('Clustering is finished!')
return proc_cls, global_id_to_cls_val
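### Illustrative sketch only: the core of the team clustering above is "hue histogram of a crop
### around each player's centre -> KMeans with 2 clusters". The random arrays below stand in for
### real player crops, so the resulting labels are arbitrary; the sketch only shows the mechanics.
def _demo_hue_histogram_clustering():
    rng = np.random.RandomState(0)
    all_hists = []
    for _ in range(10):
        fake_crop = rng.randint(0, 255, size=(60, 20, 3), dtype=np.uint8)  # stand-in for a player crop
        hsv = cv2.cvtColor(fake_crop, cv2.COLOR_BGR2HSV)
        hist = cv2.calcHist([hsv], [0], None, [24], [0, 300])
        all_hists.append(cv2.normalize(hist, hist).flatten())
    km = KMeans(n_clusters=2, init="k-means++", max_iter=10000).fit(all_hists)
    print(km.labels_)  # two pseudo "teams"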
def get_all_team_classes2(id_dict, anno_dirs):
print("Clustering all teams in progress...")
### Create global dict which maps global player track to its new global team class
global_id_to_cls_val = {}
all_cls = list(range(0, 2 * len(anno_dirs)))
def chunks(l, n):
n = max(1, n)
return [l[i:i + n] for i in range(0, len(l), n)]
cls_chunks = chunks(all_cls, 2)
for anno_en, anno_dir in enumerate(tqdm(anno_dirs)):
### Process a new game
all_jsons = sorted(glob.glob(anno_dir + '/*.json'))
orig_dir = os.path.join('../../data/playerTrackingFrames2', os.path.basename(anno_dir))
if not os.path.exists(orig_dir):
orig_dir = os.path.join('../../data/playerTrackingFrames', os.path.basename(anno_dir))
### Create the corresponding history of labels and histograms
all_hists = []
all_labels = []
anno_error = 0
box_cnt = 0
for en, single_json in enumerate(all_jsons):
data = json.load(open(single_json))
img_path = os.path.join(orig_dir, os.path.basename(single_json).replace('.json', '.jpg'))
img0 = cv2.imread(img_path)
h, w, _ = img0.shape
for i in range(len(data['shapes'])):
box_cnt += 1
label = data['shapes'][i]['label']
if '_' in label: continue
pts = np.array(data['shapes'][i]['points']).astype(int)
if pts[0][1] > pts[1][1] or pts[0][0] > pts[1][0]:
anno_error += 1
continue
player_label = id_dict[os.path.basename(anno_dir)][label]
center_y = int((pts[1][1] + pts[0][1]) / 2)
center_x = int((pts[1][0] + pts[0][0]) / 2)
img_box = img0[max(0, center_y - 30): min(h, center_y + 30),
max(0, center_x - 10): min(w, center_x + 10), :]
img_box = cv2.cvtColor(img_box, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([img_box], [0], None, [24],
[0, 300])
hist = cv2.normalize(hist, hist).flatten()
all_hists.append(hist)
all_labels.append(player_label)
concat_hists = np.concatenate(all_hists)
km = KMeans(n_clusters=2, init="k-means++", max_iter=10000).fit(all_hists)
proc_cls, id_to_cls_val = post_process_cls(km.labels_, all_labels)
# print(anno_en, anno_dir, Counter(proc_cls), 100 * (anno_error/box_cnt))
for player_id, color_cls in id_to_cls_val.items():
curr_cls_subset = cls_chunks[anno_en]
global_id_to_cls_val[player_id] = curr_cls_subset[color_cls]
print('Clustering is finished!')
return proc_cls, global_id_to_cls_val
def get_all_classes(anno_dirs):
id_dict = {}
k_class = 1
for anno_dir in anno_dirs:
id_dict[os.path.basename(anno_dir)] = {}
curr_set = set()
all_jsons = sorted(glob.glob(anno_dir + '/*.json'))
for single_json in all_jsons:
data = json.load(open(single_json))
for i in range(len(data['shapes'])):
if '_' not in data['shapes'][i]['label']:
curr_set.add(data['shapes'][i]['label'])
num_classes = len(curr_set)
curr_classes = sorted(list(curr_set))
en = 0
while en < num_classes:
id_dict[os.path.basename(anno_dir)][curr_classes[en]] = k_class
en += 1
k_class += 1
print("The number of class is ", k_class)
print("The number of dirs is ", len(anno_dirs))
return id_dict
def create_mot_third_task(id_dict, id_to_cls_val):
gt_list = []
anno_dirs = glob.glob('../data/third_task/*')
for anno_dir in tqdm(anno_dirs):
all_jsons = sorted(glob.glob(anno_dir + '/*.json'))
### Iterate through all frames of current directory
cls_en = 0
gt_list = []
curr_labels = set()
for en, single_json in enumerate(all_jsons):
data = json.load(open(single_json))
### The following block of code creates the jersey_dict which maps track_id to [jersey_num, ball_possession]
jersey_dict = {}
for i in range(len(data['shapes'])):
label = data['shapes'][i]['label']
if '_' not in label: continue
lbl_split = label.split('_')
if 'j' in label:
_, track_id, jersey_num = lbl_split
if not track_id in jersey_dict:
jersey_dict[str(track_id)] = [jersey_num, 0]
else:
jersey_dict[str(track_id)][0] = jersey_num
elif 'b' in label:
_, track_id = lbl_split
if not track_id in jersey_dict:
jersey_dict[track_id] = [None, 1]
else:
jersey_dict[track_id][1] = 1
for i in range(len(data['shapes'])):
bbox = data['shapes'][i]['points']
label = data['shapes'][i]['label']
if '_' in label: continue
curr_labels.add(label)
if bbox[0][0] > bbox[1][0] or bbox[0][1] > bbox[1][1]:
continue
track_label = id_dict[os.path.basename(anno_dir)][label]
team_lbl = id_to_cls_val[track_label]
jersey_num, ball_poc = jersey_dict.get(label, [None, 0])
anno_line = [en+1, track_label,
int(bbox[0][0]), int(bbox[0][1]),
int(bbox[1][0] - bbox[0][0]), int(bbox[1][1] - bbox[0][1]),
1, 1, team_lbl, ball_poc]
anno_str = ','.join([str(x) for x in anno_line])
gt_list.append(anno_str)
### Create the output GT dir
output_dir = os.path.join('../data/mot_data/images/train/', os.path.basename(anno_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir = os.path.join(output_dir, 'gt')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
### Write the detection to the file gt.txt
with open(os.path.join(output_dir, 'gt.txt'), 'w') as f:
for x in gt_list:
f.writelines(x + '\n')
def create_mot_first_second_task(id_dict, id_to_cls_val):
gt_list = []
anno_dirs = glob.glob('../data/raw_data/*')
jersey_dir = '../data/second_task/'
for dr_en, anno_dir in enumerate(tqdm(anno_dirs)):
jersey_anno = os.path.join(jersey_dir, os.path.basename(anno_dir))
all_jsons = sorted(glob.glob(anno_dir + '/*.json'))
### Iterate through all frames of current directory
cls_en = 0
gt_list = []
curr_labels = set()
for en, single_json in enumerate(all_jsons):
data = json.load(open(single_json))
jersey_file = os.path.join(jersey_anno, os.path.basename(single_json).replace('frame_', ''))
if os.path.exists(jersey_file):
jersey_data = json.load(open(jersey_file))
### Map each track for current frame to its existing information, such as Ball Pocession, Jersey Number, Position on Court
jersey_dict = {}
for i in range(len(jersey_data['shapes'])):
bbox = jersey_data['shapes'][i]['points']
label = jersey_data['shapes'][i]['label']
lbl_split = label.split('_')
if 'j' in label:
_, track_id, jersey_num = lbl_split
if not track_id in jersey_dict:
jersey_dict[str(track_id)] = [jersey_num, 0]
else:
jersey_dict[str(track_id)][0] = jersey_num
elif 'b' in label:
_, track_id = lbl_split
if not track_id in jersey_dict:
jersey_dict[track_id] = [None, 1]
else:
jersey_dict[track_id][1] = 1
for i in range(len(data['shapes'])):
bbox = data['shapes'][i]['points']
label = data['shapes'][i]['label']
curr_labels.add(label)
if bbox[0][0] > bbox[1][0] or bbox[0][1] > bbox[1][1]:
continue
track_label = id_dict[os.path.basename(anno_dir)][label]
team_lbl = id_to_cls_val[track_label]
if os.path.exists(jersey_file):
jersey_num, ball_poc = jersey_dict.get(label, [None, 0])
anno_line = [en+1, track_label,
int(bbox[0][0]), int(bbox[0][1]),
int(bbox[1][0] - bbox[0][0]), int(bbox[1][1] - bbox[0][1]),
1, 1, team_lbl, ball_poc]
anno_str = ','.join([str(x) for x in anno_line])
gt_list.append(anno_str)
### Create the output GT dir
output_dir = os.path.join('../data/mot_data/images/train/', os.path.basename(anno_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir = os.path.join(output_dir, 'gt')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
### Write the detection to the file gt.txt
with open(os.path.join(output_dir, 'gt.txt'), 'w') as f:
for x in gt_list:
f.writelines(x + '\n')
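### For reference (inferred from the two functions above, not an official spec): every line written
### to gt.txt is a comma-separated record of the form
### frame_index, track_id, bbox_left, bbox_top, bbox_width, bbox_height, 1, 1, team_label, ball_possession
### e.g. "12,5,104,230,38,92,1,1,3,0" would describe track 5 in frame 12, team 3, not holding the ball.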
| true
|
096a1f103afafb9ddb83ac91ee271f9abf58728f
|
Python
|
Kishan-Jasani/Python
|
/02_Object_Oriented_Programming/09_Abstraction.py
|
MacCentralEurope
| 1,536
| 4.6875
| 5
|
[] |
no_license
|
# What is Abstraction?
'''
Abstraction in Python is the process of hiding the real implementation of an application from the user and emphasizing only how to use it.
For example, consider you have bought a new electronic gadget.
Along with the gadget, you get a user guide instructing you how to use it, but this user guide has no info regarding the internal working of the gadget.
Another example is, when you use a TV remote, you do not know how pressing a key on the remote changes the channel internally on the TV.
You just know that pressing the volume key will increase the volume.
'''
# Why Do We Need Abstraction?
# --> Through the process of abstraction in Python, a programmer can hide all the irrelevant data/process of an application in order to reduce complexity and increase efficiency.
#Next, let us see how to achieve abstraction in Python programs with Example-1.
#Example-1
class Mobile:
def __init__(self, brand, price):
print("Inside constructor")
self.brand = brand
self.price = price
def purchase(self):
print("Purchasing a mobile")
print("This mobile has brand", self.brand, "and price", self.price)
print("Mobile-1")
mob1=Mobile("Apple", 20000)
mob1.purchase()
print("Mobile-2")
mob2=Mobile("Samsung",3000)
mob2.purchase()
#When we invoke purchase() on a mobile object, we don't have to know the internal details of the method to invoke it.
#This ability to use something without having to know the details of how it works is called abstraction.
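# A related, optional illustration (not part of Example-1 above): Python's abc module lets you
# enforce abstraction by declaring methods that every concrete subclass must implement.
from abc import ABC, abstractmethod
class Payment(ABC):
    @abstractmethod
    def pay(self, amount):
        pass
class CardPayment(Payment):
    def pay(self, amount):
        print("Paying", amount, "by card")
# Payment() itself cannot be instantiated; only concrete subclasses like CardPayment can.
CardPayment().pay(500)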
| true
|
dbaf2674cf1deb535de3f0e39e585be8dc38c997
|
Python
|
samdf96/myrepo
|
/clump_code_1/initialize.py
|
UTF-8
| 12,174
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 28 15:26:55 2018
@author: sfielder
"""
"""
This piece of code is run to data analyze simulation data
Manual Inputs:
- Overwrite protection: boolean
- True or False for if data is overwritten when code runs
- flist argument: string
- This argument points to where the data is stored
- data_check_list: list of strings
- Decides what simulation time checks to use
- config_dir: string
- Points to where the config files are stored
- data_dir: string
- Points to where the data should be saved
Returns:
Tree like directory structure matching different simulation input parameters
All input parameters are stored via directory naming in the Fiducialxx convention
Directory Tree Output Structure:
tree_top / data_dir / config_x / Fiducialxx / Time Stamp / Output Files /
tree_top - this is where the config files are stored for input
data_dir - this is set at the highest level of the tree for output
config_x - this is to seperate different input parameters for the analyzer
Fiducialxx - this is for different simulation data sets
Time Stamp - this is for all the checkpoints in the simulation
Output Files: Generated Currently:
- FITS File
- Contains all the necessary clump information for the simulation
- Projection Plots for axial directions with overlayed center
of mass coordinates for all the clumps found
- Header_info.txt
- Contains information about all the clumps found in each config
directory found by glob.glob
- PDF Plots
- A regular j/j plot
- A colormapped j/j plot with reference to log_10(mass)
Notes:
- Both of these are auto saved within the directory of where
the fits files are located: /time stamp/
- j Comparison Plots by Fiducial run per time step
- These are saved in a created directory in the / Fiducialxx/
level.
Notes:
- SEE OVERWRITE PROTECTION BELOW.
"""
import clump_code_1 as cc
import header_printer as hp
import glob
import yaml
import io
import os
from definitions import jComparisonPlotter
from definitions import FiducialPlotter
# Logging Info Here:
import logging
import logging.config
#INPUTS HERE
#Creates a list of directories with the appropriate files for analysis
# THIS WILL NEED TO BE CHANGED FOR THE NEWER DESIGN SIMULATIONS
flist = glob.glob('/mnt/bigdata/erosolow/Orion2/*F*/data.*.hdf5')
#This is to filter out the timestamps that we want to analyze over
data_check_list = ['0060','0070','0080','0090','0100']
#This is where the config files are
tree_top_dir = '/home/sfielder/Documents/Clumps/'
data_dir = '/home/sfielder/Documents/Clumps/Output/'
#Load CONFIG FILE HERE
logging.config.fileConfig('logging.conf', defaults={'logfilename': data_dir+'output.log'})
# create logger
logger = logging.getLogger('initialize')
logger.info("Glob function has found the following to be sifted: ", flist)
logger.info("Data Check List has been set to: ", data_check_list)
logger.info("tree_top_dir has been set to: ", tree_top_dir)
logger.info("data_dir has been set to: ", data_dir)
"""
Overwrite Protection Here:
- header and timestep_plots have absolute status and will not even
start looking for files if value below is set to FALSE.
- analyzer and fiducial_plots have conditional status, and will look for
for already created directories and will skip those that are found
if the value below is set to FALSE.
- Setting any of the following values to TRUE will overwrite files
even if files and directories are found by the code to exist.
"""
overwrite_analyzer = False
overwrite_header = False
overwrite_timestep_plots = False
overwrite_fiducial_plots = False
logger.info("Overwrite Protection for Analyzer function has been set to: ",
overwrite_analyzer)
logger.info("Overwrite Protection for Header Printer function has been set to: ",
overwrite_header)
logger.info("Overwrite Protection for Timestep Plots function has been set to: ",
overwrite_timestep_plots)
logger.info("Overwrite Protection for Fiducial Plots function has been set to: ",
overwrite_fiducial_plots)
# =============================================================================
# =============================================================================
logger.info("Analysis Section Started.")
#Creating empty list for data sorting
flist_data = []
logger.debug("Filtering data by data_check_list_entry.")
for i in range(0,len(flist)):
main_string = flist[i].split("/")
out_string = main_string[-1].split(".")
time_stamp = out_string[1]
#This checks if timestamp
if any(x in time_stamp for x in data_check_list):
flist_data.append(flist[i])
#Sorting the Data by filename
flist_data.sort()
logger.info("Glob function has found the following to be analyzed: ",
flist_data)
#Make a list of all the yaml files found in data_dir
logger.info("Finding All Config Files.")
flist_config_yaml = glob.glob(tree_top_dir+'*.yaml')
flist_config_yaml.sort()
logger.info("The following files will be inputs to the analyzer: ",
flist_config_yaml)
carry_on_analyzer = False #Initialization Value - this is the default
logger.debug("Setting Initialization value for carry_on to: ",
carry_on_analyzer)
for i in range(0,len(flist_config_yaml)):
logger.debug("Currentl Working on Config File: ", flist_config_yaml[i])
#Grabs the config file name here
naming_string = flist_config_yaml[i].split("/")[-1].split(".")[0]
#Creating String for Directory
save_dir = data_dir + naming_string + '/'
#Checking if Directory Exists
if os.path.isdir(save_dir) == True:
logger.info("Save Directory: ", save_dir, " has been detected as existing.")
if overwrite_analyzer == True:
logger.info("Overwrite for Analyzer has been set to TRUE.")
logger.info("Carry On Value is being set to TRUE.")
carry_on_analyzer = True
else:
logger.info("Overwrite for Analyzer has been set to FALSE.")
logger.info("Carry On Value will remain as FALSE.")
else:
logger.info("Save Directory: ", save_dir, " has been detected as non-existent.")
logger.info("Save Directory: ", save_dir, " is being created.")
os.mkdir(save_dir)
logger.info("Setting Carry On Value to TRUE.")
carry_on_analyzer = True
#If Carry_on_Analyzer has been set to true, then run the analysis.
if carry_on_analyzer == True:
logger.info("Carry On Value has been detected as TRUE.")
logger.info("Proceeding with Analysis.")
#Importing Config File settings here
with io.open(flist_config_yaml[i], 'r') as stream:
data_loaded = yaml.load(stream)
logger.info("Config File has been opened and settings extracted.")
#Call main code here
#Testing first file here
for j in range(0,len(flist_data)):
logger.info("Currently working on file: ", flist_data[j])
logger.info("Invoking Analyzer function (cc).")
cc.Analyzer(flist_data[j],
data_loaded['l'],
data_loaded['cmin'],
data_loaded['step'],
data_loaded['beta'],
data_loaded['clump_sizing'],
save_dir)
else:
logger.info("Carry On Value has been detected as FALSE.")
logger.info("Skipping Analysis for current config file.")
logger.info("Analysis Section Complete.")
# =============================================================================
logger.info("Header Printer Section Started.")
if overwrite_header == True:
logger.info("Overwrite for Header Printer has been set to TRUE.")
# Call Header Printer Script to compute the .txt file needed for
# summary of analysis
# Grab the config_x directories, and puts them in a list
flist_config_dir = glob.glob(data_dir + 'config_*')
#Run Loop over all the config_x directories found that are in the list
for i in range(0,len(flist_config_dir)):
logger.info("Processing Summary File for directory: ", flist_config_dir[i])
logger.info("Invoking HeaderPrinter function from header_printer.py")
hp.HeaderPrinter(flist_config_dir[i])
else:
logger.info("Overwrite for Header Printer has been set to FALSE.")
logger.info("Skipping Header Printer Function.")
logger.info("Header Printer Section Completed.")
# =============================================================================
logger.info("Timestep Plots Section Started.")
if overwrite_timestep_plots == True:
logger.info("Overwrite for Timestep Plots has been set to TRUE.")
# Comparison Plots by specific timestep happens here:
flist_plots = glob.glob(data_dir + '**/*.fits', recursive=True)
flist_plots.sort()
logger.info("Files to loop over: ", flist_plots)
for i in range(0,len(flist_plots)):
current_file = flist_plots[i]
logger.info("Current File being worked on: ", current_file)
logger.info("Invoking jComparisonPlotter function.")
jComparisonPlotter(current_file)
else:
logger.info("Overwrite for Timestep Plots has been set to FALSE.")
logger.info("Skipping Timestep Plot Creation.")
logger.info("Timestep Plots Section Completed.")
# =============================================================================
logger.info("Fiducial Plots Section Started.")
# Comparison Plots over Fiducial Runs (by timestep) happens here:
carry_on_fiducial_plots = False #Initialize value - this is the default
logger.info("Carry On Value for Fiducial Plots is initialized as: ",
carry_on_fiducial_plots)
flist_config = glob.glob(data_dir+'config_*')
flist_config.sort()
logger.info("Output config_x subdirectory list found to be: ", flist_config)
for k in range(0,len(flist_config)): #Adding Loop for config directories
logger.info("Currently working on config: ", flist_config[k])
#Write in os function to create appropiate directory for Fiducial Plots
fid_dir = flist_config[k]+'/Fiducial_Plots/'
if os.path.isdir(fid_dir) == True:
logger.info("Save Directory: ", fid_dir, " has been detected to exist.")
if overwrite_fiducial_plots==True:
logger.info("Overwrite for Fiducial Plots has been set to TRUE.")
logger.info("Carry On Value is being set to TRUE.")
carry_on_fiducial_plots = True
else:
logger.info("Overwrite for Fiducial Plots has been set to FALSE.")
logger.info("Carry On Value will remain as FALSE.")
else:
logger.info("Save Directory: ", fid_dir, " has been detected as non-existent.")
logger.info("Creating Save Directory.")
os.mkdir(fid_dir)
logger.info("Setting Carry On Value to TRUE.")
carry_on_fiducial_plots = True
#If Carry On Value is True then continue with plotting
if carry_on_fiducial_plots == True:
logger.info("Carry On Value has been detected as TRUE.")
logger.info("Proceeding with Analysis.")
logger.info("Looping timesteps inputted by data_check_list.")
for i in range(0,len(data_check_list)):
flist = glob.glob(flist_config[k]+'/**/*'+data_check_list[i]+'*.fits',
recursive=True)
flist.sort()
data_check_list_print = data_check_list[i]
#Calling Main Function Here
logger.info("Current Timestep being worked on: ", data_check_list_print)
logger.info("Invoking FiducialPlotter function.")
FiducialPlotter(flist,fid_dir,data_check_list_print)
logger.info("Fiducial Plots Section Completed.")
# =============================================================================
| true
|
416f09dfbc35a793ba8c567f2d8ccf1ea106b143
|
Python
|
mcruggiero/Code_and_Presentations
|
/python/Taoist_Learner/Tao.py
|
UTF-8
| 9,980
| 3.421875
| 3
|
[] |
no_license
|
# This is a fun little script I wrote to memorize the major system and the Stephen
# Mitchell translation Tao. Please do not use this code without my consent.
#All rights reserved.
# 2019
# michael@mcruggiero.com
import pandas as pd
import numpy as np
import os
os.system('cls' if os.name == 'nt' else 'clear')
Tao = pd.read_csv("Tao3.csv")
Tao["SessionSeen"] = 0
Letter = "A. B. C. D. E. F. G. H. I. J. K. L.".split()
def Choice(a,b):
while True:
try:
print("\n")
choice = int(input("Please choose a number from " +str(a)+" to " +str(b)+ ": "))
if int(a) <= choice <= int(b):
print("\n")
return choice
break
else: print("Stick to the numbers, Sam.")
except: print("Stick to the numbers, Sam.")
def Settings(Size, QTypes, CutOff):
a = 1
while a == 1:
os.system('cls' if os.name == 'nt' else 'clear')
print("\nSetting Panel \n")
print("\nWhat would you like?\n")
print("\t1. Reset Scores\n")
print("\t2. Current Multiple Choice List Size is " + str(Size) + "\n")
print("\t3. Current Question Types is # " +str(QTypes + 1)+ "\n")
print("\t4. Fill In The blank vs Multiple Choice is " + str(int(100 * CutOff)) +"%"+ " correct\n")
print("\t5. View Tao with scores\n")
print("\t6. Back to Main Menu\n")
print("What would you like to change?")
setchoice = Choice(1,6)
if setchoice == 1:
Re = str(input("Are you sure you want to reset your scores? \n Type 'yes' to reset "))
if Re == "yes":
ResetScore(Tao)
input("Scores reset. Hit return to continue")
else: input("OK. scores not reset. Hit return to continue")
elif setchoice == 2:
print("How many choices for the multiple choice lists? ")
Size = Choice(1,12)
elif setchoice == 3:
print("\t1. 'TaoLine' \n")
print("\t2. 'MemoryFileSmall' \n")
print("\t3. 'MemoryFileSmall' & 'TaoLine' \n")
print("\t4. 'TaoLine' & 'MemoryFileSmall' & 'LineNumber' \n")
QTypes = Choice(1,4) - 1
elif setchoice == 4:
print("What is the percent threshold for fill in the blank vs. multiple choice? ")
CutOff = (Choice(0,100)/100)
elif setchoice == 5:
pd.set_option('display.max_rows', len(Tao))
A = Tao[["TaoLine", "MemoryFileSmall", "Points"]]
print(A.set_index('TaoLine'))
input("Hit return to continue")
if setchoice == 6:
a += 1
return Size, QTypes, CutOff
def Main():
Size = 6
quit = 1
QTypes = 2
CutOff = .5
while quit == 1:
os.system('cls' if os.name == 'nt' else 'clear')
print("\nWelcome to TaoLearner v.1 \n")
print("\nWhat would you like?\n")
print("\t1. Lookup Chapter\n")
print("\t2. Learn unshuffled lines in an interval\n")
print("\t3. Learn shuffled lines in an interval\n")
print("\t4. Learn an entire chapter\n")
print("\t5. Settngs\n")
print("\t6. Quit\n")
choice = Choice(1,6)
if choice == 1:
print("\nWhat Chapter? ")
N = Choice(1,81)
LookupChapter(Tao, N)
elif choice ==2:
print("\nWhat is the lowest line?")
Lower = Choice(0,len(Tao)-1)
print("\nWhat is the highest line?")
Upper = Choice(0,len(Tao)-1)
Sequence(Tao, Lower, Upper, CutOff, QTypes, Size)
elif choice == 3:
print("\nWhat is the lowest line?")
Lower = Choice(0,len(Tao)-1)
print("\nWhat is the highest line?")
Upper = Choice(0,len(Tao)-1)
Random(Tao, Lower, Upper, CutOff, QTypes, Size)
elif choice == 4:
print("\nWhat Chapter? ")
N = Choice(1,81)
ReviewChapter(Tao,N,CutOff, QTypes, Size)
elif choice == 5:
SettingSet = Settings(Size, QTypes, CutOff)
Size, QTypes, CutOff = SettingSet[0], SettingSet[1], SettingSet[2]
elif choice == 6:
quit = 0
def ListBuilder(List,N,Size):
while True: #This while is something of a compromise to handle repeating values
if len(List) < Size: Size = len(List)
if Size > 12: Size = 12
answer_list = []
while len(answer_list) < Size:
A = np.random.randint(len(List))
if A not in answer_list: answer_list.append(A)
if ((N%100,N%3) not in [(x%100,x%3) for x in answer_list]) or (N not in answer_list): #Checks to see if answer in list
real_choice = np.random.randint(0,Size)
answer_list[real_choice] = N
else: real_choice = answer_list.index(N)
#Build list of choices
complete_list = []
for i in range(len(answer_list)):
a = int(answer_list[i])
complete_list.append(List.iat[a,2])
if len(complete_list) == len(set(complete_list)):
complete_list = []
for i in range(len(answer_list)):
a = int(answer_list[i])
complete_list.append([Letter[i],
List.iat[a,0],
List.iat[a,2],
List.iat[a,4]])
return complete_list, complete_list[int(real_choice)][0]
break
def Tester(List, N, Size, Show, Score, QType):
Answer = ListBuilder(List, N, Size)
complete_list, real_choice = Answer[0], Answer[1]
Words = QType
if QType != "LineNumber": A = str(List.iat[int(N),4])
else: A = ""
if QType == "MemoryFileSmall": QType = int(2)
elif QType == "TaoLine": QType = int(0)
elif QType == "LineNumber": QType = int(4)
if Show == 1:
print("\n")
for i in range(len(complete_list)):
print(complete_list[i][0],complete_list[i][1+int(QType/2)])
Choice = input("\nType " + str(Words) + " # " + str(List.iat[int(N),9]) + " from Tao: " + A + " (q to quit): ")
if Choice == List.iat[N,QType] or \
((Show == 1) and (Choice == real_choice[0])):
Score += 2
print("\nPoints! \nTotal Score: " + str(Score) + "\n")
List.iat[N,6] += .5
elif Choice == "q":
quit()
else:
Score -= 3
print("\nWrong! The answer was: " + str(List.iat[N,QType]) + " \nTotal Score: " + str(Score) + "\n")
List.iat[N,6] -= .75
return Score
def Mul_Choice(List, N, Size, Show, Score, QTypes):
# N: real answer
# Size: size of choice list, max 12 or len(List)
# Show: printer
#Test if line number is new
if List.iat[N,5] == 0:
print("\nMemorize line # " + str(N) +" from "+ str(List.iat[N,4]) +":\n"+ str(List.iat[N,0]) +",\nFile: "+ str(List.iat[N,2]) + "\n")
List.iat[N,5] += 1
input("Press return continue")
else:
List.iat[N,5] += .1*QTypes + .33
List.iat[N,7] += 1
if QTypes == 0: Score = Tester(List, N, Size, Show, Score, "TaoLine")
elif QTypes == 1: Score = Tester(List, N, Size, Show, Score, "MemoryFileSmall")
elif QTypes == 2:
Score = Tester(List, N, Size, Show, Score, "MemoryFileSmall")
Score = Tester(List, N, Size, Show, Score, "TaoLine")
elif QTypes == 3:
Score = Tester(List, N, Size, Show, Score, "TaoLine")
Score = Tester(List, N, Size, Show, Score, "MemoryFileSmall")
Score = Tester(List, N, Size, Show, Score, "LineNumber")
if len(List) != len(Tao):
List = List[List["TaoLine"] == List.iat[N,0]]
Tao[Tao["TaoLine"] == List.iat[0,0]] = List
return Score
#User Callable
def LookupChapter(List, N):
#Prints out Chapters from Tao
List = List[List["Chapter"] == N]
print(List[["MemoryFileSmall", "TaoLine"]])
input("\nPress return when done")
def ResetScore(List):
List["TimesSeen"] = 0.0
List["SessionSeen"] = 0.0
List["Points"] = 0.0
Tao.to_csv("Tao3.csv", index=False)
def Random(List, Lower, Upper, CutOff, QTypes, Size):
#Limit: List stop value
#CutOff: % View
#Random: %Chance to see anyway
Score, i, Pic = 0, 0, []
while i < len(range(Lower,Upper + 1)):
a = np.random.randint(Lower,Upper + 1)
if a not in Pic:
Pic.append(a)
i += 1
for N in Pic:
if List.iat[N,6]/(List.iat[N,5] + 1) < CutOff:
A = Score
Score = Mul_Choice(List, N, Size, 1, Score, QTypes)
if Score - A < 2: Pic.append(N)
else:
A = Score
Score = Mul_Choice(List, N, Size, 0, Score, QTypes)
if Score - A < 2: Pic.append(N)
Tao.to_csv("Tao3.csv", index=False)
pd.set_option('display.max_rows', len(Tao))
A = Tao[Tao["SessionSeen"] > 0]
print(A[["TaoLine","TimesSeen","Points"]])
input("Press return")
return Score
def Sequence(List, Lower, Upper, CutOff, QTypes, Size):
#Limit: List stop value
#CutOff: % View
#Random: %Chance to see anyway
Score, Pic = 0, list(range(Lower,Upper + 1))
for N in Pic:
if List.iat[N,6]/(List.iat[N,5] + 1) < CutOff:
A = Score
Score = Mul_Choice(List, N, Size, 1, Score, QTypes)
if Score - A < 2: Pic.append(N)
else:
A = Score
Score = Mul_Choice(List, N, Size, 0, Score, QTypes)
if Score - A < 2: Pic.append(N)
Tao.to_csv("Tao3.csv", index=False)
pd.set_option('display.max_rows', len(Tao))
A = Tao[Tao["SessionSeen"] > 0]
print(A[["TaoLine","TimesSeen","Points"]])
input("\nPress return when done")
return Score
def ReviewChapter(List,N,CutOff, QTypes, Size):
List = List[List["Chapter"] == N]
return Sequence(List, 0, len(List) - 1, CutOff, QTypes, Size)
Main()
| true
|
ff4ac967dc7cc13e23ba1519ba0326789979d680
|
Python
|
s781825175/learnpython
|
/ExclusiveTimeofFunctions.py
|
UTF-8
| 754
| 3.0625
| 3
|
[] |
no_license
|
class Solution(object):
def exclusiveTime(self, n, logs):
"""
:type n: int
:type logs: List[str]
:rtype: List[int]
"""
a,b,c=[],[],[]
for i in logs:
n,typ,end=i.split(':')
n,end=int(n),int(end)
if typ == 'start':
a.append(end)
if typ == 'end':
b.append(end)
b=b[::-1]
for i in range(len(a)):
c.append(b[i]-a[i]+1)
for i in range(len(c)):
if i+1==len(c):
break
c[i]=c[i]-sum(c[i+1:])
return c
if __name__ == '__main__':
n=2
logs =["0:start:0","1:start:2","1:end:5","0:end:6"]
print(Solution().exclusiveTime(n,logs))
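# A common alternative (sketch only, not part of the submitted solution above): walk the log with a
# stack of currently running function ids and credit each elapsed slice to whichever function is on
# top of the stack. 'end' timestamps are inclusive, hence the +1.
class StackSolution(object):
    def exclusiveTime(self, n, logs):
        result = [0] * n
        stack = []      # function ids currently on the call stack
        prev_time = 0
        for log in logs:
            fid, typ, ts = log.split(':')
            fid, ts = int(fid), int(ts)
            if typ == 'start':
                if stack:
                    result[stack[-1]] += ts - prev_time   # time the caller ran before the callee starts
                stack.append(fid)
                prev_time = ts
            else:
                result[stack.pop()] += ts - prev_time + 1
                prev_time = ts + 1
        return result
# Example: StackSolution().exclusiveTime(2, ["0:start:0","1:start:2","1:end:5","0:end:6"]) -> [3, 4]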
| true
|
1c9c5132460dc8d05ef3f6932ee0ed78e6692872
|
Python
|
Junebuggi/SYSC3010_TeamW4
|
/RoughCode/RPIsender_of_RoomSensorInfo/RoomRPI_Class.py
|
UTF-8
| 11,827
| 2.78125
| 3
|
[] |
no_license
|
#Author: Abeer Rafiq
#Modified: 11/24/2019 9:08 am
#Importing Packages
import socket, sys, time, json, serial, Adafruit_DHT
import RPi.GPIO as GPIO
from datetime import datetime, date
#Creating a room rpi class
class RoomRPI:
#The constructor
def __init__(self, port, server_ip_addrs):
#Setting port
self.__port = int(port)
#Setting room ID
self.__roomID = 1
#Setting socket to receive
self.__soc_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_address = ('', self.__port)
self.__soc_recv.bind(recv_address)
self.__soc_recv.settimeout(2)
#Setting socket/addresses to send to the global rpi
self.__soc_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.__server_addrs = (server_ip_addrs, self.__port)
#Setting up led blinking
self.__receiveLED = 14
self.__sendLED = 15
self.__errorLED = 18
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.__receiveLED, GPIO.OUT)
GPIO.setup(self.__sendLED, GPIO.OUT)
GPIO.setup(self.__errorLED, GPIO.OUT)
#Setting up string for acknowldegements
self.__ackstr = "{'opcode':'0'}"
#Setting serial for arduino
self.__ser = serial.Serial('/dev/ttyACM0', timeout = 0.1)
self.__ser.flushInput()
#Setting up pins for temp/humidity sensor
self.__DHT_SENSOR = Adafruit_DHT.DHT22
self.__DHT_PIN = 4
#Setting up default sensor variables
self.__currentLight = 0
self.__currentSoilMoisture = 0
self.__currentWaterDistance = 0
self.__currentRoomHumidity = 0
self.__currentRoomTemperature = 0
#Setting timeout/end time values
self.__ack_timeout = 1
self.__ack_endTime = 4
print("\nRoom RPI Initialized")
#To blink a pin once
def blink(self, pin):
GPIO.output(pin,GPIO.HIGH)
time.sleep(1)
GPIO.output(pin,GPIO.LOW)
return
#To send msgs to the global server
def send_server_msg(self, message):
self.__soc_send.sendto(message, self.__server_addrs)
#Blink send LED
self.blink(self.__sendLED)
print("\nMessage sent to Server: " + message)
#Should be receiving an ack so timeout if no ack received
        self.__soc_recv.settimeout(self.__ack_timeout)
startTime = time.time()
endTime = self.__ack_endTime
while (True):
#If less than a endTime amount of time
if time.time() < (startTime + endTime):
try:
#Try Receving otherwise timeout and retry
print("Waiting for Acknowledgement . . .")
                    buf, address = self.__soc_recv.recvfrom(self.__port)
except socket.timeout:
print("Receiving is Timed Out")
#Restart while loop (Retry)
continue
try:
#If buf is received, try to load it
buf = json.loads(buf)
if not len(buf):
#No ack received, retry
continue
else:
if (buf.get("opcode") == "0"):
#Ack recevied!
print("Acknowledgement Received")
return True
else:
#No ack received, retry
continue
except (ValueError, KeyError, TypeError):
#Ack not received, try again
continue
else:
#Failed to receive ack within a endTime amount of time
return False
return
#To check if temp/humidity sensor is working
def testRoomSensors(self):
if self.__currentRoomHumidity is None and self.__currentRoomTemperature is None:
#Call error detected to handle error
self.errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 0, 0, 1, 0, 0, 0"}')
#Blink LED
self.blink(self.__errorLED)
#Set temp and humidity to 0 if sensor isn't working
self.__currentRoomTemperature = 0
self.__currentRoomHumidity = 0
print("\nDHT22 has been Tested")
return
#To send error to the server
def errorDetected(self, error):
#If ack received return
if (self.send_server_msg(error) == False):
#If no ack received, try sending again
print("\nError sent again to server")
self.errorDetected(error)
return
#To get measurements from DHT22 sensor for humidity and temp
def collectRoomData(self):
self.__currentRoomHumidity, self.__currentRoomTemperature = \
Adafruit_DHT.read(self.__DHT_SENSOR, self.__DHT_PIN);
print("\nRoom Data Variables Updated")
return
#To set current pot sensor values to what has been detected by pot sensors
#Also to handle arduino's sensor errors
def getPotData(self, potData):
potID = int(potData.get('potID'))
#Arduino sensor values
self.__currentWaterDistance = potData.get('waterDistance')
self.__currentSoilMoisture = potData.get('soilMoisture')
self.__currentLight = potData.get('light')
#Arduino's sensor error variables
waterDistanceStatus = potData.get('waterDistanceStatus')
soilMoistureStatus = potData.get('soilMoistureStatus')
waterPumpStatus = potData.get('waterPumpStatus')
ldrStatus = potData.get('ldrStatus')
waterLow = potData.get('waterLow')
#If any status == 0, means there is an error, call errorDetected
#Set associating measurement to 0 if there is one
if ldrStatus == 0:
light = 0
self.errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 0, 1, 0, 0, 0, 0"}')
if soilMoistureStatus == 0:
soilMoisture = 0
self.errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 0, 0, 0, 1, 0, 0"}')
if waterDistanceStatus == 0:
self.waterDistance = 0
errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 0, 0, 0, 0, 1, 0"}')
if waterPumpStatus == 0:
self.errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 0, 0, 0, 0, 0, 1"}')
if waterLow == 0:
self.errorDetected('{"opcode" : "D", "sensorArray" : "0, 0, 0, 0, 1, 0, 0, 0, 0, 0"}')
print("\nPot Data Variables Updated")
return potID
#To create JSON with all data and send to global server
def sendSensoryData(self, potID):
alldata = '{"opcode": "9", "roomID": ' + str(self.__roomID) + \
', "potID": ' + str(potID) + ', "temperature": ' + \
str(self.__currentRoomTemperature) + ', "humidity": ' + \
str(self.__currentRoomHumidity) + ', "waterDistance": ' + \
str(self.__currentWaterDistance) + ', "waterDistanceStatus": ' + \
', "light": ' + str(self.__currentLight) + ', "soilMoisture": ' + \
str(self.__currentSoilMoisture) + '}'
#If ack received return
if (self.send_server_msg(alldata) == False):
#If no ack received, send data again
print("\nAll data sent again to server")
self.sendSensoryData(potID)
return
#To communicate to the arduino to send it's sensory data
def requestPotData(self):
print("\nRequesting Pot Data")
self.__ser.write(("E,").encode("utf-8"))
startTime = time.time()
while time.time() < (startTime + 2):
#readLine to get data
            potData = self.__ser.readline()
self.blink(self.__receiveLED)
if (len(potData) > 0):
potData = potData.decode().strip('\r\n')
#send acknowledgement
self.__ser.write(("0,").encode("utf-8"))
self.blink(self.__sendLED)
print("Received Pot Data: " + potData)
return potData
else:
#send error
self.__ser.write(("E,").encode("utf-8"))
return('{"opcode": null, "potID": null,"waterPumpStatus": null,"waterDistance": null,"waterDistanceStatus": null,"light": null,"ldrStatus": null,"soilMoisture": null,"soilMoistureStatus": null}')
#To create JSON to start water pump and communicate to arduino
def startWaterPump(self, pumpDuration):
if type(pumpDuration) is int and pumpDuration >= 1:
#Creating json, writing to serial
pumpMessage = "C," + str(pumpDuration)
self.blink(self.__sendLED)
self.__ser.write((pumpMessage).encode("utf-8"))
else:
#Error is raised
raise ValueError("Pump duration must be an integer AND must be greater than or equal to 1")
return
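    #For reference (inferred from the serial writes in this class, not a formal protocol spec):
    # "E," asks the Arduino for its pot data (and is reused to flag a receive error),
    # "0," acknowledges a message received from the Arduino,
    # "C,<seconds>" runs the water pump for the given number of seconds,
    #e.g. startWaterPump(3) sends the string "C,3" over the serial link.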
#Receives/returns buffer and sends ack
def receive(self):
#Receiving
print("\nWaiting to receive on port %d ... " % self.__port)
buf, address = self.__soc_recv.recvfrom(self.__port)
if(len(buf) > 0):
#Blink receive Led
self.blink(self.__receiveLED)
print ("Received %s bytes from '%s': %s " % (len(buf), address[0], buf))
#Sending ack
self.__soc_send.sendto(self.__ackstr, (address[0], self.__port))
#Blink send Led
self.blink(self.__sendLED)
print ("Sent %s to %s" % (self.__ackstr, (address[0], self.__port)))
#Give time for the ack sent to be acknowledged
time.sleep(self.__ack_endTime)
return buf
else:
return False
#Main function which receives json data/arduino data and invokes methods based on opcode
def main():
#Create room RPI object (port, server_ip_addrs)
roomRPI = RoomRPI(1000, '192.168.1.47')
startTime = time.time()
sendTime = 5
while True:
#Request arduino data after every 'sendTime' seconds
if time.time() > (startTime + sendTime):
            message = roomRPI.requestPotData()
message = json.loads(message)
#Ensure the opcode received is 8 (arduino sent pot data)
if message.get('opcode') == '8':
#Update pot variables
                potID = roomRPI.getPotData(message)
                #Update room variables by getting DHT22 measurements
                roomRPI.collectRoomData()
                #Test if DHT22 is working
                roomRPI.testRoomSensors()
                #Send all data to server
                roomRPI.sendSensoryData(potID)
#Recalculate time
startTime = time.time()
else:
#Check to see if server sent a start water pump msg
try:
message = roomRPI.receive()
#If no msg sent from server, time out
            except socket.timeout as e:
err = e.args[0]
if err == 'timed out':
time.sleep(1)
print('\nReceiver timed out')
continue
if (message == False):
#If length of buffer is <1
continue
else:
message = json.loads(message)
#To start water pump
if (message.get('opcode') == "4"):
startWaterPump(int(message.get("pumpDuration")))
else:
continue
self.__soc_recv.shutdown(1)
self.__soc_send.shutdown(1)
return
if __name__== "__main__":
main()
| true
|
5da2d3b70d04e05497c00bb25d2c63bb1dc11965
|
Python
|
hi0t/Outtalent
|
/Leetcode/132. Palindrome Partitioning II/solution1.py
|
UTF-8
| 432
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
class Solution:
def minCut(self, s: str) -> int:
dp = [len(s) - i for i in range(len(s) + 1)]
p = [[False] * len(s) for j in range(len(s))]
for i in range(len(s) - 1, -1, -1):
for j in range(i, len(s)):
if s[i] == s[j] and (((j - i) < 2) or p[i + 1][j - 1]):
p[i][j] = True
dp[i] = min(1 + dp[j + 1], dp[i])
return dp[0] - 1
| true
|
15262ea9451c2c18bf690119cf5482f5acaa2d6a
|
Python
|
rpetit3/BIGSI
|
/scripts/jaccard_index.py
|
UTF-8
| 344
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env python
import begin
def load_all_kmers(f):
kmers = []
with open(f, 'r') as inf:
for line in inf:
kmer = line.strip()
kmers.append(kmer)
return set(kmers)
@begin.start
def run(f1, f2):
s1 = load_all_kmers(f1)
s2 = load_all_kmers(f2)
print(len(s1 & s2) / len(s1 | s2))
| true
|
f32f8fd0694c6961b6323705093ed4a7e7b13838
|
Python
|
lakiw/cripts
|
/cripts/hash_types/hash_type.py
|
UTF-8
| 680
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
## HashType base class.
#
# All hash type plugins shall derive from this class. Class-based plugins enable
# multiple instantiations of a particular type of hash format information with
# potentially different requirements
#
class HashType(object):
## Initializes the basic HashType plugin values.
#
# Derived classes should call this method, like so:
# \code
# class RandomHashType(HashType):
# def __init__(self):
# super().__init__()
# # ... plugin specific stuff ...
# \endcode
#
def __init__(self):
self.reference_id="DEFAULT"
self.display_name="Default"
        self.additional_fields = []
| true
|
3c62c8db678f00246b9330b95fe252a6abeaef2f
|
Python
|
martinlyra/2DT301
|
/components/light_component.py
|
UTF-8
| 565
| 2.71875
| 3
|
[] |
no_license
|
import time
from components.basic.ComponentBuilder import BuilderHint
from components.basic.base_component import BaseComponent
@BuilderHint("light")
class LightComponent(BaseComponent):
state = 0
def toggle(self, override=None):
if override is None:
if self.state == 0:
self.state = 1
else:
self.state = 0
else:
self.state = override
for pin in self.pins:
if pin.is_output():
pin.write(self.state)
| true
|
a1c2b8f33b2443ba615901cb8b09b5631fc4cfa6
|
Python
|
ryosuke071111/algorithms
|
/AtCoder/ABC/rakugaki.py
|
UTF-8
| 660
| 2.6875
| 3
|
[] |
no_license
|
# a,b,x=map(int,input().split())
# print(b//x-(a-1)//x)
# print("".join(list(map(lambda x:x[0],list(input().split())))))
# ls=[input() for i in range(3)]
# print(ls[0][0]+ls[1][1]+ls[2][2])
# a,b,c=map(int,input().split())
# print('Yes' if a+b>=c else "No")
# n=int(input())
# a=int(input())
# print('Yes' if n%500<=a else "No")
# s=input()
# print(700+s.count('o')*100)
# n,m=map(int,input().split())
# ls=[0]*m
# for i in range(n):
# A=list(map(int,input().split()))[1:]
# for j in A:
# ls[j-1]+=1
# print(sum(list(map(lambda x:x//n,ls))))
a,b,c=map(int,input().split())
k=int(input())
print(sum(sorted([a,b,c])[:2])+(max(a,b,c)*(2**k)))
# 6 12 24
| true
|
670c17d6405078a7c1008d8012a4ab13f1fa8f9d
|
Python
|
Arjun-Ani/python_learn
|
/specialpattern-2.py
|
UTF-8
| 102
| 2.984375
| 3
|
[] |
no_license
|
a=raw_input("Enter the number\n")
b=lambda a:(int(a+a+a+a))+(int(a+a+a))+(int(a+a))+int(a)
print b(a)
| true
|
8ee4759bed050908dca65e17b12e8efb2041b879
|
Python
|
nandopedrosa/snglist
|
/app/models.py
|
UTF-8
| 11,609
| 2.625
| 3
|
[] |
no_license
|
"""
models.py: Domain models
__author__ = "Fernando P. Lopes"
__email__ = "fpedrosa@gmail.com"
"""
from app import db, login_manager
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask.ext.login import UserMixin  # implements common authentication functions
from flask import current_app
from app.util import getsoup
from sqlalchemy.sql import text
# Many-to-Many auxiliary table of Songs and Shows
setlist = db.Table(
'setlist',
db.Column('show_id', db.Integer, db.ForeignKey('show.id')),
db.Column('song_id', db.Integer, db.ForeignKey('song.id')),
db.Column('song_position', db.Integer)
)
# Many-to-Many auxiliary table of Bands and Songs
band_songs = db.Table('band_songs',
db.Column('band_id', db.Integer, db.ForeignKey('band.id'), nullable=False),
db.Column('song_id', db.Integer, db.ForeignKey('song.id'), nullable=False)
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(1000), nullable=False)
confirmed = db.Column(db.Boolean, default=False)
bands = db.relationship('Band', backref='user', lazy='dynamic', cascade="all, delete-orphan")
songs = db.relationship('Song', backref='user', lazy='dynamic', cascade="all, delete-orphan")
shows = db.relationship('Show', backref='user', lazy='dynamic', cascade="all, delete-orphan")
def __repr__(self):
return 'User {0} ({1})'.format(self.name, self.email)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=86400):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Song(db.Model):
__tablename__ = 'song'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(128), nullable=False)
artist = db.Column(db.String(128))
key = db.Column(db.String(128))
tempo = db.Column(db.Integer)
duration = db.Column(db.String(5))
lyrics = db.Column(db.Text)
notes = db.Column(db.String(4000))
def __repr__(self):
return self.title
def pretty_duration(self):
if self.duration is not None and self.duration != '':
return self.duration[:2] + ':' + self.duration[2:]
else:
return ''
@staticmethod
def get_lyrics_or_chords(url):
"""
Scrapes the HTML of a given song Lyrics or Chords
:param url: The url of the song (different Providers)
:return: HTML of the song's Lyrics or Chords
"""
html = ''
if 'cifraclub' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)
soup = getsoup(url)
sections = soup.find_all('pre')
for s in sections:
html += str(s)
if 'letras.mus.br' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
soup = getsoup(url)
article = soup.find('article')
html = str(article)
if 'e-chords' in url:
soup = getsoup(url)
pre = soup.find('pre', id='core')
# Remove Tab Div, keep raw tab
div = pre.find('div')
if div is not None:
tab = div.find('div', class_='tab')
html = '<pre>' + tab.text + '</pre>'
div.extract()
html += str(pre)
if 'freak' in url:
soup = getsoup(url)
content = soup.find('div', id='content_h')
html = str(content)
return html
def get_list_of_associated_bands(self):
formatted_output = ''
associated_bands = self.query.get(self.id).bands.order_by(Band.name).all()
for band in associated_bands:
formatted_output = formatted_output + band.name + ', '
if len(formatted_output) > 0:
formatted_output = formatted_output[:-2]
return formatted_output
class Band(db.Model):
__tablename__ = 'band'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(128), nullable=False)
style = db.Column(db.String(128))
members = db.relationship('BandMember',
backref=db.backref('band'),
cascade="all, delete-orphan",
lazy='dynamic')
"""
    Configuration for a many to many relationship between Bands and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=band_songs,
primaryjoin=(band_songs.c.band_id == id),
secondaryjoin=(band_songs.c.song_id == Song.id),
backref=db.backref('bands', lazy='dynamic'),
lazy='dynamic')
def associate_song(self, song):
"""
Adds a song to the association list
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def disassociate_song(self, song):
"""
Removes a song from the association list
:param song: The song object to be removed
:return: None
"""
self.songs.remove(song)
def __repr__(self):
return 'Band {0}'.format(self.name)
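# Illustrative sketch (an assumption, not part of the original model code): how the
# Band <-> Song association configured above is typically used. The band/song
# arguments are assumed to be persistent instances attached to the session.
def _example_band_song_association(band, song):
    band.associate_song(song)          # adds a row to the band_songs table
    db.session.commit()
    # Both sides are query objects because lazy='dynamic' is set on each:
    song_titles = [s.title for s in band.songs.order_by(Song.title).all()]
    band_names = [b.name for b in song.bands.all()]   # via the 'bands' backref
    return song_titles, band_names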
class BandMember(db.Model):
__tablename__ = 'band_member'
id = db.Column(db.Integer, primary_key=True)
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), nullable=False)
def __repr__(self):
return 'Band Member {0} ({1})'.format(self.name, self.email)
# noinspection SqlDialectInspection
class Show(db.Model):
__tablename__ = 'show'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
start = db.Column(db.DateTime, nullable=True)
end = db.Column(db.DateTime, nullable=True)
address = db.Column(db.String(4000))
contact = db.Column(db.String(4000))
pay = db.Column(db.String(128))
notes = db.Column(db.String(4000))
"""
Configuration for a many to many relationship between Shows and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=setlist,
order_by=setlist.c.song_position,
primaryjoin=(setlist.c.show_id == id),
secondaryjoin=(setlist.c.song_id == Song.id),
backref=db.backref('shows', lazy='dynamic'),
lazy='dynamic')
def __repr__(self):
return self.name
def add_song(self, song):
"""
Adds a song to the show's setlist
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def remove_song(self, song):
self.songs.remove(song)
def remove_all_songs(self):
with db.engine.connect() as connection:
delete_sql = text('delete from setlist where show_id = :show_id')
delete_sql = delete_sql.bindparams(show_id=self.id)
connection.execute(delete_sql.execution_options(autocommit=True))
def assign_position(self, song, pos=None):
"""
Assigns the correct order position for a new song added to the setlist
:param song: the song to be ordered
:param pos: the position, if it is known
:return: None
"""
if not pos:
next_order = self.__get_max_pos() + 1
else:
next_order = pos
update = text(
"update setlist set song_position = :order where show_id = :show_id and song_id = :song_id")
update = update.bindparams(order=next_order, show_id=self.id, song_id=song.id)
db.engine.execute(update.execution_options(autocommit=True))
def __get_max_pos(self):
"""
Gets the position of the last song in the setlist
:return: the position of the last song
"""
query = text('select max(song_position) as "max_position" from setlist where show_id = :id')
query = query.bindparams(id=self.id)
result = db.engine.execute(query)
for row in result:
max_position = row['max_position']
if max_position is None:
max_position = 0
result.close()
return max_position
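# Illustrative sketch (an assumption, not part of the original model code): adding a
# song to a show's setlist and recording its position in the 'setlist' table. The
# show/song arguments are assumed to be persistent instances.
def _example_setlist_usage(show, song):
    show.add_song(song)            # appends a row to the many-to-many 'setlist' table
    db.session.commit()
    show.assign_position(song)     # stores song_position = current max position + 1
    # Songs come back ordered by song_position (see order_by on the relationship):
    return [s.title for s in show.songs.all()]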
| true
|
a572805525402e4fba6cd5ac169f18d8756406e1
|
Python
|
KyleKing/My-Programming-Sketchbook
|
/Assorted_Snippets/python/socket_prototyping/server_makefile.py
|
UTF-8
| 1,059
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
"""Based on https://stackoverflow.com/q/59978887/3219667.
Update: not working. May want to revisit
"""
import socket
from loguru import logger
HOST = '127.0.0.1'
PORT = 65439
ACK_TEXT = 'text_received'
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
logger.debug(f'Waiting for connection on port {sock.getsockname()[1]}')
sock.listen(1)
while True:
conn, addr = sock.accept()
logger.debug(f'Connected by {addr}')
with conn:
sock_file = conn.makefile(mode='rw')
logger.debug('> before sock_file.readline()')
msg = sock_file.readline()
logger.debug('< after sock_file.readline()')
logger.debug(f'msg: {msg!r}')
if not msg:
exit(0)
sock_file.write(msg)
sock_file.flush()
if __name__ == '__main__':
main()
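# Minimal client sketch for exercising the server above (an assumption, not part
# of the original snippet). The server's sock_file.readline() blocks until it
# receives a newline-terminated line, so the client must append '\n' and flush.
def example_client(message: str = 'hello server') -> str:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_sock:
        client_sock.connect((HOST, PORT))
        client_file = client_sock.makefile(mode='rw')
        client_file.write(message + '\n')  # the newline terminates the line
        client_file.flush()
        return client_file.readline()      # the line echoed back by the server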
| true
|
5a9a3288a497020de54d83b0e428804dce443279
|
Python
|
harveymei/pythoncrashcourse
|
/formatted_name2.py
|
UTF-8
| 887
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/12/1 11:10 AM
# @Author : Harvey Mei <harvey.mei@msn.com>
# @FileName: formatted_name2.py
# @IDE : PyCharm
# @GitHub :https://github.com/harveymei/
# Avoid an infinite loop by adding a quit condition
# Define a quit condition
def get_formatted_name(first_name, last_name):
"""Return a full name, neatly formatted."""
full_name = first_name + " " + last_name
return full_name.title()
while True:
print("\nPlease tell me your name:")
print("(enter 'q' at any time to quit)")
f_name = input("First name: ")
    if f_name == 'q':  # Break out of the loop if the user enters 'q'
break
l_name = input("Last name: ")
    if l_name == 'q':  # Break out of the loop if the user enters 'q'
break
formatted_name = get_formatted_name(f_name, l_name)
print("\nHello, " + formatted_name + "!")
| true
|
e6ed1d5058717baa542799cc6d3252e96c17a680
|
Python
|
witnessai/MMSceneGraph
|
/mmdet/patches/visualization/color.py
|
UTF-8
| 1,616
| 3.09375
| 3
|
[
"MIT",
"Python-2.0"
] |
permissive
|
# ---------------------------------------------------------------
# color.py
# Set-up time: 2020/4/26 8:59 AM
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com
# ---------------------------------------------------------------
import numpy as np
import seaborn as sns
def color_palette(*args, **kwargs):
"""Obtain a seaborn palette.
Args:
palette: str.
n_colors (int): Number of colors.
desat (float): saturation, 0.0 ~ 1.0
Returns:
        list[tuple[int]]: A list of (B, G, R) tuples with integer channels in [0, 255].
"""
palette = sns.color_palette(*args, **kwargs)
# transform to bgr and uint8
new_palette = []
for color in palette:
color = (np.array(color) * 255).astype(np.uint8)
r = color[0]
color[0] = color[2]
color[2] = r
color = tuple([int(c) for c in color])
new_palette.append(color)
return new_palette
def float_palette(to_rgb=False, *args, **kwargs):
"""Obtain a seaborn palette.
Args:
palette: str.
n_colors (int): Number of colors.
desat (float): saturation, 0.0 ~ 1.0
Returns:
        list[tuple[float]]: A list of (B, G, R) float tuples (RGB order if to_rgb=True).
"""
palette = sns.color_palette(*args, **kwargs)
new_palette = []
for color in palette:
if not to_rgb:
color = (color[2], color[1], color[0])
new_palette.append(color)
return new_palette
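# Example usage (illustrative only; the palette name and sizes are assumptions):
#   bgr_uint8 = color_palette('hls', n_colors=8)         # list of uint8 (B, G, R) tuples
#   bgr_float = float_palette(False, 'hls', n_colors=8)  # list of float (B, G, R) tuples
#   rgb_float = float_palette(True, 'hls', n_colors=8)   # keep seaborn's RGB ordering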
| true
|
808e29e9c6d1aa0768d65144832f1f10f659d537
|
Python
|
dennis2030/leetcodeStudyGroup
|
/406-queue-reconstruction-by-height/Sony_1.py
|
UTF-8
| 808
| 3.328125
| 3
|
[] |
no_license
|
class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
result = []
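        # Greedy strategy: group k-values by height, process heights from tallest
        # to shortest, and insert each person at index k. Everyone already placed
        # is at least as tall, so inserting at position k leaves exactly k
        # taller-or-equal people in front of the inserted person.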
tall_list =[]
tall_map = dict()
for person in people:
tall = person[0]
if tall not in tall_map:
tall_map[tall] = list()
tall_list.append(tall)
tall_map[tall].append(person[1])
tall_list.sort(key=lambda x: -x)
for tall in tall_list:
tall_map[tall].sort()
for idx in tall_map[tall]:
result.insert(idx, [tall, idx])
return result
if __name__ == '__main__':
sol = Solution()
people = [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
print(sol.reconstructQueue(people))
| true
|
1eeb2d48325b71b40cae51112084744e45a2477f
|
Python
|
jpisarek/JWT
|
/models.py
|
UTF-8
| 2,227
| 2.53125
| 3
|
[] |
no_license
|
from run import db
from sqlalchemy.orm import relationship
class UserModel(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(120), unique = True, nullable = False)
password = db.Column(db.String(120), nullable = False)
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_by_username(cls, username):
return cls.query.filter_by(username = username).first()
@classmethod
def return_all(cls):
def to_json(x):
return {
'username': x.username,
'password': x.password
}
return {'users': list(map(lambda x: to_json(x), UserModel.query.all()))}
class RecipeModel(db.Model):
__tablename__ = 'recipes'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(120), unique = True, nullable = False)
ingredients = db.relationship('IngredientModel', backref='associated_ingredient', lazy='dynamic')
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def return_all(cls):
def to_json(x):
return {
'id': x.id,
'name': x.name
}
return {'recipes': list(map(lambda x: to_json(x), RecipeModel.query.all()))}
@property
def serialize(self):
        return {
            'name': self.name,
        }
class IngredientModel(db.Model):
__tablename__ = 'ingredients'
id = db.Column(db.Integer, primary_key = True)
item = db.Column(db.String(120), unique = False, nullable = False)
recipe = db.Column(db.Integer, db.ForeignKey("recipes.id"), nullable=False)
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def return_all(cls):
def to_json(x):
return {
'id': x.id,
'item': x.item,
'recipe': x.recipe
}
return {'ingredients': list(map(lambda x: to_json(x), IngredientModel.query.all()))}
@property
def serialize(self):
return {
'item': self.item,
}
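# Illustrative sketch (an assumption, not part of the original module): creating
# a recipe and attaching an ingredient through the foreign key defined above.
def _example_create_recipe(name, ingredient_item):
    recipe = RecipeModel(name=name)
    recipe.save_to_db()                     # commits, so recipe.id is populated
    ingredient = IngredientModel(item=ingredient_item, recipe=recipe.id)
    ingredient.save_to_db()
    # The 'ingredients' relationship is lazy='dynamic', so it is a query object:
    return recipe.ingredients.all()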
| true
|
8d6a92ca46671a82d767b6e501980944ddae86f8
|
Python
|
kqvd/IFB104-2017
|
/Assignment 2 - News Archivist/news_archivist.py
|
UTF-8
| 38,190
| 2.859375
| 3
|
[] |
no_license
|
#-----Statement of Authorship----------------------------------------#
#
# This is an individual assessment item. By submitting this
# code I agree that it represents my own work. I am aware of
# the University rule that a student must not act in a manner
# which constitutes academic dishonesty as stated and explained
# in QUT's Manual of Policies and Procedures, Section C/5.3
# "Academic Integrity" and Section E/2.1 "Student Code of Conduct".
#
# Student no: n9934731
# Student name: Kevin Duong
#
# NB: Files submitted without a completed copy of this statement
# will not be marked. Submitted files will be subjected to
# software plagiarism analysis using the MoSS system
# (http://theory.stanford.edu/~aiken/moss/).
#
#--------------------------------------------------------------------#
#-----Task Description-----------------------------------------------#
#
# News Archivist
#
# In this task you will combine your knowledge of HTMl/XML mark-up
# languages with your skills in Python scripting, pattern matching
# and Graphical User Interface development to produce a useful
# application for maintaining and displaying archived news or
# current affairs stories on a topic of your own choice. See the
# instruction sheet accompanying this file for full details.
#
#--------------------------------------------------------------------#
#-----Imported Functions---------------------------------------------#
#
# Below are various import statements that were used in our sample
# solution. You should be able to complete this assignment using
# these functions only.
# Import the function for opening a web document given its URL.
from urllib.request import urlopen
# Import the function for finding all occurrences of a pattern
# defined via a regular expression, as well as the "multiline"
# and "dotall" flags.
from re import findall, MULTILINE, DOTALL
# A function for opening an HTML document in your operating
# system's default web browser. We have called the function
# "webopen" so that it isn't confused with the "open" function
# for writing/reading local text files.
from webbrowser import open as webopen
# An operating system-specific function for getting the current
# working directory/folder. Use this function to create the
# full path name to your HTML document.
from os import getcwd
# An operating system-specific function for 'normalising' a
# path to a file to the path-naming conventions used on this
# computer. Apply this function to the full name of your
# HTML document so that your program will work on any
# operating system.
from os.path import normpath, exists
# Import the standard Tkinter GUI functions.
from tkinter import *
# Import the SQLite functions.
from sqlite3 import *
# Import the date and time function.
from datetime import datetime
#
#--------------------------------------------------------------------#
#-----Student's Solution---------------------------------------------#
#
# Put your solution at the end of this file.
#
# Name of the folder containing your archived web documents. When
# you submit your solution you must include the web archive along with
# this Python program. The archive must contain one week's worth of
# downloaded HTML/XML documents. It must NOT include any other files,
# especially image files.
internet_archive = 'InternetArchive'
################ PUT YOUR SOLUTION HERE #################
#====================================================================#
#TKINTER SETUP
#====================================================================#
# Note: this GUI has been tested on Windows; macOS compatibility is untested.
# Create a window
window = Tk()
# Set the window's size
window.geometry('800x600')
# Make window background colour white
window['bg'] = 'white'
# Give the window a title
window.title('RT News Archive')
# Disables maximum mode
window.resizable(0,0)
# Radiobutton Frame
dates_frame = Frame(window, borderwidth=2, relief='solid')
dates_frame.place(relx=0.025, rely=0.42, width=376)
# HTML template, with blanks marked by asterisks
html_template = """<!DOCTYPE html>
<html>
<head>
<!-- Title for browser window/tab -->
<title>RT News Archive</title>
<!-- Overall document style -->
<style>
body {background-color: black}
p {width: 80%; margin-left: auto; margin-right: auto; text-align:justify; font-family: "Arial"}
h1 {width: 80%; margin-left: auto; margin-right: auto; text-align:center; font-family: "Arial"; font-size: 3em}
h2 {width: 80%; margin-left: auto; margin-right: auto; text-align:center; font-family: "Arial"; font-size: 2em}
h3 {width: 80%; margin-left: auto; margin-right: auto; text-align:center; font-family: "Arial"; font-size: 1.5em}
div {width: 80%; margin-left: auto; margin-right: auto; background-color: white; height: 100%;}
hr {margin-top: 1em; margin-bottom: 1em; background-color: black}
</style>
</head>
<body>
<div>
<!-- Masterhead -->
<h1>RT News Archive</h1>
<!-- Day, Month, Year -->
<h2>***SUBTITLE***</h2>
<!-- RT News Logo from https://upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Russia-today-logo.svg/1200px-Russia-today-logo.svg.png -->
<p style = "text-align:center"><img src = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a0/Russia-today-logo.svg/1200px-Russia-today-logo.svg.png"
alt = "RT News Logo!" height="100%" width="100%" style="border: black 1px solid">
<p style = "text-align:left"><strong>News source: </strong><a href="https://www.rt.com/rss/news/">https://www.rt.com/rss/news/</a>
<br><strong>Archivist:</strong> Kevin Duong </br></p>
<hr width = "80%" size = 5px>
<!-- News article 1 -->
<!-- Headline -->
<h3>1. ***HEADLINE_1***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_1***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_1***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_1***">***SOURCE_1***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_1*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 2 -->
<!-- Headline -->
<h3>2. ***HEADLINE_2***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_2***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_2***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_2***">***SOURCE_2***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_2*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 3 -->
<!-- Headline -->
<h3>3. ***HEADLINE_3***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_3***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_3***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_3***">***SOURCE_3***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_3*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 4 -->
<!-- Headline -->
<h3>4. ***HEADLINE_4***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_4***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_4***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_4***">***SOURCE_4***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_4*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 5 -->
<!-- Headline -->
<h3>5. ***HEADLINE_5***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_5***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_5***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_5***">***SOURCE_5***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_5*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 6 -->
<!-- Headline -->
<h3>6. ***HEADLINE_6***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_6***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_6***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_6***">***SOURCE_6***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_6*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 7 -->
<!-- Headline -->
<h3>7. ***HEADLINE_7***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_7***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_7***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_7***">***SOURCE_7***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_7*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 8 -->
<!-- Headline -->
<h3>8. ***HEADLINE_8***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_8***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_8***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_8***">***SOURCE_8***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_8*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 9 -->
<!-- Headline -->
<h3>9. ***HEADLINE_9***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_9***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_9***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_9***">***SOURCE_9***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_9*** </br></p>
<hr width = "80%" size = 5px>
<!-- News article 10 -->
<!-- Headline -->
<h3>10. ***HEADLINE_10***</h3>
<!-- Article Image -->
<p style = "text-align:center"><img src = "***IMAGE_10***" alt = "Sorry, image not found!" height="100%" width="100%" style="border: black 1px solid;" />
<!-- Story -->
<p>***SYPNOSIS_10***</p>
<!-- Source link -->
<p style = "text-align:left"><strong>Full story:</strong> <a href="***SOURCE_10***">***SOURCE_10***</a>
<!-- Date of publication -->
<br><strong>Dateline:</strong> ***DATE_10*** </br></p>
</div>
</body>
</html>
"""
# Select date radiobutton indicatoron
MODES = [
("Thu, 12th Oct 2017", "InternetArchive/RTNews_12October_2017"),
("Fri, 13th Oct 2017", "InternetArchive/RTNews_13October_2017"),
("Sat, 14th Oct 2017", "InternetArchive/RTNews_14October_2017"),
("Sun, 15th Oct 2017", "InternetArchive/RTNews_15October_2017"),
("Mon, 16th Oct 2017", "InternetArchive/RTNews_16October_2017"),
("Tue, 17th Oct 2017", "InternetArchive/RTNews_17October_2017"),
("Wed, 18th Oct 2017", "InternetArchive/RTNews_18October_2017"),
("Latest", "InternetArchive/RTNews_Latest"),
]
# Create variables for each date
v = StringVar()
v.set("InternetArchive/RTNews_Latest") # Intial selection
#====================================================================#
#FUNCTIONS
#====================================================================#
# If radiobutton equals the date's value
# Date selection function
def selected_date():
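    # Each branch below opens the archived file for the selected date purely to
    # confirm that it exists (a missing file raises FileNotFoundError, handled
    # at the end); the returned file handle is not used further.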
try:
if MODES[0][1] == v.get():
archive = open('InternetArchive/RTNews_12October_2017.xhtml',
'r', encoding = 'UTF-8')
messenger.config(text = MODES[0][0])
if MODES[1][1] == v.get():
archive = open('InternetArchive/RTNews_13October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[1][0])
if MODES[2][1] == v.get():
archive = open('InternetArchive/RTNews_14October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[2][0])
if MODES[3][1] == v.get():
archive = open('InternetArchive/RTNews_15October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[3][0])
if MODES[4][1] == v.get():
archive = open('InternetArchive/RTNews_16October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[4][0])
if MODES[5][1] == v.get():
archive = open('InternetArchive/RTNews_17October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[5][0])
if MODES[6][1] == v.get():
archive = open('InternetArchive/RTNews_18October_2017.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[6][0])
if MODES[7][1] == v.get():
archive = open('InternetArchive/RTNews_Latest.xhtml',
'r',encoding = 'UTF-8')
messenger.config(text = MODES[7][0])
except FileNotFoundError:
messenger.config(text = 'Error: News file not found in archive!')
for text, mode in MODES:
b = Radiobutton(dates_frame, text=text, variable=v, value=mode,
indicatoron=0, width=26,anchor=W, selectcolor='#77BE17',
bg='white',fg='black', font = ('Arial', 18, 'bold'),
borderwidth=0, command = selected_date).pack()
# Log events on SQLite
# Create variable for checkbutton
check = IntVar()
check.set(-1) # initialize
log_description = ['News extracted from archive',
'Extracted news displayed in web browser',
'Latest news downloaded and stored in archive',
'Event logging switched on',
'Event logging switched off']
# When the db file is tampererd
sqlite_error = 'Error: event_log.db missing / modified!'
# Connect to the event_log.db file
connection = connect(database = 'event_log.db')
event_db = connection.cursor()
template = "INSERT INTO Event_Log VALUES (NULL, 'DESCRIPTION')"
# Checkbutton function
def log_events():
try:
# If the event log button is turned on
if check.get() == 1:
connection = connect(database = 'event_log.db')
event_db = connection.cursor()
event_description = log_description[3]
sql_statement = template.replace('DESCRIPTION', event_description)
event_db.execute(sql_statement)
connection.commit()
# If the event log button is turned off
if check.get() == 0:
connection = connect(database = 'event_log.db')
event_db = connection.cursor()
event_description = log_description[4]
sql_statement = template.replace('DESCRIPTION', event_description)
event_db.execute(sql_statement)
connection.commit()
event_db.close()
connection.close()
except:
messenger.config(text = sqlite_error)
# The raw XML file from the internet archive is converted into
# an HTML document containing the first 10 stories of that day
# Extract news function
def generate_html():
try:
# Initial conditions
archive = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
if v.get() == "InternetArchive/RTNews_12October_2017":
subtitle = "Thursday, 12th October 2017"
if v.get() == "InternetArchive/RTNews_13October_2017":
subtitle = "Friday, 13th October 2017"
if v.get() == "InternetArchive/RTNews_14October_2017":
subtitle = "Saturday, 14th October 2017"
if v.get() == "InternetArchive/RTNews_15October_2017":
subtitle = "Sunday, 15th October 2017"
if v.get() == "InternetArchive/RTNews_16October_2017":
subtitle = "Monday, 16th October 2017"
if v.get() == "InternetArchive/RTNews_17October_2017":
subtitle = "Tuesday, 17th October 2017"
if v.get() == "InternetArchive/RTNews_18October_2017":
subtitle = "Wednesday, 18th October 2017"
if v.get() == "InternetArchive/RTNews_Latest":
subtitle = "Latest Update"
# Regex to find tags off xhtml files based on chosen archive
headline_tag = '<title>(.*)</title>'
image_tag = 'src="(.*?)"'
item_tag = '<item>([\s\S]+?)</item>'
story_tag = ['" /> (.*)<br/><a href', # Single-line code
'" /> ([\s\S]+?)<br/><a href'] # Multi-line code
news_tag = '<guid>(.*)</guid>'
date_tag = '<pubDate>(.*)</pubDate>'
#====================================================================#
# STORY 1
#====================================================================#
# Finds headline
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_1 = findall(headline_tag, xhtml_file.read())[2]
# Finds image
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_1 = findall(image_tag, xhtml_file.read())[0]
# Finds story
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[0]
if sypnosis == '" /> <a href':
sypnosis_1 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_1 = findall(story_tag[1], str(sypnosis))
sypnosis_1 = ' '.join(sypnosis_1)
# Finds news source
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_1 = findall(news_tag, xhtml_file.read())[0]
# Finds date of publication
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_1 = findall(date_tag, xhtml_file.read())[0]
#====================================================================#
# STORY 2
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_2 = findall(headline_tag, xhtml_file.read())[3]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_2 = findall(image_tag, xhtml_file.read())[1]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[1]
if sypnosis == '" /> <a href':
sypnosis_2 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_2 = findall(story_tag[1], str(sypnosis))
sypnosis_2 = ' '.join(sypnosis_2)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_2 = findall(news_tag, xhtml_file.read())[1]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_2 = findall(date_tag, xhtml_file.read())[1]
#====================================================================#
# STORY 3
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_3 = findall(headline_tag, xhtml_file.read())[4]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_3 = findall(image_tag, xhtml_file.read())[2]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[2]
if sypnosis == '" /> <a href':
sypnosis_3 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_3 = findall(story_tag[1], str(sypnosis))
sypnosis_3 = ' '.join(sypnosis_3)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_3 = findall(news_tag, xhtml_file.read())[2]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_3 = findall(date_tag, xhtml_file.read())[2]
#====================================================================#
# STORY 4
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_4 = findall(headline_tag, xhtml_file.read())[5]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_4 = findall(image_tag, xhtml_file.read())[3]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[3]
if sypnosis == '" /> <a href':
sypnosis_4 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_4 = findall(story_tag[1], str(sypnosis))
sypnosis_4 = ' '.join(sypnosis_4)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_4 = findall(news_tag, xhtml_file.read())[3]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_4 = findall(date_tag, xhtml_file.read())[3]
#====================================================================#
# STORY 5
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_5 = findall(headline_tag, xhtml_file.read())[6]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_5 = findall(image_tag, xhtml_file.read())[4]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[4]
if sypnosis == '" /> <a href':
sypnosis_5 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_5 = findall(story_tag[1], str(sypnosis))
sypnosis_5 = ' '.join(sypnosis_5)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_5 = findall(news_tag, xhtml_file.read())[4]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_5 = findall(date_tag, xhtml_file.read())[4]
#====================================================================#
# STORY 6
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_6 = findall(headline_tag, xhtml_file.read())[7]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_6 = findall(image_tag, xhtml_file.read())[5]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[5]
if sypnosis == '" /> <a href':
sypnosis_6 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_6 = findall(story_tag[1], str(sypnosis))
sypnosis_6 = ' '.join(sypnosis_6)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_6 = findall(news_tag, xhtml_file.read())[5]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_6 = findall(date_tag, xhtml_file.read())[5]
#====================================================================#
# STORY 7
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_7 = findall(headline_tag, xhtml_file.read())[8]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_7 = findall(image_tag, xhtml_file.read())[6]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[6]
if sypnosis == '" /> <a href':
sypnosis_7 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_7 = findall(story_tag[1], str(sypnosis))
sypnosis_7 = ' '.join(sypnosis_7)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_7 = findall(news_tag, xhtml_file.read())[6]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_7 = findall(date_tag, xhtml_file.read())[6]
#====================================================================#
# STORY 8
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_8 = findall(headline_tag, xhtml_file.read())[9]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_8 = findall(image_tag, xhtml_file.read())[7]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[7]
if sypnosis == '" /> <a href':
sypnosis_8 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_8 = findall(story_tag[1], str(sypnosis))
sypnosis_8 = ' '.join(sypnosis_8)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_8 = findall(news_tag, xhtml_file.read())[7]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_8 = findall(date_tag, xhtml_file.read())[7]
#====================================================================#
# STORY 9
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_9 = findall(headline_tag, xhtml_file.read())[10]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_9 = findall(image_tag, xhtml_file.read())[8]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[8]
if sypnosis == '" /> <a href':
sypnosis_9 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_9 = findall(story_tag[1], str(sypnosis))
sypnosis_9 = ' '.join(sypnosis_9)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_9 = findall(news_tag, xhtml_file.read())[8]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_9 = findall(date_tag, xhtml_file.read())[8]
#====================================================================#
# STORY 10
#====================================================================#
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
headline_10 = findall(headline_tag, xhtml_file.read())[11]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
image_10 = findall(image_tag, xhtml_file.read())[9]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
sypnosis = findall(item_tag, xhtml_file.read())[9]
if sypnosis == '" /> <a href':
sypnosis_10 = findall(story_tag[0], str(sypnosis))
else:
sypnosis_10 = findall(story_tag[1], str(sypnosis))
sypnosis_10 = ' '.join(sypnosis_10)
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
source_10 = findall(news_tag, xhtml_file.read())[9]
xhtml_file = open(v.get() + '.xhtml','r',encoding = 'UTF-8')
date_10 = findall(date_tag, xhtml_file.read())[9]
#====================================================================#
# Adds the date of the news
html_code = html_template.replace('***SUBTITLE***', subtitle)
# Replace the blanks for story 1
html_code = html_code.replace('***HEADLINE_1***', headline_1)
html_code = html_code.replace('***IMAGE_1***', image_1)
html_code = html_code.replace('***SYPNOSIS_1***', sypnosis_1)
html_code = html_code.replace('***SOURCE_1***', source_1)
html_code = html_code.replace('***DATE_1***', date_1)
# Replace the blanks for story 2
html_code = html_code.replace('***HEADLINE_2***', headline_2)
html_code = html_code.replace('***IMAGE_2***', image_2)
html_code = html_code.replace('***SYPNOSIS_2***', sypnosis_2)
html_code = html_code.replace('***SOURCE_2***', source_2)
html_code = html_code.replace('***DATE_2***', date_2)
# Replace the blanks for story 3
html_code = html_code.replace('***HEADLINE_3***', headline_3)
html_code = html_code.replace('***IMAGE_3***', image_3)
html_code = html_code.replace('***SYPNOSIS_3***', sypnosis_3)
html_code = html_code.replace('***SOURCE_3***', source_3)
html_code = html_code.replace('***DATE_3***', date_3)
# Replace the blanks for story 4
html_code = html_code.replace('***HEADLINE_4***', headline_4)
html_code = html_code.replace('***IMAGE_4***', image_4)
html_code = html_code.replace('***SYPNOSIS_4***', sypnosis_4)
html_code = html_code.replace('***SOURCE_4***', source_4)
html_code = html_code.replace('***DATE_4***', date_4)
# Replace the blanks for story 5
html_code = html_code.replace('***HEADLINE_5***', headline_5)
html_code = html_code.replace('***IMAGE_5***', image_5)
html_code = html_code.replace('***SYPNOSIS_5***', sypnosis_5)
html_code = html_code.replace('***SOURCE_5***', source_5)
html_code = html_code.replace('***DATE_5***', date_5)
# Replace the blanks for story 6
html_code = html_code.replace('***HEADLINE_6***', headline_6)
html_code = html_code.replace('***IMAGE_6***', image_6)
html_code = html_code.replace('***SYPNOSIS_6***', sypnosis_6)
html_code = html_code.replace('***SOURCE_6***', source_6)
html_code = html_code.replace('***DATE_6***', date_6)
# Replace the blanks for story 7
html_code = html_code.replace('***HEADLINE_7***', headline_7)
html_code = html_code.replace('***IMAGE_7***', image_7)
html_code = html_code.replace('***SYPNOSIS_7***', sypnosis_7)
html_code = html_code.replace('***SOURCE_7***', source_7)
html_code = html_code.replace('***DATE_7***', date_7)
# Replace the blanks for story 8
html_code = html_code.replace('***HEADLINE_8***', headline_8)
html_code = html_code.replace('***IMAGE_8***', image_8)
html_code = html_code.replace('***SYPNOSIS_8***', sypnosis_8)
html_code = html_code.replace('***SOURCE_8***', source_8)
html_code = html_code.replace('***DATE_8***', date_8)
# Replace the blanks for story 9
html_code = html_code.replace('***HEADLINE_9***', headline_9)
html_code = html_code.replace('***IMAGE_9***', image_9)
html_code = html_code.replace('***SYPNOSIS_9***', sypnosis_9)
html_code = html_code.replace('***SOURCE_9***', source_9)
html_code = html_code.replace('***DATE_9***', date_9)
# Replace the blanks for story 10
html_code = html_code.replace('***HEADLINE_10***', headline_10)
html_code = html_code.replace('***IMAGE_10***', image_10)
html_code = html_code.replace('***SYPNOSIS_10***', sypnosis_10)
html_code = html_code.replace('***SOURCE_10***', source_10)
html_code = html_code.replace('***DATE_10***', date_10)
#====================================================================#
# Write the HTML code to a file
html_file = open('RTNews.html', 'w')
html_file.write(html_code)
html_file.close()
# Display message
messenger.config(text = 'News Extraction Complete!')
except FileNotFoundError:
messenger.config(text = 'Extraction failed!')
# If the event log button is turned on and
# Extract news button is pressed
try:
if check.get() == 1:
event_description = log_description[0]
sql_statement = template.replace('DESCRIPTION', event_description)
event_db.execute(sql_statement)
connection.commit()
except:
messenger.config(text = sqlite_error)
#====================================================================#
# Displays extracted news function
def display():
location = normpath('/RTNews.html')
path = getcwd()
fullpath = path + location
# Imported 'exists' function from os.path
if exists(fullpath):
webopen('file://' + fullpath)
messenger.config(text = 'Extracted News Displayed!')
else:
messenger.config(text = 'Cannot find Extracted News!')
# If the event log button is turned on
# Display extracted news button is pressed
try:
if check.get() == 1:
event_description = log_description[1]
sql_statement = template.replace('DESCRIPTION', event_description)
event_db.execute(sql_statement)
connection.commit()
except:
messenger.config(text = sqlite_error)
# Archives the latest news function
def archive():
try:
url = 'https://www.rt.com/rss/news/'
web_page = urlopen(url)
web_page_contents = web_page.read().decode('UTF-8')
html_file = open('InternetArchive/RTNews_Latest.xhtml',
'w', encoding = 'UTF-8')
html_file.write(web_page_contents)
html_file.close()
messenger.config(text = 'Latest News Archived!')
except:
messenger.config(text = 'Check Internet Connection / Internet Archive')
# If the event log button is turned on
# Archive latest news button is pressed
try:
if check.get() == 1:
event_description = log_description[2]
sql_statement = template.replace('DESCRIPTION', event_description)
event_db.execute(sql_statement)
connection.commit()
except:
messenger.config(text = sqlite_error)
#====================================================================#
#GUI
#====================================================================#
# Messenger label
messenger = Label(window, text = 'Select a date...', bg='white', fg='black',
font = ('Arial', 22))
messenger.place(relx=0.75, rely=0.65, anchor='center')
messenger.configure(wraplength='260')
# Extract button
extract_button = Button(window, text = 'Extract news from archive',
font = ('Arial', 14), fg='black', bg='white',
relief = 'solid', command = generate_html)
extract_button.place(relx=0.515, rely=0.78, height=60, width=120)
extract_button.configure(wraplength='120')
# Display button
display_button = Button(window, text = 'Display news extracted',
font = ('Arial', 14), fg='black', bg='white',
relief = 'solid', command = display)
display_button.place(relx=0.675, rely=0.78, height=60, width=120)
display_button.configure(wraplength='120')
# Archive button
archive_button = Button(window, text = 'Archive the latest news',
font = ('Arial', 14), fg='black', bg='white',
relief = 'solid', command = archive)
archive_button.place(relx=0.835, rely=0.78, height=60, width=120)
archive_button.configure(wraplength='120')
# Log event checkbox
log_button = Checkbutton(window, text="Log events", font = ('Arial', 14),
fg='black', bg='white', variable = check,
command = log_events)
log_button.place(relx=0.675, rely=0.9)
# Logo image
try:
Label(window, bg = 'black', height = 12).pack(fill=X, pady=23)
img = PhotoImage(file="RT_Image.gif")
img = img.zoom(16)
img = img.subsample(32)
logo = Label(window, image=img, borderwidth=0)
logo.place(relx=0.025, rely=0.04)
except:
messenger.config(text = 'Error: RT_Image.gif not found!')
extract_button.config(state = DISABLED)
display_button.config(state = DISABLED)
archive_button.config(state = DISABLED)
log_button.config(state = DISABLED)
# Logo subtitle
logo_subtitle = Label(window, text = 'Russia Today News Archive',
bg='white', fg='black', font = ('Arial', 22, 'bold'))
logo_subtitle.place(relx=0.625, rely=0.4)
logo_subtitle.configure(wraplength='260')
# -------------------------------------------------------------------#
# Start the event loop
window.mainloop()
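# Illustrative sketch only (an assumption, not part of the submitted solution):
# the ten copy-pasted "STORY n" blocks in generate_html() apply the same five
# patterns with shifted indices, so the extraction step could be expressed as a
# single parameterised helper like the one below. It is never called here.
def extract_story(xhtml_text, story_index):
    headline = findall('<title>(.*)</title>', xhtml_text)[story_index + 2]
    image = findall('src="(.*?)"', xhtml_text)[story_index]
    item = findall('<item>([\s\S]+?)</item>', xhtml_text)[story_index]
    sypnosis = ' '.join(findall('" /> ([\s\S]+?)<br/><a href', item))
    source = findall('<guid>(.*)</guid>', xhtml_text)[story_index]
    date = findall('<pubDate>(.*)</pubDate>', xhtml_text)[story_index]
    return headline, image, sypnosis, source, date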
| true
|
b33c213d6b70f541a837bcd444d90265ef208cea
|
Python
|
LibreGamesArchive/galaxymageredux
|
/lib/gui/messagebox.py
|
UTF-8
| 656
| 2.65625
| 3
|
[] |
no_license
|
import label, container, misc
class MessageBox(container.Container):
widget_type = 'MessageBox'
def __init__(self, parent, pos, size, name=None):
container.Container.__init__(self, parent, pos, size, name)
def set_top_widget(self, widget):
pass
def add_line(self, text):
label.Label(self, misc.AbsolutePos((0,0)), text)
max_lines = self.get_theme_val('max_lines', 10)
lasty = self.size[1]
for i in self.widgets:
s = i.get_size_with_padding()
i.pos.y = lasty - s[1]
lasty -= s[1]
self.widgets = self.widgets[0:max_lines]
| true
|
94b839f1bb65660cc5e17f0aecf9bf2c128ff355
|
Python
|
ykaw/PiCW
|
/tests/iambic.py
|
UTF-8
| 3,193
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import pigpio
import time
import threading
import readline
# initialization of GPIO
#
pi=pigpio.pi()
if not pi.connected:
exit()
port_dit=23
port_dah=24
pi.set_mode(port_dit, pigpio.INPUT)
pi.set_mode(port_dah, pigpio.INPUT)
pi.set_pull_up_down(port_dit, pigpio.PUD_UP)
pi.set_pull_up_down(port_dah, pigpio.PUD_UP)
pi.set_glitch_filter(port_dit, 3000)
pi.set_glitch_filter(port_dah, 3000)
port_pwm=18
pwm_freq= 500
pi.set_mode(port_pwm, pigpio.OUTPUT)
pi.hardware_PWM(port_pwm, pwm_freq, 0)
pi.set_PWM_frequency(port_pwm, pwm_freq)
pi.set_PWM_dutycycle(port_pwm, 0)
# handlings for dits and dahs
#
wpm=20
ditlen=60/(50*wpm)
# global status
# and notifying event
#
dit_mark =False
dah_mark =False
evt_mark =0
sqz_marks=[]
ev=threading.Event()
# subthread for iambic output
#
def keying_iambic():
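    # Runs in a background thread: it waits for a paddle event, then keeps
    # emitting elements while a paddle is held. When both paddles are squeezed
    # it alternates dits and dahs; sqz_marks queues elements registered while
    # the current element was still sounding.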
def output_with_squeezed(mark):
def mark_and_space(mark, space):
global pi
global sqz_marks
pi.set_PWM_dutycycle(port_pwm, 128)
time.sleep(mark)
pi.set_PWM_dutycycle(port_pwm, 0)
time.sleep(space)
global ditlen
global sqz_marks
sqz_marks=[]
if mark==1:
alt_mark=2
mark_and_space(ditlen, ditlen)
elif mark==2:
alt_mark=1
mark_and_space(3*ditlen, ditlen)
while sqz_marks:
mark=sqz_marks.pop(0)
if mark==1:
alt_mark=2
mark_and_space(ditlen, ditlen)
elif mark==2:
alt_mark=1
mark_and_space(3*ditlen, ditlen)
return(alt_mark)
global ditlen
global dit_mark, dah_mark, evt_mark, sqz_marks
global ev
while True:
ev.clear()
ev.wait()
next_mark=output_with_squeezed(evt_mark)
while True:
if dit_mark and dah_mark:
next_mark=output_with_squeezed(next_mark)
elif dit_mark:
next_mark=output_with_squeezed(1)
elif dah_mark:
next_mark=output_with_squeezed(2)
else:
break
iambic=threading.Thread(target=keying_iambic)
iambic.start()
# callback function for press/release paddles
#
def cb_func(port, level, tick):
global dit_mark, dah_mark, evt_mark, sqz_marks
global ev
# paddle pressed
if level==0:
if port==port_dit:
evt_mark=1
dit_mark=True
elif port==port_dah:
evt_mark=2
dah_mark=True
sqz_marks.append(evt_mark)
# notify to iambic subthread
ev.set()
# paddle released
elif level==1:
if port==port_dit:
dit_mark=False
elif port==port_dah:
dah_mark=False
# register callbacks
#
cb_dit=pi.callback(port_dit, pigpio.EITHER_EDGE, cb_func)
cb_dah=pi.callback(port_dah, pigpio.EITHER_EDGE, cb_func)
# command loop
#
try:
while True:
line=input(str(wpm) + ' WPM : ')
wpm=float(line)
ditlen=60/(50*wpm)
pi.set_PWM_dutycycle(port_pwm, 0)
pi.stop()
except (EOFError, KeyboardInterrupt):
pi.set_PWM_dutycycle(port_pwm, 0)
pi.stop()
| true
|
f1a5dc64d3ec6c0f4c71b30235923f758048174f
|
Python
|
evinpinar/competitive_python
|
/leetcode/41.py
|
UTF-8
| 1,631
| 3.5
| 4
|
[] |
no_license
|
def firstMissingPositive(nums):
## Find smallest missing positive integer
mini = float("inf")
for num in nums:
if num>0:
mini = num
break
for num in nums:
if num>0 and num < mini:
mini = num
maxi = nums[0]
for num in nums:
if num>0 and num > mini:
maxi = num
if mini >1:
return 1
bin = [0]*(maxi-mini+1)
for num in nums:
if num >0:
bin[num-mini] = 1
for i, b in enumerate(bin):
if b == 0:
return (i+mini)
return maxi+1
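# Approach 2: in-place cyclic placement -- repeatedly swap each value v in
# 1..len(nums) into slot v-1, then the first index i with nums[i] != i+1
# gives the answer. O(n) time, O(1) extra space (aside from the debug print).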
def firstMissingPositive2(nums):
for i in range(len(nums)):
while 0 <= nums[i]-1 < len(nums) and nums[nums[i]-1] != nums[i]:
tmp = nums[i]-1
nums[i], nums[tmp] = nums[tmp], nums[i]
print(i, " : ",nums)
for i in range(len(nums)):
if nums[i] != i+1:
return i+1
return len(nums)+1
def firstMissingPositive3(nums):
if not nums:
return 1
nums.append(0)
l = len(nums)
for i in range(l):
if nums[i] <= 0 or nums[i] >= l:
nums[i] = 0
for i in range(l):
nums[nums[i] % l] += l
print(i, nums)
for i in range(l):
if not nums[i] // l:
return i
return l
def firstMissingPositive4(nums) -> int:  # renamed to avoid shadowing the first definition above
if not nums:
return 1
num_set = set(nums)
for i in range(1, len(nums) + 2):
if i not in num_set:
return i
in1= [1, 2, 0]
in2 = [3, 4, -1, 1]
in3 = [7, 8, 9, 11, 12]
in4 = [0,1,2,4]
in5 = [3, 5, 7, 4, 2, 1]
print(firstMissingPositive3(in3))
| true
|
1c208f1ad698529739c1813cab89198488f9d4b6
|
Python
|
Ewan-Selkirk/Advent-of-Code-2020
|
/src/day2/day2.py
|
UTF-8
| 1,784
| 3.3125
| 3
|
[] |
no_license
|
psw_req = []
psw_char = []
psw = []
count = 0
def check_password(p_min, p_max, char, password, debug):
if int(p_min) <= password.count(char) <= int(p_max):
if debug:
print(password, "is a valid password!")
return True
else:
if debug:
print(password, "is not a valid password!")
return False
def check_password_p2(p_min, p_max, char, password, debug):
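    # Part 2 rule: the character must appear in exactly one of the two
    # (1-indexed) positions, i.e. an exclusive-or of the two position checks.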
psw_list = list(password)
if psw_list[p_min - 1] == char or psw_list[p_max - 1] == char:
if not (psw_list[p_min - 1] == char and psw_list[p_max - 1] == char):
if debug:
print(password, "is a valid password!")
return True
else:
if debug:
print(password, "is not a valid password!")
return False
def setup():
f = open("./input.txt", "r")
for line in f:
split_line = line.split(" ")
psw_req.append(split_line[0])
psw_char.append(split_line[1].strip(":"))
psw.append(split_line[2].strip("\n"))
def day2_p1(debug):
global count
for x in range(0, 1000):
min_req = int(psw_req[x].split("-")[0])
max_req = int(psw_req[x].split("-")[1])
if check_password(min_req, max_req, psw_char[x], psw[x], debug):
count += 1
print(count)
count = 0
def day2_p2(debug):
global count
for x in range(0, 1000):
min_req = int(psw_req[x].split("-")[0])
max_req = int(psw_req[x].split("-")[1])
if check_password_p2(min_req, max_req, psw_char[x], psw[x], debug):
count += 1
print(count)
count = 0
if __name__ == '__main__':
setup()
day2_p1(True)
day2_p2(True)
| true
|
c69e199c3323fa7df0fdb2bd1740e3cac4b7655f
|
Python
|
maciexu/data_manipulation_with_pandas
|
/Slicing_indexing.py
|
UTF-8
| 5,232
| 4.625
| 5
|
[] |
no_license
|
""" why index?--->>> index can use loc --->>>
Setting an index allows more concise code for subsetting rows of a categorical variable via .loc[]
"""
# Example 1
# Look at temperatures
print(temperatures)
# Index temperatures by city
temperatures_ind = temperatures.set_index("city")
# Look at temperatures_ind
print(temperatures_ind)
# Reset the index, keeping its contents
print(temperatures_ind.reset_index())
# Reset the index, dropping its contents
print(temperatures_ind.reset_index(drop=True))
# Example 2
# Make a list of cities to subset on
cities = ['Moscow', 'Saint Petersburg']
# Subset temperatures using square brackets
print(temperatures[temperatures['city'].isin(cities)])
# Subset temperatures_ind using .loc[]
print(temperatures_ind.loc[cities])
""" Setting multi-level indexes --->>> nested categorical variables """
# Index temperatures by country & city
temperatures_ind = temperatures.set_index(['country', 'city'])
# List of tuples: Brazil, Rio De Janeiro & Pakistan, Lahore
rows_to_keep = [('Brazil', 'Rio De Janeiro'), ('Pakistan', 'Lahore')]
# Subset for rows to keep
print(temperatures_ind.loc[rows_to_keep])
""" .sort_index() """
# Sort temperatures_ind by index values
print(temperatures_ind.sort_index())
# Sort temperatures_ind by index values at the city level
print(temperatures_ind.sort_index(level='city'))
# Sort temperatures_ind by country then descending city
print(temperatures_ind.sort_index(level=['country', 'city'], ascending=[True, False]))
""" loc and iloc
You can only slice an index if the index is sorted (using .sort_index()).
To slice at the outer level, first and last can be strings.
To slice at inner levels, first and last should be tuples.
If you pass a single slice to .loc[], it will slice the rows.
"""
# Example 1
# Sort the index of temperatures_ind
temperatures_srt = temperatures_ind.sort_index()
# Subset rows from Pakistan to Russia
print(temperatures_srt.loc['Pakistan':'Russia'])
# Try to subset rows from Lahore to Moscow
print(temperatures_srt.loc['Lahore':'Moscow'])
# Subset rows from Pakistan, Lahore to Russia, Moscow
print(temperatures_srt.loc[('Pakistan', 'Lahore'):('Russia','Moscow')])
# Example 2
# Subset rows from India, Hyderabad to Iraq, Baghdad
print(temperatures_srt.loc[('India', 'Hyderabad'):('Iraq', 'Baghdad')])
# Subset columns from date to avg_temp_c
print(temperatures_srt.loc[:,'date':'avg_temp_c'])
# Subset in both directions at once
print(temperatures_srt.loc[('India', 'Hyderabad'):('Iraq', 'Baghdad'), 'date':'avg_temp_c'])
""" Slicing time series
Note that because the date isn't set as an index, a condition that contains only a year, such as df['date'] == '2009',
will check if the date is equal to the first day of the first month of the year (e.g. 2009-01-01),
rather than checking whether the date occurs within the given year.
"""
# Use Boolean conditions to subset temperatures for rows in 2010 and 2011
print(temperatures[(temperatures['date']>='2010-01-01') & (temperatures['date']<='2011-12-31')])
# Set date as an index
temperatures_ind = temperatures.set_index('date')
# Use .loc[] to subset temperatures_ind for rows in 2010 and 2011
print(temperatures_ind.loc['2010':'2011'])
# Use .loc[] to subset temperatures_ind for rows from Aug 2010 to Feb 2011
print(temperatures_ind.loc['2010-08':'2011-02'])
""" Subsetting by row/column number --->>> iloc() """
# Get 23rd row, 2nd column (index 22, 1)
print(temperatures.iloc[22, 1])
# Use slicing to get the first 5 rows
print(temperatures.iloc[:5])
# Use slicing to get columns 3 to 4
print(temperatures.iloc[:, 2:4])
# Use slicing in both directions at once
print(temperatures.iloc[:5, 2:4])
""" Pivot temperature by city and year
Note: You can access the components of a date (year, month and day) using code of the form
dataframe["column"].dt.component.
For example, the month component is
dataframe["column"].dt.month,
and the year component is
dataframe["column"].dt.year.
"""
# Add a year column to temperatures
temperatures['year'] = temperatures['date'].dt.year
# Pivot avg_temp_c by country and city vs year
temp_by_country_city_vs_year = temperatures.pivot_table(values='avg_temp_c', index=['country','city'], columns='year')
# See the result
print(temp_by_country_city_vs_year)
# Subset for Egypt to India
temp_by_country_city_vs_year.loc['Egypt':'India']
# Subset for Egypt, Cairo to India, Delhi
temp_by_country_city_vs_year.loc[('Egypt', 'Cairo') : ('India', 'Delhi')]
# From Egypt, Cairo to India, Delhi and 2005 to 2010
temp_by_country_city_vs_year.loc[('Egypt', 'Cairo') : ('India', 'Delhi'), 2005:2010]
""" Note:
Call .mean() without arguments to get the mean of each column.
Call .mean(), setting axis to "columns", to get the mean of each row.
"""
# Get the worldwide mean temp by year
mean_temp_by_year = temp_by_country_city_vs_year.mean()
# Filter for the year that had the highest mean temp
print(mean_temp_by_year[mean_temp_by_year==mean_temp_by_year.max()])
# Get the mean temp by city
mean_temp_by_city = temp_by_country_city_vs_year.mean(axis='columns')
# Filter for the city that had the lowest mean temp
print(mean_temp_by_city[mean_temp_by_city==mean_temp_by_city.min()])
| true
|
410a46591843e2f56f78829ea98af4fe6247fca6
|
Python
|
katie-mata/aoc
|
/day1/day1.py
|
UTF-8
| 670
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import itertools
import operator
from functools import reduce
def read_input_file(filename):
with open(filename, 'r') as f:
return f.readlines()
def parse_input(input):
return list(map(int, input))
def find_tuples(numbers, tuple_len):
combinations = itertools.combinations(numbers, tuple_len)
return [c for c in list(combinations) if sum(c) == 2020]
def get_product(numbers):
return reduce(operator.mul, numbers, 1)
pairs = find_tuples(parse_input(read_input_file('day1_1.txt')), 2)
print(get_product(pairs[0]))
triples = find_tuples(parse_input(read_input_file('day1_1.txt')), 3)
print(get_product(triples[0]))
| true
|
8fdca9211705a9508d1d2f170e52c629adcf5d55
|
Python
|
EriKKo/adventofcode-2018
|
/12/a.py
|
UTF-8
| 614
| 2.90625
| 3
|
[] |
no_license
|
import sys
lines = [l.strip() for l in sys.stdin]
s = lines[0].split()[2]
m = {}
for i in range(len(s)):
if s[i] == '#':
m[i] = True
trans = {}
for l in lines[2:]:
a,b = l.split(" => ")
trans[a] = b == "#"
def simulate(state):
start = min(state.keys()) - 2
end = max(state.keys()) + 2
newState = {}
for i in range(start, end + 1):
s = ""
for j in range(i - 2, i + 3):
s += "#" if j in state else "."
if s in trans and trans[s]:
newState[i] = True
return newState
def score(state):
return sum(state.keys())
for i in range(20):
m = simulate(m)
print(score(m))
| true
|
47506d12cbc08a3fba76f8419a92c9f3ba0f89fb
|
Python
|
TWoolhouse/Python
|
/Project/Remote/host.py
|
UTF-8
| 610
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
import libs
import node
import sound
import time
vol = sound.Volume(15)
media = sound.Media()
def volume(self, volume, rel):
try:
print("Volume:", vol.set_volume(int(volume), True if rel == "True" else False))
self.send(vol.volume(), "vol")
except ValueError: pass
def prev(self):
media.prev()
print("Previous")
def next(self):
media.next()
print("Next")
def pause(self):
media.pause()
print("Play/Pause")
host = node.Server(funcs={"volume":volume, "pause":pause, "next":next, "prev":prev})
host.open()
while host:
if input():
break
host.close()
| true
|
df82300e17423731f91320a071b8fa945422f4b4
|
Python
|
Success2014/Leetcode
|
/majorityElement.py
|
UTF-8
| 3,776
| 4.3125
| 4
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 14:10:50 2015
Given an array of size n, find the majority element.
The majority element is the element that appears more than ⌊ n/2 ⌋ times.
You may assume that the array is non-empty and the majority element always
exist in the array.
Tags: Divide and Conquer Array Bit Manipulation
Similar Problems (M) Majority Element II
Answers:
1. Runtime: O(n2) — Brute force solution:
Check each element if it is the majority element.
2. Runtime: O(n), Space: O(n) — Hash table:
Maintain a hash table of the counts of each element, then find the most common one.
3. Runtime: O(n log n) — Sorting: As we know more than
half of the array are elements of the same value, we can
sort the array and all majority elements will be grouped
into one contiguous chunk. Therefore, the middle (n/2th)
element must also be the majority element.
4. Average runtime: O(n), Worst case runtime: Infinity — Randomization:
Randomly pick an element and check if it is the majority element.
If it is not, do the random pick again until you find the majority element.
As the probability to pick the majority element is greater than 1/2,
the expected number of attempts is < 2.
5. Runtime: O(n log n) — Divide and conquer: Divide the array into two halves,
then find the majority element A in the first half and the majority element B
in the second half. The global majority element must either be A or B. If A == B,
then it automatically becomes the global majority element. If not, then both A
and B are the candidates for the majority element, and it is suffice to check
the count of occurrences for at most two candidates. The runtime complexity,
T(n) = T(n/2) + 2n = O(n log n).
6. Runtime: O(n) — Moore voting algorithm: We maintain a current candidate
and a counter initialized to 0. As we iterate the array, we look at the current element x:
If the counter is 0, we set the current candidate to x and the counter to 1.
If the counter is not 0, we increment or decrement the counter based on whether
x is the current candidate.
After one pass, the current candidate is the majority element.
Runtime complexity = O(n).
7. Runtime: O(n) — Bit manipulation: We would need 32 iterations, each calculating the number of 1's for the ith bit of all n numbers. Since a majority must exist, therefore, either count of 1's > count of 0's or vice versa (but can never be equal). The majority number’s ith bit must be the one bit that has the greater count.
@author: Neo
"""
class Solution:
# @param {integer[]} nums
# @return {integer}
def majorityElement(self, nums):
nums_dict = {}
n = len(nums)
for num in nums:
            if num in nums_dict:
nums_dict[num] += 1
if nums_dict[num] > n/2:
return num
else:
nums_dict[num] = 1
if nums_dict[num] > n/2:
return num
"""
思想可以延伸到出现次数大于n/k的情况(当然基于hash的方法也可以)
"""
def majorityElement2(self, nums):
"""moore voting algorithm, count must be > 0 at last
最多只有1个这样的数。[n/2]=n/2 or (n-1)/2.大于[n/2],则需要
(n/2 + 1) or (n+1)/2.两个这样的数则总共需要(n+2) or (n+1)个数。"""
if len(nums) == 1:
return nums[0]
count = 0
for num in nums:
if count == 0:
candidate = num
count = 1
elif num == candidate:
count += 1
else:
count -= 1
return candidate
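
# Added sketch (not from the original author): approach 7 from the docstring above,
# bit manipulation. For each of the 32 bit positions, count how many numbers have
# that bit set; the majority element must win every bit position.
# Assumes the inputs fit in 32-bit signed integers.
def majority_element_bits(nums):
    n = len(nums)
    result = 0
    for bit in range(32):
        ones = sum(1 for num in nums if (num >> bit) & 1)
        if ones > n // 2:
            result |= 1 << bit
    if result >= 1 << 31:  # reinterpret the bit pattern as a signed 32-bit int
        result -= 1 << 32
    return result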
sol = Solution()
print(sol.majorityElement2([1,2,2,2,3,2]))
| true
|
7e784bbfc674d2a5e3e734584b194efba44f2cdc
|
Python
|
git123hub121/Python-analysis
|
/数据化分析-豆瓣/flask_douban/testCloud.py
|
UTF-8
| 857
| 2.96875
| 3
|
[] |
no_license
|
import jieba  # word segmentation
from matplotlib import pyplot as plt  # plotting, data visualization
from wordcloud import WordCloud  # word cloud
from PIL import Image  # image processing
import numpy as np  # matrix operations
import sqlite3  # database
con = sqlite3.connect('movie.db')
cur = con.cursor()
sql = 'select instroduction from movie250'
data = cur.execute(sql)
text = ""
for item in data:
text = text + item[0]
cur.close()
con.close()
# word segmentation
cut = jieba.cut(text)
string = ' '.join(cut)
print(len(string))
img = Image.open(r'.\static\assets\img\tree.jpg')
img_array = np.array(img)
wc = WordCloud(
background_color='white',
mask=img_array,
font_path="msyh.ttc"
).generate_from_text(string)
# draw the image
flg = plt.figure(1)
plt.imshow(wc)
plt.axis('off')
#plt.show()  # display the generated image
plt.savefig(r'.\static\assets\img\word.jpg',dpi=500)
| true
|
18ebf0e2c41bfa7b05db8692731a7175f31c1614
|
Python
|
cirosantilli/python-cheat
|
/re_cheat.py
|
UTF-8
| 3,975
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
'''
https://docs.python.org/3/library/re.html
'''
import re
if '## Syntax':
if '## Lookahead':
        # A lookahead doesn't consume the text it matches (compare the two subs below).
p = re.compile(r'a.')
assert p.sub('0', 'abaac') == '00c'
p = re.compile(r'a(?=.)')
assert p.sub('0', 'abaac') == '0b00c'
if '## re module':
if '## compile':
"""
Return a RegexObject object.
Caches the regex parsing to make it faster.
Always use this unless you will long match once.
Contains basically the same methods as the `re` module.
"""
p = re.compile(r'a.c')
assert p.match('abc')
if '## flags':
assert re.match('a', 'A', re.IGNORECASE)
# Need flags= for re.sub, or set the count=
# https://stackoverflow.com/questions/42581/python-re-sub-with-a-flag-does-not-replace-all-occurrences/42597#42597
assert re.sub('^a', '0', 'ab\nab\n', flags=re.MULTILINE) == '0b\n0b\n'
if '## sub':
# Replace what was matched.
p = re.compile('(a.|b.)')
# By string:
assert p.sub('0', 'a_b_abc') == '000c'
# By callable:
assert p.sub(lambda m: m.group(1)[1:], 'a_b-abc') == '_-bc'
# Count:
assert p.sub('0', 'a_b_abc', count=1) == '0b_abc'
if '## subn':
# Same as sub, but also returns number of subs made:
assert p.subn('0', 'a_b_abc') == ('000c', 3)
if '## match':
re.match(r'a.c', 'abc')
assert re.match(r'a.c', 'abc')
# Must match from beginning of string!
# Consider re.search instead.
# http://stackoverflow.com/questions/28840903/python-regex-match-middle-of-string
assert re.match(r'a.c', '0abc') is None
# Does not however have to match until the end:
assert re.match(r'a.c', 'abc0')
if '## search':
"""
Like match, but also matches in the middle.
"""
assert re.search(r'a.c', '0abc')
# Works.
assert re.search(r'a.c', 'abcaBc')
# . == b, stops at first match. to find all matches, use finditer
# Line start and end are still honoured.
assert not re.search(r'^a', 'ba')
# search and group
assert re.search(r'a(.)c(.)e', 'Xa0c1eYa2c3eZ').group(1) == ('0')
assert re.search(r'a(.)c(.)e', 'Xa0c1eYa2c3eZ').group(2) == ('1')
assert re.search(r'a(.)c(.)e', 'Xa0c1eYa2c3eZ').group(1, 2) == ('0', '1')
if '## finditer':
# A list of all non-overlapping match objects.
matches = list(re.finditer(r'a.c', 'abcaBc'))
if '## split':
assert re.split(r'[ab]+', '0abba1aaaaa2') == ['0', '1', '2']
# https://stackoverflow.com/questions/2136556/in-python-how-do-i-split-a-string-and-keep-the-separators
assert re.split('(0|1)', 'a0bc1d0ef') == ['a', '0', 'bc', '1', 'd', '0', 'ef']
# https://stackoverflow.com/questions/24443995/list-comprehension-joining-every-two-elements-together-in-a-list
def split_and_keep(reg, string):
reg = '(' + reg + ')'
lst = re.split(reg, string)
if len(lst) % 2 == 1:
lst.append('')
for x, y in zip(lst[0::2], lst[1::2]):
yield x + y
assert list(split_and_keep('0|1', 'a0bc1d0ef')) == ['a0', 'bc1', 'd0', 'ef']
"""
## Match object
## MatchObject
https://docs.python.org/2/library/re.html#re.MatchObject
Impossible to access this class: http://stackoverflow.com/questions/4835352/how-to-subclass-the-matchobject-in-python ...
Important methods: TODO examples
group() Return the string matched by the RE
start() Return the starting position of the match
end() Return the ending position of the match
span() Return a tuple containing the (start, end) positions of the match
"""
"""
## RegexObject
Returned by compile.
https://docs.python.org/2/library/re.html#re.RegexObject
"""
| true
|
5112c4f5c46b38bba7246fcd524547aad6bd0ad5
|
Python
|
Linus-MK/AtCoder
|
/AtCoder_unofficial/iroha2019_day1_f.py
|
UTF-8
| 1,049
| 3.6875
| 4
|
[] |
no_license
|
def factorize(n):
'''
    Perform prime factorization.
    I considered a list of tuples, but you can't bump an exponent while doing the
    repeated division, and a nested list felt clumsy, so a dict it is.
'''
if n == 1:
        raise ValueError('n >= 2')
factor = {}
div = 2
while True:
if div * div > n:
factor[n] = factor.get(n, 0) + 1
return factor
if n % div == 0:
n //= div
factor[div] = factor.get(div, 0) + 1
else:
div += 1
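
# Illustrative check (added): 360 = 2**3 * 3**2 * 5
assert factorize(360) == {2: 3, 3: 2, 5: 1}
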
n, k = list(map(int, input().split()))
if n == 1:
print(-1)
exit()
d = factorize(n)
max_len = sum([power for power in d.values()])
if k > max_len:
print(-1)
else:
ans_list = []
for i in range(k-1):
min_div = min(d.keys())
n //= min_div
ans_list.append(min_div)
if d[min_div] == 1:
del d[min_div]
else:
d[min_div] -= 1
ans_list.append(n)
ans_str = list(map(str, ans_list))
print(' '.join(ans_str))
| true
|
232919676fe30251686df94a145bb9e14ce25489
|
Python
|
techtronics/project-ML
|
/scripts/filetolist.py
|
UTF-8
| 123
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/python
import sys
d = []
with open(sys.argv[1]) as f:
for line in f:
d.append(line.rstrip())
print(d)
| true
|
1ede7636918ecfede7130a9b862eafa14947c337
|
Python
|
rafaelwitter/SelfLearning
|
/spreadsheet.py
|
UTF-8
| 686
| 3.0625
| 3
|
[] |
no_license
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# use creds to create a client to interact with the Google Drive API
scope = ['https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open("tabela").sheet1
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()
list_teste = sheet.get_all_values()
sheet.update_cell(3, 2, "I just wrote to a spreadsheet using Python!")
sheet.update_cell(3,4, "oi")
print(sheet.cell(3, 4).value)  # Worksheet has no .print(); read the cell back instead
| true
|
ce534bf9030bf3d95ea99a712783254f9a62bd6c
|
Python
|
sangwoo7957/Algorithm
|
/Baekjun17404.py
|
UTF-8
| 658
| 2.734375
| 3
|
[] |
no_license
|
import sys
n = int(input())
s = []
for _ in range(n):
s.append(list(map(int, input().split())))
result = sys.maxsize
for color in range(3):
dp = [[0 for _ in range(n)] for _ in range(3)]
for i in range(3):
if i == color:
dp[i][0] = s[0][i]
continue
dp[i][0] = sys.maxsize
for i in range(1, n):
dp[0][i] = s[i][0] + min(dp[1][i - 1], dp[2][i - 1])
dp[1][i] = s[i][1] + min(dp[0][i - 1], dp[2][i - 1])
dp[2][i] = s[i][2] + min(dp[0][i - 1], dp[1][i - 1])
for i in range(3):
if i == color:
continue
result = min(result, dp[i][-1])
print(result)
| true
|
2bf19b3f1908ff9a09cafd5da82a20db1cb1efe8
|
Python
|
feelosophy13/buildwithme
|
/sessionDAO.py
|
UTF-8
| 2,240
| 2.90625
| 3
|
[] |
no_license
|
import sys
import random
import string
import bson
## The session Data Access Object handles interactions with the sessions collection
class sessionDAO:
def __init__(self, database):
self.db = database
self.sessions = database.sessions
def start_session(self, userID, firstname):
session_id = self.get_random_str(32) # create a session ID string
session = {'_id': session_id, 'u': userID, 'f': firstname}
try:
self.sessions.insert(session)
except:
print "Unexpected error on start_session:", sys.exc_info()[0]
return None
return str(session['_id'])
def end_session(self, session_id):
if session_id is None:
return None
try:
self.sessions.remove({'_id': session_id})
except:
print "Unexpected error on end_session:", sys.exc_info()[0]
return
def get_session(self, session_id):
if session_id is None:
return None
session = self.sessions.find_one({'_id': session_id})
return session
def get_userID(self, session_id):
session = self.get_session(session_id)
if session is None:
            return None
else:
return session['u'] # return userID
def get_userID_firstname(self, session_id):
session = self.get_session(session_id)
if session is None:
return None, None
else:
return session['u'], session['f'] # return userID and first name
def get_random_str(self, num_chars):
random_string = ""
for i in range(num_chars):
random_string = random_string + random.choice(string.ascii_letters)
return random_string
def update_user_firstname(self, userID, firstname):
userID = bson.objectid.ObjectId(userID)
try:
update_status = self.sessions.update({'u':userID}, {'$set':{'f':firstname}}, multi = True)
return update_status['nModified'] > 0 # nModified is zero if there was no session log with the given userID
except:
print "Unexpected error on end_session:", sys.exc_info()[0]
return False
| true
|
3793cd2cbaaec1f44bbfc262f1f9326a8d034a73
|
Python
|
hermelandocp/tarea1
|
/algoritmo1.py
|
UTF-8
| 365
| 3.53125
| 4
|
[] |
no_license
|
nombre_usuario_uno = input("ingresa tu nombre: ")
edad_usuario_uno = int(input("ingresa tu edad: "))  # convert to int so ages compare numerically, not as strings
nombre_usuario_dos = input("ingresa tu nombre: ")
edad_usuario_dos = int(input("ingresa tu edad: "))
if edad_usuario_uno > edad_usuario_dos:
print ("la persona mas grande es: " + nombre_usuario_uno)
else:
print ("la persona mas grande es: " + nombre_usuario_dos)
| true
|
5f68f817a2002f3cd75bf1309d5a4ea4cf5eb3e7
|
Python
|
diserdyuk/parse_finance_yahoo_most_actives
|
/finance_yahoo.py
|
UTF-8
| 2,195
| 3.28125
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
import csv
def get_html(url): # requests and response from url
r = requests.get(url)
if r.ok: # catch
return r.text
print(r.status_code)
def write_csv(d): # function write data to csv
with open('finance_yahoo.csv', 'a') as f:
write = csv.writer(f)
write.writerow((d['symbol'],
d['price'],
d['url']))
def get_data(html): # func.parse html code
soup = BeautifulSoup(html, 'lxml')
trs = soup.find('table').find('tbody').find_all('tr') # find tags for parse data
cnt = 0
for tr in trs:
cnt += 1
tds = tr.find_all('td')
try: # parse symbol from page
symbol = tds[0].find('a').text
except:
            symbol = ''  # if the info is missing, fall back to an empty string
try: # parse price from page
price = tds[2].find('span').text
except:
price = ''
try: # parse url from page
url = 'https://finance.yahoo.com' + tds[0].find('a').get('href')
except:
url = ''
data = {'symbol': symbol, # packing parse data in dict
'price': price,
'url': url}
write_csv(data) # write parse data in csv
def main(): # hub all functions
url = 'https://finance.yahoo.com/most-active?count=25&offset=0'
cnt_symbol = 0
cnt_page = 0 # count pages, +25
while True: # cycle for parse each page
get_data(get_html(url))
cnt_page += 25
soup = BeautifulSoup(get_html(url), 'lxml')
trs = soup.find('table').find('tbody').find_all('tr')
for tr in trs: # cycle for stop parser
cnt_symbol += 1
            if cnt_symbol == 223:  # 223 is the last symbol across all pages
                break
        try:  # if the next page is missing, catch the exception and stop
url = 'https://finance.yahoo.com/most-active?count=25&offset=' + str(cnt_page) # 2nd var can use method format
except:
break
if __name__ == '__main__':
main()
| true
|
03b4424c018b525661c3fadac52ff37ff708eac9
|
Python
|
kspar/moodle-fetch-test
|
/tester.py
|
UTF-8
| 1,352
| 2.890625
| 3
|
[] |
no_license
|
from grader import *
from rattad import *
from random import randint
FUNCTION_NAME = 'vahimatest_suurim'
def solution(mat):
return max([min(row) for row in mat])
def random_matrix():
rows = randint(1, 10)
cols = randint(1, 5)
result = []
for i in range(rows):
row = []
for j in range(cols):
row.append(randint(-10, 10))
result.append(row)
return result
def gen_test(test_arg,desc):
@test
@expose_ast
@set_description(desc)
def do_test(m, AST):
must_not_have_input(AST)
must_have_func_def_toplevel(m.module, FUNCTION_NAME)
actual_func_node = get_function_def_node(AST, FUNCTION_NAME)
must_have_n_params(actual_func_node, 1)
actual_func_obj = get_function(m.module, FUNCTION_NAME)
must_have_equal_return_values(solution, actual_func_obj, FUNCTION_NAME, test_arg, args_repr=matrix_repr(test_arg))
a = [[1, 2],
[1, 0]]
gen_test(a, 'Lihtne maatriks 1')
b = [[-1, 9],
[5, -1],
[-1, 1]]
gen_test(b, 'Lihtne maatriks 2')
c = [[1, 9],
[5, 1],
[2, 2],
[3, 6]]
gen_test(c, 'Keerulisem maatriks 1')
d = [[1, 0, -9001]]
gen_test(d, 'Keerulisem maatriks 2')
e = [[42]]
gen_test(e, 'Üheelemendiline maatriks')
for i in range(10):
gen_test(random_matrix(),'Juhuslik maatriks {}'.format(i))
| true
|
982bb2d818544fa35f1d6ff349dc0760f3bfb0c0
|
Python
|
theadamsacademy/python_tutorial_for_beginners
|
/18_lists/3_functions.py
|
UTF-8
| 367
| 4.21875
| 4
|
[] |
no_license
|
names = ["Mike", "Kate", "Dan"]
# Length
print(len(names))
# Check if item exists
if "Kate" in names:
print("Kate is in the list")
# Add item
names.append("Dave")
names.insert(2, "Dave")
# Remove item
names.remove("Kate")
print(names.pop())
del names[2]
del names[1:3]
names.clear()
# Join lists
names = names + ["Dave"]
# Sorting
names.sort()
print(names)
| true
|
3117905af733cb806f81c58b5d64b4c3dff13181
|
Python
|
inthescales/lyres-dictionary
|
/src/diachron/table.py
|
UTF-8
| 989
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
class TableColumn:
def __init__(self, title, elements):
self.title = title
self.elements = elements
def make_table(columns):
output = "<body>\n"
style = '"border: 1px solid black;"'
if not style:
output += "<table>\n"
else:
output += "<table style=" + style + ">\n"
rows = 0
for column in columns:
rows = max(rows, len(column.elements) + 1)
output += "<tr>"
for column in columns:
output += "<th style=\"border: 1px solid black; padding: 8px;\">" + column.title + "</th>"
output += "</tr>"
for r in range(0, rows):
output += "<tr>"
for column in columns:
if r < len(column.elements):
output += "<td style=\"border: 1px solid black; padding: 8px;\">"
output += column.elements[r]
output += "</td>"
output += "</tr>"
# Closing
output += "</table>\n"
output += "</body>\n"
return output
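
if __name__ == "__main__":
    # Illustrative usage (not in the original module): two columns of unequal length.
    columns = [
        TableColumn("Word", ["cat", "dog"]),
        TableColumn("Origin", ["OE catt"]),
    ]
    print(make_table(columns))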
| true
|
47ee788a740abaa47f077b225df014c19e07d0a3
|
Python
|
dinnozap/MinecraftServerMaker
|
/main.py
|
UTF-8
| 973
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
########################################
### Minecraft Server Maker #############
### Create Your Own Minecraft Server ###
########################################
from commun import *
import os
import subprocess as sub
if os.path.isfile("install.py"):
os.remove("install.py")
else:
pass
bcl = 1
nbnotfound = 1
clear()
while bcl:
print("###############################\n### Minecraft Server Maker ###\n##############################")
a=input("[1] Create Server [2] Quit : ")
if a == "1":
clear()
print("Starting creation of the server")
load(4)
from base import main
main()
bcl = 0
elif a == "2":
print("Good Bye ;)")
wait(1)
clear()
exit(0)
else:
clear()
print("Command not found\n:(")
print("Error #", nbnotfound)
nbnotfound += 1
if nbnotfound > 20:
print("Sorry but the program gona shutdown beceause you enter 20 false result !!!!! :/")
wait(2)
clear()
print("Good Bye")
exit(0)
| true
|
b0049225b6f7d906ad1aa692cc021795d554acc5
|
Python
|
jonatanbarkan/loop
|
/cropper.py
|
UTF-8
| 3,468
| 2.890625
| 3
|
[] |
no_license
|
from __future__ import division
import numpy as np
import cv2
import random
import faces
class Cropper(object):
def __init__(self, window_rows, window_cols, resolution, center1=0, center2=0):
# window_rows is the rows step size we want the sliding window to have
# window_cols is the columns step size we want the sliding window to have
# center1 is the center of the box on the
self.window_rows = window_rows
self.original_rows = window_rows
self.window_cols = window_cols
self.original_cols = window_cols
# self.center1 = center1 + (window_rows-1)/2
# self.center2 = center2 + (window_cols-1)/2
self.center1 = center1
self.center2 = center2
self.frame_rows = resolution[0]
self.frame_cols = resolution[1]
def move_rect_center_row(self, jump):
self.center1 += jump
def move_rect_center_col(self, jump):
self.center2 += jump
def random_walk(self):
self.center2 = random.randint(0, self.frame_rows)
self.center1 = random.randint(0, self.frame_cols)
def random_size(self):
        # sizes are drawn around the original window size, so repeated calls don't keep shrinking it
row_min = int(0.8*self.original_rows)
row_max = int(1.2*self.original_rows)
self.window_rows = random.randint(row_min, row_max)
self.window_cols = np.round(1.14*self.window_rows)
def find_rect_edges(self):
# (center1, center2) is the pixel in the center of the rectangles view
# up, down, left, right are the respective rows and columns location of the rectangles edges
h = (self.window_rows-1)/2 # height step size from center to edge
w = (self.window_cols-1)/2 # width step size from center to edge
up = int(self.center1-h)
down = int(self.center1+h)
left = int(self.center2-w)
right = int(self.center2+w)
return up, down, left, right
def crop(self, frame):
# crop the frame
up, down, left, right = self.find_rect_edges()
img = frame[up:down, left:right]
return img
def display_loop(self, frame):
for row_jump in range(frame.shape[0]):
# self.move_rect_center_row(1)
for col_jump in range(frame.shape[1]):
# self.move_rect_center_col(1)
# print(frame.shape)
# if frame.shape == (self.window_rows - 1, self.window_cols - 1):
self.display_rect(frame)
def check_ROI_in_frame(self, ROI):
# print ROI.shape[0], ROI.shape[1]
# print self.window_rows
# print self.window_cols
if ROI.shape[0] == 0 & ROI.shape[1] == 0:
return False
elif (ROI.shape[0]+1) == self.window_rows:
if (ROI.shape[1]+1) == self.window_cols:
return True
return False
def save_to_dir(self, path, frame, size, frame_num):
if frame.shape == (size[0] - 1, size[1] - 1):
cv2.imwrite(path + 'cropSize_' + str(size) + 'frameNum_' + str(frame_num) + 'center_' + str(
(self.center1, self.center2)) + '.png', frame)
def display_rect(self, frame, color=(0, 255, 0)):
up, down, left, right = self.find_rect_edges()
cv2.rectangle(frame, (up, left), (down, right), color, 1)
cv2.imshow('frame', frame)
| true
|
c9ea40dfc929e4a88de7328f8f6428b86a23a890
|
Python
|
Hectorline/WebAppETSEIB
|
/app/scripts/calculs.py
|
UTF-8
| 4,861
| 3.03125
| 3
|
[] |
no_license
|
import time
def time_this(func):
"""The time_this decorator"""
def decorated(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
        print(func.__name__ + ' ran in', (time.time() - start)*1000, 'ms')
return result
return decorated
@time_this
def matriculats(assig, quadri, an, sex):
"""
retorna el nombre de matriculats per a un aassignatura, quadrimestre, any, si no s'introdueix el sexe,
retorna els dos
"""
f1 = open('NouFitxer.csv', 'r')
c = 0
for linea in f1:
linea = linea.strip()
l = linea.split(';')
if l[2] == assig:
if l[3] == an:
if l[4] == quadri:
if sex != "T":
if l[8] == sex:
c = c + 1
else:
pass
else:
c = c + 1
return c
@time_this
def aprovats(assig, quadri, an, sex):
"""
retorna el nombre d'aprovats per assignatura, quadrimestre i any. si no s'introdueix el paràmetre sex retorna
ambdos sexes
"""
f1 = open('NouFitxer.csv', 'r')
c = 0
for linea in f1:
linea = linea.strip()
l = linea.split(';')
if l[2] == assig:
if l[3] == an:
if l[4] == quadri:
if l[5] == 'S':
if sex != "T":
if l[8] == sex:
c = c + 1
else:
pass
else:
c = c + 1
return c
@time_this
def percentatge(assig, quad, an, sex):
"""
retorna el percentatge d'aprovats per assignatura, quadrimestre, i any
"""
if matriculats(assig, quad, an, sex) != 0:
return (aprovats(assig, quad, an, sex) / matriculats(assig, quad, an, sex)) * 100
else:
return 0
# def aprovats1a(assig):
# f = open('/static/NouFitxer.csv','r')
# exp = []
# la = []
# c = 0
# ctot = 0
# for linea in f:
# l = linea.split(';')
# if exp != l[1]:
# exp = l[1]
# ctot = ctot + 1
# for e in la:
# if e == 'N':
# pass
# else:
# c = c+1
# la = []
# if l[2] == assig:
# la.append(l[5])
# return (c/ctot)*100
# """
# def intent(assig,i,sex=0):
# """
# reorna el percentatge d'aprovats a la i-essima la assignatura assig
# """
# f = open('/NouFitxer.csv','r')
# exp = []
# la = []
# c = 0
# ctot = 0
# for linea in f:
# l = linea.split(';')
# if exp != l[1]:
# exp = l[1]
# if sex != 0:
# if l[8] == sex:
# ctot = ctot + 1
# else:
# pass
# else:
# ctot = ctot + 1
# if 'S' in la:
# if len(la) == i:
# c = c+1
# la = []
# if sex != 0:
# if l[8] == sex:
# if l[2] == assig:
# la.append(l[5])
# else:
# if l[2] == assig:
# la.append(l[5])
# return (c/ctot)*100
@time_this
def notamitja(assig, quad, an, sex):
"""
retorna la mitjana per assigm quad i any
"""
f = open('NouFitxer.csv', 'r')
n = 0
c = 0
for linea in f:
linea = linea.strip()
l = linea.split(';')
if l[2] == assig:
if l[3] == an:
if l[4] == quad:
if sex != "T":
if l[8] == sex:
c = c + 1
n = n + float(l[6])
else:
pass
else:
c = c + 1
n = n + float(l[6])
f.close()
if c == 0:
return 0
return n / c
@time_this
def llistamitja(any, quad, sex):
"""
fa una llista amb les mitjanes de totes les assignatures d'un any i un quadri
"""
l = []
for assig in range(240011, 240016):
l.append(notamitja(str(assig), quad, any, sex))
for assig in range(240021, 240026):
l.append(notamitja(str(assig), quad, any, sex))
return l
@time_this
def llistapercent(any, quad, sex):
    # builds the list with the pass percentage of every subject
l = []
for assig in range(240011, 240016):
l.append(percentatge(str(assig), quad, any, sex))
for assig in range(240021, 240026):
l.append(percentatge(str(assig), quad, any, sex))
return l
| true
|
73bd7e85738352db16b7158ebbdd590638cac38f
|
Python
|
jaybubs/pyprj
|
/misc/even_fibonacci_numbers.py
|
UTF-8
| 321
| 3.5
| 4
|
[] |
no_license
|
def fib(x,y):
z = x+y
return(z)
def evens():
x=1
y=1
d=0
sm=0
while d<4000000:
d=fib(x,y)
print('x: '+str(x)+' y: '+str(y)+' d: '+str(d))
if d%2==0:
sm+=d
print('i summed here: ' +str(sm))
x=y
y=d
return(sm)
print(evens())
| true
|
89f357ee0611eda2fd08ee533c92b53ab1147075
|
Python
|
baubrun/Python-Workbook
|
/Discount_Table/test_discount_table.py
|
UTF-8
| 746
| 2.984375
| 3
|
[] |
no_license
|
import pytest
from .discount_table import discount_table
@pytest.mark.parametrize("price, expected", [
([4.95, 9.95, 14.95, 19.95, 24.95], (
"Original:\t{:.2f}\tDiscount:\t{:.2f}\tNew:\t{:.2f}".format(4.95, 2.97, 1.98) +
"\nOriginal:\t{:.2f}\tDiscount:\t{:.2f}\tNew:\t{:.2f}".format(9.95, 5.97, 3.98) +
"\nOriginal:\t{:.2f}\tDiscount:\t{:.2f}\tNew:\t{:.2f}".format(14.95, 8.97, 5.98) +
"\nOriginal:\t{:.2f}\tDiscount:\t{:.2f}\tNew:\t{:.2f}".format(19.95, 11.97, 7.98) +
"\nOriginal:\t{:.2f}\tDiscount:\t{:.2f}\tNew:\t{:.2f}\n".format(24.95, 14.97, 9.98)))
])
def test_discount_table(price, expected, capsys):
discount_table(price)
out, err = capsys.readouterr()
assert out == expected
| true
|
eb77a9d65e7f3ded95daa45e38b0aee205673df7
|
Python
|
tutunak/codewars
|
/python/4 kyu/IP Validation.py
|
UTF-8
| 755
| 3.84375
| 4
|
[] |
no_license
|
# Write an algorithm that will identify valid IPv4 addresses in dot-decimal format. IPs should be considered valid if
# they consist of four octets, with values between 0..255 (included).
#
# Input to the function is guaranteed to be a single string.
#
# Examples
# // valid inputs:
# 1.2.3.4
# 123.45.67.89
#
# // invalid inputs:
# 1.2.3
# 1.2.3.4.5
# 123.456.78.90
# 123.045.067.089
# Note: leading zeros (e.g. 01.02.03.04) are considered not valid in this kata!
def is_valid_IP(strng):
ip = strng.split('.')
if len(ip) != 4:
return False
for i in ip:
try:
oct = int(i)
except:
return False
if (str(oct) != i) or not (oct >= 0 and oct < 256):
return False
return True
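
# Quick checks (added) using the examples from the kata description above:
assert is_valid_IP('1.2.3.4')
assert is_valid_IP('123.45.67.89')
assert not is_valid_IP('1.2.3')
assert not is_valid_IP('1.2.3.4.5')
assert not is_valid_IP('123.456.78.90')
assert not is_valid_IP('123.045.067.089')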
| true
|
985c8696a8cbf5de8dde9e106abbd12af0285b6d
|
Python
|
damiankoper/ripo
|
/video_processor/src/processors/Classification.py
|
UTF-8
| 7,553
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
import random
import pickle
import cv2
import os
import pickle
from tensorflow import keras
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import time
class Classification:
def __init__(self, width: int = 50, height: int = 50, depth: int = 3):
self.model = None
self.labelizer = None
self.width = width
self.height = height
self.depth = depth
self.train_images = []
self.train_labels = []
self.test_images = []
self.test_labels = []
def genAugmentedDataSet(self, dataFolder: str, newDataFolder: str):
dataGenerator = keras.preprocessing.image.ImageDataGenerator(
rotation_range=90,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.5,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest")
for label in os.listdir(dataFolder):
labelPath = os.path.join(dataFolder, label)
savePath = os.path.join(newDataFolder, label)
for image in os.listdir(labelPath):
imagePath = os.path.join(labelPath, image)
img = keras.preprocessing.image.load_img(imagePath)
if img is not None:
img = keras.preprocessing.image.img_to_array(img)
img = np.expand_dims(img, axis=0)
generateImage = dataGenerator.flow(img, batch_size=1, save_to_dir=savePath,
save_prefix="image", save_format="png")
for i in range(11):
generateImage.next()
def createTrainingData(self, dataFolder: str, createPickles: bool = False):
imagePaths = sorted(list(paths.list_images(dataFolder)))
random.seed(42)
random.shuffle(imagePaths)
for img in imagePaths:
image = cv2.imread(img)
image = cv2.resize(image, (self.width, self.height))
self.train_images.append(image)
label = img.split(os.path.sep)[-2]
self.train_labels.append(label)
self.train_images = np.array(self.train_images, dtype="float") / 255.0
self.train_labels = np.array(self.train_labels)
(self.train_images, self.test_images, self.train_labels, self.test_labels) = train_test_split(self.train_images,
self.train_labels, test_size=0.25, random_state=42)
if (createPickles):
pickle_out = open("data/training/pickles/train_images.p","wb")
pickle.dump(self.train_images, pickle_out)
pickle_out.close()
pickle_out = open("data/training/pickles/train_labels.p","wb")
pickle.dump(self.train_labels, pickle_out)
pickle_out.close()
pickle_out = open("data/training/pickles/test_images.p","wb")
pickle.dump(self.test_images, pickle_out)
pickle_out.close()
pickle_out = open("data/training/pickles/test_labels.p","wb")
pickle.dump(self.test_labels, pickle_out)
pickle_out.close()
def training(self, modelPath: str, loadPickles: bool = False):
if (loadPickles):
self.train_images = pickle.load(open("data/training/pickles/train_images.p", "rb"))
self.train_labels = pickle.load(open("data/training/pickles/train_labels.p", "rb"))
self.test_images = pickle.load(open("data/training/pickles/test_images.p", "rb"))
self.test_labels = pickle.load(open("data/training/pickles/test_labels.p", "rb"))
else:
if(len(self.train_images) == 0):
print("No training data")
return
lb = LabelBinarizer()
self.train_labels = lb.fit_transform(self.train_labels)
self.test_labels = lb.transform(self.test_labels)
# model = keras.models.Sequential()
# model.add(keras.layers.Dense(32, input_shape=((self.width*self.height*self.depth),), activation='relu'))
# model.add(keras.layers.Dense(len(lb.classes_), activation='softmax'))
# model.compile(optimizer='rmsprop',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=100, batch_size=32)
# model = keras.models.Sequential()
# model.add(keras.layers.Dense(128, activation='relu', input_shape=((self.width*self.height*self.depth),)))
# model.add(keras.layers.Dropout(0.1))
# model.add(keras.layers.Dense(64, activation='relu'))
# model.add(keras.layers.Dropout(0.1))
# model.add(keras.layers.Dense(len(lb.classes_), activation='softmax'))
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(50, 50, 3)))
model.add(keras.layers.Conv2D(32, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(len(lb.classes_), activation='softmax'))
#tensorboard --logdir data/training/logs/
logName = "log{}".format(int(time.time()))
tensorboard = keras.callbacks.TensorBoard(log_dir="data/training/logs/{}".format(logName))
sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(self.train_images, self.train_labels, validation_data=(self.test_images, self.test_labels),
epochs=15,
batch_size=64,
callbacks=[reduce_lr, tensorboard])
model.save(modelPath+"/model")
os.makedirs(modelPath+"/labelizer", exist_ok=True)
pickle_out = open(modelPath+"/labelizer/labelizer.p","wb")
pickle.dump(lb, pickle_out)
pickle_out.close()
def loadModel(self, modelPath: str):
self.model = keras.models.load_model(modelPath+"/model")
self.labelizer = pickle.load(open(modelPath +"/labelizer/labelizer.p", "rb"))
def classify(self, image):
image = cv2.resize(image, (self.width, self.height))
image = image.astype("float")/255.0
#image = image.flatten()
#image = image.reshape((1, image.shape[0]))
# time_s = time.perf_counter()
image = np.expand_dims(image, axis=0)
prediction_result = self.model.predict_on_batch(image)
# print(time.perf_counter() - time_s)
i = prediction_result.numpy().argmax(axis=1)[0]
label = self.labelizer.classes_[i]
return label, prediction_result
| true
|
78d9a47a85c631126f39dfb6e3c44b4297fafb48
|
Python
|
Relph1119/deep-learning-with-python-notebooks
|
/tensorflow_V2_src/ch03/3-4-classifying-movie-reviews.py
|
UTF-8
| 2,820
| 3.296875
| 3
|
[
"MIT",
"GPL-3.0-only"
] |
permissive
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: 3-4-classifying-movie-reviews.py
@time: 2020/4/9 14:48
@project: deep-learning-with-python-notebooks
@desc: 3.4 Classifying movie reviews: a binary classification problem
"""
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras import models, layers, losses, metrics, optimizers
from tensorflow.keras.datasets import imdb
# Load the movie-review data
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# Vectorize the data sets
def vectorize_sequences(sequences, dimension=10000):
"""
    Encode the integer sequences as a binary matrix.
:param sequences:
:param dimension:
:return:
"""
# Create an all-zero matrix of shape (len(sequences), dimension)
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1. # set specific indices of results[i] to 1s
return results
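
# Illustrative check (added): the sequence [1, 3] with dimension 5 becomes the row [0., 1., 0., 1., 0.]
assert (vectorize_sequences([[1, 3]], dimension=5) == np.array([[0., 1., 0., 1., 0.]])).all()
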
# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# Build the model
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
# Compile the model
model.compile(optimizer=optimizers.RMSprop(lr=0.001),
loss=losses.binary_crossentropy,
metrics=[metrics.binary_accuracy])
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
history = model.fit(partial_x_train,
partial_y_train,
epochs=4,
batch_size=512,
validation_data=(x_val, y_val))
history_dict = history.history
history_dict.keys()
# Plot the training curves
loss_value = history_dict['loss']
val_loss_value = history_dict['val_loss']
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
epochs = range(1, len(loss_value) + 1)
plt.subplots_adjust(wspace=0.5)
plt.subplot(1, 2, 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss_value, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss_value, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
results = model.evaluate(x_test, y_test)
print(results)
print(model.predict(x_test))
| true
|
f4b5742b1c8b9ba3672d5eefb8da60ff4aaa3056
|
Python
|
Lyechen/leetcode-python
|
/leetcode_002.py
|
UTF-8
| 2,505
| 3.921875
| 4
|
[] |
no_license
|
"""
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807.
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def addTwoNumbers(l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
up = 0
if l1.val + l2.val >= 10:
ret = ListNode(l1.val + l2.val - 10)
up = 1
else:
ret = ListNode(l1.val + l2.val)
l1 = l1.next
l2 = l2.next
k = ret
if not l1 and not l2 and up:
ret.next = ListNode(up)
return ret
while l1 or l2:
if l1 and l2:
if l1.val + l2.val + up >= 10:
ret.next = ListNode(l1.val + l2.val + up - 10)
up = 1
else:
ret.next = ListNode(l1.val + l2.val + up)
up = 0
ret = ret.next
l1 = l1.next
l2 = l2.next
elif l1:
if l1.val + up >= 10:
ret.next = ListNode(l1.val + up - 10)
up = 1
else:
ret.next = ListNode(l1.val + up)
up = 0
ret = ret.next
l1 = l1.next
elif l2:
if l2.val + up >= 10:
ret.next = ListNode(l2.val + up - 10)
up = 1
else:
ret.next = ListNode(l2.val + up)
up = 0
ret = ret.next
l2 = l2.next
else:
print('why!!!!')
if up:
ret.next = ListNode(up)
return k
# 1562 / 1562 test cases passed. Runtime: 195 ms 84.34 %
"""
A small takeaway: 1. Read the problem carefully; when an unfamiliar structure appears on LeetCode, the problem statement usually provides the node definition.
"""
if __name__ == "__main__":
    # Build the first linked list: 2 -> 4 -> 3
idx = ListNode(2)
n = idx
idx.next = ListNode(4)
idx = idx.next
idx.next = ListNode(3)
    # You could write an insert helper, but it isn't needed just for this test
    # Build the second linked list: 5 -> 6 -> 4
idx_1 = ListNode(5)
n_1 = idx_1
idx_1.next = ListNode(6)
idx_1 = idx_1.next
idx_1.next = ListNode(4)
    # Check that the method produces the expected result (342 + 465 = 807)
    result = addTwoNumbers(n, n_1)
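    # Added verification (not in the original): walk the returned list and compare the digits
    digits = []
    while result:
        digits.append(result.val)
        result = result.next
    assert digits == [7, 0, 8]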
| true
|
55d25a9c0f0a0a3e4bedc027427e6b20bf7c6d8d
|
Python
|
olivatooo/redes-final-2019
|
/myslip.py
|
UTF-8
| 2,131
| 2.859375
| 3
|
[] |
no_license
|
class CamadaEnlace:
def __init__(self, linhas_seriais):
self.enlaces = {}
for ip_outra_ponta, linha_serial in linhas_seriais.items():
enlace = Enlace(linha_serial)
self.enlaces[ip_outra_ponta] = enlace
enlace.registrar_recebedor(self.callback)
def registrar_recebedor(self, callback):
self.callback = callback
def enviar(self, datagrama, next_hop):
try:
self.enlaces[next_hop].enviar(datagrama)
except:
pass
def callback(self, datagrama):
if self.callback:
self.callback(datagrama)
class Enlace:
def __init__(self, linha_serial):
self.linha_serial = linha_serial
self.linha_serial.registrar_recebedor(self.__raw_recv)
self.buffer = []
self.result = ''
self.callback = None
def registrar_recebedor(self, callback):
self.callback = callback
def enviar(self, datagrama):
datagrama = datagrama.replace(b"\xdb", b"\xdb\xdd").replace(b"\xc0", b"\xdb\xdc")
datagrama = b'\xc0' + datagrama + b'\xc0'
print(f"║ Enviando pacote SLIP ║")
self.linha_serial.enviar(datagrama)
def __raw_recv(self, dados):
dados = dados.hex()
for i in range(0, len(dados), 2):
cursor = dados[i] + dados[i + 1]
if cursor != 'c0':
if cursor != '':
self.buffer.append(cursor)
if cursor == 'c0' and self.buffer:
while self.buffer:
if type(self.result) is bytes:
self.result = self.result.hex() + self.buffer.pop(0)
self.result = self.result + self.buffer.pop(0)
self.result = bytes(bytearray.fromhex(self.result))
self.result = self.result.replace(b"\xdb\xdc", b"\xc0").replace(b"\xdb\xdd", b"\xdb")
try:
self.callback(self.result)
except:
import traceback
traceback.print_exc()
self.result = ''
| true
|
a2ff1299efbbd5ae14a8beb99678baf896a83694
|
Python
|
YashChitgupakar9/practice
|
/014.py
|
UTF-8
| 259
| 3.0625
| 3
|
[] |
no_license
|
#print(dir(tuple))
t = ("yash","rishab","aarti","milind",[2,3,4,5])
t[4].append(6)
print(t)
t1 =('messi','ronaldo','herrera','martial',[{"england":'epl',"spain":"laliga","germany":"bundesliga","france":"ligue1","italy":"seriea"}])
print(t1[4][0]["france"])
| true
|
97d08a831d0cc8b2953a08613d9524000cccee52
|
Python
|
ikwon176/ENGI301
|
/Assignment_05/blink_USR3.py
|
UTF-8
| 2,594
| 2.671875
| 3
|
[] |
no_license
|
"""
--------------------------------------------------------------------------
Blink USR3 LED
--------------------------------------------------------------------------
Credits:
The BeagleBone IO Python library was originally forked from the excellent
MIT Licensed RPi.GPIO library written by Ben Croston.
Basic structure of code (Adafruit's BeagleBone IO Python Library) written
by Justin Cooper, Adafruit Industries. BeagleBone IO Python library is
released under the MIT License.
License:
Copyright 2019 Irene Kwon
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------
Description:
- Want USR3 LED on pocketbeagle to flash at a frequency of 5 Hz
- want 5 Hz and time.sleep(in seconds) so we want 1/5 for the whole
cycle or 1/10 seconds for each part (on) and then 1/10 seconds off
--------------------------------------------------------------------------
"""
import Adafruit_BBIO.GPIO as GPIO
import time
i = 3
GPIO.setup("USR%d" % i, GPIO.OUT)
while True:
    # 5 Hz blink: 0.1 s on, 0.1 s off (0.2 s period), matching the docstring above
    GPIO.output("USR%d" % i, GPIO.HIGH)
    time.sleep(0.1)
    GPIO.output("USR%d" % i, GPIO.LOW)
    time.sleep(0.1)
| true
|
197317a1e0a4fd1c24c465b11c552d646678e784
|
Python
|
hyuhyu2001/Python_oldBoy
|
/src/Day006/Procuder_demo2.py
|
UTF-8
| 1,649
| 3.40625
| 3
|
[] |
no_license
|
#!/user/bin/env python
#encoding:utf-8
'''
A producer-consumer model.
The key feature of producer-consumer is decoupling: keeping the coupling between program modules as low as possible.
Buying baozi while having to wait for one specific chef to make them causes blocking.
With a cashier (a Queue / message queue) in between, customers don't care which chef made the baozi and chefs
don't care which customer buys them; this is the non-blocking mode (supports concurrency and uneven workloads).
One step further, the customer doesn't wait five minutes: when the baozi is ready, they get notified (the asynchronous model).
'''
import threading,time,Queue  # not ideal style; normally these would be three separate import lines
import random
def Proudcer(name,que):
while True:
        if que.qsize()<3: # only make baozi while fewer than 3 are queued
que.put('baozi')
print '%s:Made a baozi...' %name
else:
            print 'There are already 3 baozi waiting'
        time.sleep(random.randrange(1)) # meant to pause randomly (note: randrange(1) is always 0) to make the workload uneven
def Consumer(name,que):
while True:
try:
            que.get_nowait() # non-blocking get; raises immediately when there are no baozi
print '%s:Get a baozi...' %name
except Exception:
            print u'No baozi left...'
time.sleep(random.randrange(3))
q = Queue.Queue()
p1 = threading.Thread(target = Proudcer,args = ['chef1',q])
p2 = threading.Thread(target = Proudcer,args = ['chef2',q])
p1.start()
p2.start()
c1 = threading.Thread(target = Consumer,args = ['chenchao1',q])
c2 = threading.Thread(target = Consumer,args = ['chenchao2',q])
c1.start()
c2.start()
| true
|
c324d1817d44956243279c8a189ca39cb7ef411b
|
Python
|
pparmesh/Delta-Autonomy
|
/delta_rc_car/delta_perception_rc_car/test/cluster_test.py
|
UTF-8
| 1,683
| 2.71875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# -*- coding: utf-8 -*-
# @Author: Heethesh Vhavle
# @Date: Nov 20, 2019
# @Last Modified by: Heethesh Vhavle
# @Last Modified time: Nov 20, 2019
import numpy as np
import matplotlib.pyplot as plt
from pyclustering.cluster.dbscan import dbscan
from pyclustering.cluster import cluster_visualizer
from features import features
cmap = plt.get_cmap('tab10')
# [0, 1, 2, 3, 4, 5, 6]
# [x, y, r, vx, vy, vr, th]
data = np.c_[features[:, 2], features[:, 6], features[:, 5]] #, features[:, 6]
# Create DBSCAN algorithm.
dbscan_instance = dbscan(data, 0.7, 3)
# Start processing by DBSCAN.
dbscan_instance.process()
# Obtain results of clustering.
clusters = dbscan_instance.get_clusters()
noise = dbscan_instance.get_noise()
labels = np.full_like(features[:, 0], -1).astype('int')
for i, indices in enumerate(clusters): labels[indices] = i
# labels += 1
print(labels)
print(len(clusters))
# cmap = plt.get_cmap('tab10')
# for i, (x, y, r, vx, vy, vr, th) in enumerate(features):
# plt.scatter(x, y, color=cmap(labels[i]))
# plt.grid()
# plt.show()
targets = np.array([np.mean(features[cluster], axis=0) for cluster in clusters])
print(targets.shape)
max_vel = np.max(features[:, 5])
features[:, 5] = features[:, 5] / max_vel
for (x, y, r, vx, vy, vr, th), label in zip(features, labels):
if label != -1:
plt.scatter(x, y, color=(0.2, 0.2, vr), s=100)
plt.scatter(x, y, color=cmap(label), s=20)
plt.scatter(targets[:, 0], targets[:, 1], c='r')
plt.grid()
plt.show()
# Visualize clustering results
# visualizer = cluster_visualizer()
# visualizer.append_clusters(clusters, features[:, :2])
# visualizer.append_cluster(noise, features[:, :2], marker='x')
# visualizer.show()
| true
|