| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb9b99aa461b45357f30e30ccba0ff2fdd376c3a
|
29e4d393351c87741f069092eb8d0ab6f1221d6f
|
/venv/lib/python3.6/site-packages/pdfminer/ascii85.py
|
b0af08c8b04af97930de0d7ab9ac088154718fef
|
[
"MIT"
] |
permissive
|
masora1030/eigoyurusan
|
f0eb7d9761aa150379b558c13fc2477daf504417
|
fa82044a2dc2f0f1f7454f5394e6d68fa923c289
|
refs/heads/master
| 2022-12-01T09:31:17.330620
| 2020-07-22T14:51:59
| 2020-07-22T14:51:59
| 279,682,018
| 11
| 2
|
MIT
| 2020-07-22T22:02:57
| 2020-07-14T20:03:45
|
Python
|
UTF-8
|
Python
| false
| false
| 2,490
|
py
|
#!/usr/bin/env python
""" Python implementation of ASCII85/ASCIIHex decoder (Adobe version).
This code is in the public domain.
"""
import re
import struct
# ascii85decode(data)
def ascii85decode(data):
"""
In ASCII85 encoding, every four bytes are encoded with five ASCII
letters, using 85 different types of characters (as 256**4 < 85**5).
    When the length of the original bytes is not a multiple of 4, a special
    rule is used for rounding up.
    Adobe's ASCII85 implementation differs slightly from the original
    in how it handles the last characters.
The sample string is taken from:
http://en.wikipedia.org/w/index.php?title=Ascii85
>>> ascii85decode(b'9jqo^BlbD-BleB1DJ+*+F(f,q')
b'Man is distinguished'
>>> ascii85decode(b'E,9)oF*2M7/c~>')
b'pleasure.'
"""
n = b = 0
out = b''
for c in data:
if 33 <= c and c <= 117: # b'!' <= c and c <= b'u'
n += 1
b = b*85+(c-33)
if n == 5:
out += struct.pack('>L', b)
n = b = 0
elif c == 122: # b'z'
assert n == 0
out += b'\0\0\0\0'
elif c == 126: # b'~'
if n:
for _ in range(5-n):
b = b*85+84
out += struct.pack('>L', b)[:n-1]
break
return out
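# Worked illustration of the tail rule above (derived from the function body,
# not from the original docs): decoding b'E,9)oF*2M7/c~>' leaves n == 2 at the
# '~' marker, so the loop pads with 5 - n == 3 digits of 84 ('u', the maximum)
# before unpacking, then keeps only n - 1 == 1 byte of the packed word, which
# is how the doctest output b'pleasure.' gets its final b'.'.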
# asciihexdecode(data)
hex_re = re.compile(r'([a-f\d]{2})', re.IGNORECASE)
trail_re = re.compile(r'^(?:[a-f\d]{2}|\s)*([a-f\d])[\s>]*$', re.IGNORECASE)
def asciihexdecode(data):
"""
ASCIIHexDecode filter: PDFReference v1.4 section 3.3.1
For each pair of ASCII hexadecimal digits (0-9 and A-F or a-f), the
ASCIIHexDecode filter produces one byte of binary data. All white-space
characters are ignored. A right angle bracket character (>) indicates
EOD. Any other characters will cause an error. If the filter encounters
the EOD marker after reading an odd number of hexadecimal digits, it
will behave as if a 0 followed the last digit.
>>> asciihexdecode(b'61 62 2e6364 65')
b'ab.cde'
>>> asciihexdecode(b'61 62 2e6364 657>')
b'ab.cdep'
>>> asciihexdecode(b'7>')
b'p'
"""
data = data.decode('latin1')
out = [ int(hx,16) for hx in hex_re.findall(data) ]
m = trail_re.search(data)
if m:
out.append(int(m.group(1),16) << 4)
return bytes(out)
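# The trailing-digit branch above implements "act as if a 0 followed the last
# digit": a lone '7' becomes int('7', 16) << 4 == 0x70, i.e. b'p', which is
# exactly the asciihexdecode(b'7>') doctest.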
if __name__ == '__main__':
import doctest
print('pdfminer.ascii85', doctest.testmod())
|
[
"soraemonpockt@icloud.com"
] |
soraemonpockt@icloud.com
|
0ea758e348df140e2e167621fa3bc788a8118685
|
e2eefb2da7f1a113e44bde5c09074bc8c368e94f
|
/threadingx/thread1.py
|
e16003387239d6402f37ce96e21356419e3a8a81
|
[] |
no_license
|
piyushbhadauriya/WJU_OS
|
88c73001c30c116f8f59b587ad27ade33561137d
|
7d70f23bcb0485abe91abceaedb4a41c3e911e4f
|
refs/heads/master
| 2020-03-28T22:10:25.391954
| 2019-01-15T01:00:55
| 2019-01-15T01:00:55
| 149,210,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
import time
import threading
def countdown(aname, account):
while account > 0:
print(aname, " : counting down ",account)
account -=1
time.sleep(2)
print(aname,' :exit')
return
t1 = threading.Thread(target = countdown, args = ('t1',10,))
t1.start()
t2 = threading.Thread(target = countdown, args = ('t2',20,))
t2.start()
print('exit main thread')
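# Note: both threads are non-daemon, so 'exit main thread' prints almost
# immediately while the interpreter keeps running until both countdowns end;
# adding t1.join() and t2.join() would make the main thread wait explicitly.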
|
[
"coolpiyushsingh@gmail.com"
] |
coolpiyushsingh@gmail.com
|
1e7ab58e067aa60cdb37f60639f963b9c658b7d7
|
cb1711f4bb5d77994738b034a264eb9a1f96e86d
|
/basic/dojo/views_cbv.py
|
8c032acdda55dfce8706fbc3bd94f44c28afbfde
|
[] |
no_license
|
Junhong-Kim/ask-django-lesson
|
8cd8826ecaac2d8b9ff246256de893fe4431b91a
|
af475bfb9f52c5a243a3a12861c7644db43de7f2
|
refs/heads/master
| 2021-05-14T12:38:10.698709
| 2018-01-13T17:46:17
| 2018-01-13T17:46:17
| 116,348,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
import os
from django.http import JsonResponse, HttpResponse
from django.views.generic import View, TemplateView
# HttpResponse
class PostListView1(View):
def get(self, request):
name = 'Kim'
html = self.get_template_string().format(name=name)
return HttpResponse(html)
def get_template_string(self):
return '''
<h1>Ask Django</h1>
<p>{name}</p>
            <p>Life is too short, You need Python</p>
'''
post_list1 = PostListView1.as_view()
# Template
class PostListView2(TemplateView):
template_name = 'dojo/post_list.html'
def get_context_data(self, **kwargs):
context = super().get_context_data()
context['name'] = 'Kim'
return context
post_list2 = PostListView2.as_view()
# JsonResponse
class PostListView3(View):
def get(self, request):
return JsonResponse(self.get_data(), json_dumps_params={'ensure_ascii': False})
def get_data(self):
return {
'message': 'Hello, Python & Django',
'items': ['Python', 'Django'],
}
post_list3 = PostListView3.as_view()
# FileDownload
class ExcelDownloadView(View):
filepath = '/Users/INMA/Downloads/work.xlsx'
def get(self, request):
# os.path.join(settings.BASE_DIR, 'work.xlsx')
filename = os.path.basename(self.filepath)
with open(self.filepath, 'rb') as f:
response = HttpResponse(f, content_type='application/vnd.ms-excel')
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
excel_download = ExcelDownloadView.as_view()
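# Note: HttpResponse consumes the file object eagerly, before the 'with' block
# closes it; in newer Django versions (2.1+), FileResponse(open(path, 'rb'),
# as_attachment=True, filename=filename) would stream the file and set the
# Content-Disposition header instead.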
|
[
"jhk1005jhk@gmail.com"
] |
jhk1005jhk@gmail.com
|
6e26412342eb240df11bbd524eeca3b26f04bf20
|
dce57a9b68a9dbc4ef9da3a7e193bffcadf6a03b
|
/benchmarks/migrations/0027_auto_20160202_1303.py
|
b3873d6f6346ff7e310a1705470df3ea6c418fbe
|
[] |
no_license
|
Linaro/art-reports
|
263579d5e47e3b14c0d97309c067a3a3f0f54335
|
a2efe96174b11b0f438fea6fafc976f2f8cf29b0
|
refs/heads/master
| 2020-07-12T00:04:40.441291
| 2019-05-17T19:18:53
| 2019-05-17T19:28:02
| 54,223,014
| 2
| 3
| null | 2018-08-06T14:56:53
| 2016-03-18T18:25:40
|
Python
|
UTF-8
|
Python
| false
| false
| 696
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def migrate(apps, schema_editor):
Result = apps.get_model("benchmarks", "Result")
for result in Result.objects.filter(name="linaro-art-tip-build-nexus9-MicroBenchmarks-Baseline"):
result.gerrit_change_number = None
result.gerrit_patchset_number = None
result.gerrit_change_url = None
result.gerrit_change_id = ""
result.save()
class Migration(migrations.Migration):
dependencies = [
('benchmarks', '0026_auto_20160129_1011'),
]
operations = [
migrations.RunPython(migrate, migrations.RunPython.noop),
]
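    # The second argument to RunPython is the reverse operation: passing
    # migrations.RunPython.noop keeps this data migration reversible instead
    # of raising IrreversibleError on rollback.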
|
[
"sebastian.pawlus@gmail.com"
] |
sebastian.pawlus@gmail.com
|
5889762761a190b70a9ae493826cbb071ec02449
|
5afdb507921d990702cd7ca1e532d804ec852c73
|
/vqvae/VQVAE/esoftmax.py
|
a304952a90f2cc883158f789190a74ca3361b059
|
[
"MIT"
] |
permissive
|
Zirui0623/EvSoftmax
|
1c07c9c8789beb3b7e54f176dcb1330abc16a22e
|
e161e55432f5e30fedf7eae8ae11189c01bcd54a
|
refs/heads/main
| 2023-08-30T05:03:39.393930
| 2021-10-25T00:03:41
| 2021-10-25T00:03:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,127
|
py
|
import torch
import torch.nn.functional as F
INF = 1e6
EPS = 1e-6
def log_esoftmax(input: torch.Tensor, dim: int, training: bool = True) -> torch.Tensor:
return torch.log(esoftmax(input, dim, training))
def esoftmax(input: torch.Tensor, dim: int, training: bool = False) -> torch.Tensor:
mask = input < torch.mean(input, dim=dim, keepdim=True)
mask_offset = torch.ones(input.shape, device=input.device, dtype=input.dtype)
mask_offset[mask] = EPS if training else 0
probs_unnormalized = F.softmax(input, dim=dim) * mask_offset
probs = probs_unnormalized / torch.sum(probs_unnormalized, dim=dim, keepdim=True)
return probs
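# Reading of esoftmax above: logits below the mean along dim are masked to
# zero (or to EPS while training, which keeps log_esoftmax finite and
# gradients defined), and the surviving softmax mass is renormalized to sum
# to one.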
def esoftmax_loss(
input: torch.Tensor,
target: torch.Tensor,
reduction: str = "none",
dim: int = -1,
training: bool = True,
ignore_index: int = -100
) -> torch.Tensor:
return F.nll_loss(
log_esoftmax(input, dim=dim, training=training),
target,
reduction=reduction,
ignore_index=ignore_index,
)
class LogESoftmax(torch.nn.Module):
def __init__(self, dim: int = -1):
super(LogESoftmax, self).__init__()
self.dim = dim
def forward(self, X: torch.Tensor) -> torch.Tensor:
return log_esoftmax(X, self.dim, self.training)
class ESoftmax(torch.nn.Module):
def __init__(self, dim: int = -1):
super(ESoftmax, self).__init__()
self.dim = dim
def forward(self, X: torch.Tensor) -> torch.Tensor:
return esoftmax(X, self.dim, self.training)
class ESoftmaxLoss(torch.nn.Module):
def __init__(
self, reduction: str = "none", dim: int = -1, ignore_index: int = -100
):
super(ESoftmaxLoss, self).__init__()
self.log_esoftmax = LogESoftmax(dim)
self.reduction = reduction
self.dim = dim
self.ignore_index = ignore_index
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.nll_loss(
self.log_esoftmax(input),
target,
reduction=self.reduction,
ignore_index=self.ignore_index,
)
|
[
"phil@Phils-MacBook-Pro.local"
] |
phil@Phils-MacBook-Pro.local
|
04e9181223e4bd97243feb5684e728af7ed5b29d
|
2945ac394970e1fbbf46fbcfdbd74d8095b89428
|
/lifelong_rl/core/rl_algorithms/offline/offline_rl_algorithm.py
|
0fed211317dedc771d9ff0d1edbb5550b0088fcd
|
[
"MIT"
] |
permissive
|
snu-mllab/EDAC
|
fbca5fec29407dd3ddc03e8585cdf341c4b834c0
|
198d5708701b531fd97a918a33152e1914ea14d7
|
refs/heads/main
| 2023-04-18T05:57:14.753222
| 2022-08-14T04:44:27
| 2022-08-14T04:44:27
| 415,660,116
| 64
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,617
|
py
|
import gtimer as gt
import abc
from lifelong_rl.core import logger
from lifelong_rl.core.rl_algorithms.rl_algorithm import _get_epoch_timings
from lifelong_rl.util import eval_util
class OfflineRLAlgorithm(object, metaclass=abc.ABCMeta):
def __init__(
self,
trainer,
evaluation_policy,
evaluation_env,
evaluation_data_collector,
replay_buffer,
batch_size,
max_path_length,
num_epochs,
num_eval_steps_per_epoch,
num_trains_per_train_loop,
num_train_loops_per_epoch=1,
save_snapshot_freq=1000,
):
self.trainer = trainer
self.eval_policy = evaluation_policy
self.eval_env = evaluation_env
self.eval_data_collector = evaluation_data_collector
self.replay_buffer = replay_buffer
self.batch_size = batch_size
self.max_path_length = max_path_length
self.num_epochs = num_epochs
self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
self.num_trains_per_train_loop = num_trains_per_train_loop
self.num_train_loops_per_epoch = num_train_loops_per_epoch
self.save_snapshot_freq = save_snapshot_freq
self._start_epoch = 0
self.post_epoch_funcs = []
def _train(self):
for epoch in gt.timed_for(
range(self._start_epoch, self.num_epochs),
save_itrs=True,
):
if hasattr(self.trainer, 'log_alpha'):
curr_alpha = self.trainer.log_alpha.exp()
else:
curr_alpha = None
self.eval_data_collector.collect_new_paths(
max_path_length=self.max_path_length,
num_samples=self.num_eval_steps_per_epoch,
discard_incomplete_paths=True,
alpha=curr_alpha,
)
gt.stamp('evaluation sampling')
self.training_mode(True)
for _ in range(self.num_train_loops_per_epoch):
for _ in range(self.num_trains_per_train_loop):
train_data, indices = self.replay_buffer.random_batch(
self.batch_size, return_indices=True)
self.trainer.train(train_data, indices)
self.training_mode(False)
gt.stamp('training')
self._end_epoch(epoch)
def train(self, start_epoch=0):
self._start_epoch = start_epoch
self._train()
def _end_epoch(self, epoch):
snapshot = self._get_snapshot()
if self.save_snapshot_freq is not None and \
(epoch + 1) % self.save_snapshot_freq == 0:
logger.save_itr_params(epoch + 1, snapshot, prefix='offline_itr')
gt.stamp('saving', unique=False)
self._log_stats(epoch)
self._end_epochs(epoch)
for post_epoch_func in self.post_epoch_funcs:
post_epoch_func(self, epoch)
def _get_snapshot(self):
snapshot = {}
for k, v in self.trainer.get_snapshot().items():
snapshot['trainer/' + k] = v
'''
for k, v in self.eval_data_collector.get_snapshot().items():
snapshot['evaluation/' + k] = v
for k, v in self.replay_buffer.get_snapshot().items():
snapshot['replay_buffer/' + k] = v
'''
return snapshot
def _end_epochs(self, epoch):
self.eval_data_collector.end_epoch(epoch)
self.trainer.end_epoch(epoch)
if hasattr(self.eval_policy, 'end_epoch'):
self.eval_policy.end_epoch(epoch)
def _get_trainer_diagnostics(self):
return self.trainer.get_diagnostics()
def _get_training_diagnostics_dict(self):
return {'policy_trainer': self._get_trainer_diagnostics()}
def _log_stats(self, epoch):
logger.log("Epoch {} finished".format(epoch), with_timestamp=True)
"""
Replay Buffer
"""
logger.record_dict(self.replay_buffer.get_diagnostics(),
prefix='replay_buffer/')
"""
Trainer
"""
training_diagnostics = self._get_training_diagnostics_dict()
for prefix in training_diagnostics:
logger.record_dict(training_diagnostics[prefix],
prefix=prefix + '/')
"""
Evaluation
"""
if self.num_eval_steps_per_epoch > 0:
logger.record_dict(
self.eval_data_collector.get_diagnostics(),
prefix='evaluation/',
)
eval_paths = self.eval_data_collector.get_epoch_paths()
if hasattr(self.eval_env, 'get_diagnostics'):
logger.record_dict(
self.eval_env.get_diagnostics(eval_paths),
prefix='evaluation/',
)
logger.record_dict(
eval_util.get_generic_path_information(eval_paths),
prefix="evaluation/",
)
"""
Misc
"""
# time stamp logging early for csv format
gt.stamp('logging', unique=False)
logger.record_dict(_get_epoch_timings())
logger.record_tabular('Epoch', epoch)
logger.dump_tabular(with_prefix=False, with_timestamp=False)
#gt.stamp('logging', unique=False)
@abc.abstractmethod
def training_mode(self, mode):
"""
Set training mode to `mode`.
:param mode: If True, training will happen (e.g. set the dropout
probabilities to not all ones).
"""
pass
|
[
"white0234@snu.ac.kr"
] |
white0234@snu.ac.kr
|
42f83313223bdf6899a7aa3bc49e3dbd77685249
|
a79032631c21fb07b93b467569b2e82e7e912c48
|
/venv/Lib/site-packages/slider/tests/test_beatmap.py
|
73b6997565aa3340691dbb5ec8de7143f50c0808
|
[] |
no_license
|
Rishikathegenius/django-miniproj-rishika
|
c2107db8db2386d320b044f57afd61d8e7d0fdfd
|
4cb2fbe7c54bdb6079228be65d88b32a9219f1df
|
refs/heads/master
| 2023-04-21T16:07:17.390623
| 2021-05-19T07:52:34
| 2021-05-19T07:52:34
| 364,608,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,524
|
py
|
import pytest
import slider.example_data.beatmaps
import slider.beatmap
import slider.curve
from slider.position import Position
from datetime import timedelta
from math import isclose
@pytest.fixture
def beatmap():
return slider.example_data.beatmaps.miiro_vs_ai_no_scenario('Tatoe')
def test_parse_beatmap_format_v3():
# v3 is a very old beatmap version. We just want to make sure it doesn't
# error, see #79 and #87 on github.
slider.example_data.beatmaps.example_beatmap(
"Sambomaster - Sekai wa Sore wo Ai to Yobunda ze (ZZT the Fifth) "
"[Normal].osu"
)
def test_version(beatmap):
assert beatmap.format_version == 14
def test_display_name(beatmap):
assert beatmap.display_name == (
'AKINO from bless4 & CHiCO with HoneyWorks - MIIRO '
'vs. Ai no Scenario [Tatoe]'
)
def test_parse_section_general(beatmap):
assert beatmap.audio_filename == "tatoe.mp3"
assert beatmap.audio_lead_in == timedelta()
assert beatmap.preview_time == timedelta(milliseconds=6538)
assert not beatmap.countdown
assert beatmap.sample_set == "Normal"
assert beatmap.stack_leniency == 0.7
assert beatmap.mode == 0
assert not beatmap.letterbox_in_breaks
assert not beatmap.widescreen_storyboard
def test_parse_section_editor(beatmap):
assert beatmap.distance_spacing == 1.1
assert beatmap.beat_divisor == 6
assert beatmap.grid_size == 4
assert beatmap.timeline_zoom == 1.8
def test_parse_section_metadata(beatmap):
assert beatmap.title == "MIIRO vs. Ai no Scenario"
assert beatmap.title_unicode == "海色 vs. アイのシナリオ"
assert beatmap.artist == "AKINO from bless4 & CHiCO with HoneyWorks"
assert beatmap.artist_unicode == (
"AKINO from bless4 & CHiCO with HoneyWorks"
)
assert beatmap.creator == "monstrata"
assert beatmap.version == "Tatoe"
assert beatmap.source == ""
assert beatmap.tags == [
'kyshiro',
'sukinathan',
'ktgster',
'pishifat',
'smoothie',
'world',
'walaowey',
'toybot',
'sheela901',
'yuii-',
'Sharkie',
'みいろ',
'tv',
'size',
'opening',
'kantai',
'collection',
'kancolle',
'fleet',
'girls',
'magic',
'kaito',
'1412',
'まじっく快斗1412',
'艦隊これくしょん',
'-艦これ-'
]
assert beatmap.beatmap_id == 735272
assert beatmap.beatmap_set_id == 325158
def test_parse_section_difficulty(beatmap):
assert beatmap.hp_drain_rate == 6.5
assert beatmap.circle_size == 4
assert beatmap.overall_difficulty == 9
assert beatmap.approach_rate == 9.5
assert beatmap.slider_multiplier == 1.8
assert beatmap.slider_tick_rate == 1
def test_parse_section_timing_points(beatmap):
# currently only checking the first timing point
timing_points_0 = beatmap.timing_points[0]
assert timing_points_0.offset == timedelta()
assert isclose(timing_points_0.ms_per_beat, 307.692307692308)
assert timing_points_0.meter == 4
# sample_set and sample_type omitted, see #56
assert timing_points_0.volume == 60
    # 'inherited' is not a parameter of the class, so it is not checked here
assert timing_points_0.kiai_mode == 0
def test_parse_section_hit_objects(beatmap):
# Only hit object 0 tested for now
hit_objects_0 = beatmap.hit_objects(stacking=False)[0]
assert hit_objects_0.position == Position(x=243, y=164)
assert hit_objects_0.time == timedelta(milliseconds=1076)
# Hit object note `type` is done by subclassing HitObject
assert isinstance(hit_objects_0, slider.beatmap.Slider)
# Slider specific parameters
assert hit_objects_0.end_time == timedelta(milliseconds=1178)
assert hit_objects_0.hitsound == 0
assert isinstance(hit_objects_0.curve, slider.curve.Linear)
assert hit_objects_0.curve.points == [Position(x=243, y=164),
Position(x=301, y=175)]
assert round(hit_objects_0.curve.req_length) == 45
assert isclose(hit_objects_0.length, 45.0000017166138)
assert hit_objects_0.ticks == 2
assert isclose(hit_objects_0.num_beats, 0.3333333460489903)
assert hit_objects_0.tick_rate == 1.0
assert isclose(hit_objects_0.ms_per_beat, 307.692307692308)
assert hit_objects_0.edge_sounds == [2, 0]
assert hit_objects_0.edge_additions == ['0:0', '0:0']
assert hit_objects_0.addition == "0:0:0:0:"
def test_hit_objects_stacking():
hit_objects = [slider.beatmap.Circle(Position(128, 128),
timedelta(milliseconds=x*10),
hitsound=1) for x in range(10)]
beatmap = slider.Beatmap(
format_version=14,
audio_filename="audio.mp3",
audio_lead_in=timedelta(),
preview_time=timedelta(),
countdown=False,
sample_set="soft",
stack_leniency=1,
mode=0,
letterbox_in_breaks=False,
widescreen_storyboard=False,
bookmarks=[0],
distance_spacing=1,
beat_divisor=1,
grid_size=1,
timeline_zoom=1,
title="title",
title_unicode="title",
artist="artist",
artist_unicode="artist",
creator="creator",
version="1.0",
source="source",
tags=["tags"],
beatmap_id=0,
beatmap_set_id=0,
hp_drain_rate=5,
circle_size=5,
overall_difficulty=5,
approach_rate=5,
slider_multiplier=1,
slider_tick_rate=1,
timing_points=[],
hit_objects=hit_objects
)
radius = slider.beatmap.circle_radius(5)
stack_offset = radius / 10
for i, ob in enumerate(reversed(beatmap.hit_objects(stacking=True))):
assert ob.position.y == 128-(i*stack_offset)
def test_hit_objects_hard_rock(beatmap):
# Only hit object 0 tested for now
hit_objects_hard_rock_0 = beatmap.hit_objects(hard_rock=True,
stacking=False)[0]
assert hit_objects_hard_rock_0.position == Position(x=243, y=220)
assert hit_objects_hard_rock_0.curve.points == [Position(x=243, y=220),
Position(x=301, y=209)]
def test_closest_hitobject():
beatmap = slider.example_data.beatmaps.miiro_vs_ai_no_scenario('Beginner')
hit_object1 = beatmap.hit_objects()[4]
hit_object2 = beatmap.hit_objects()[5]
hit_object3 = beatmap.hit_objects()[6]
middle_t = timedelta(milliseconds=11076 - ((11076 - 9692) / 2))
assert hit_object1.time == timedelta(milliseconds=8615)
assert hit_object2.time == timedelta(milliseconds=9692)
assert hit_object3.time == timedelta(milliseconds=11076)
assert beatmap.closest_hitobject(timedelta(milliseconds=8615)) == \
hit_object1
assert beatmap.closest_hitobject(timedelta(milliseconds=(8615 - 30))) == \
hit_object1
assert beatmap.closest_hitobject(middle_t) == hit_object2
assert beatmap.closest_hitobject(middle_t, side="right") == hit_object3
def test_ar(beatmap):
assert beatmap.ar() == 9.5
def test_bpm_min(beatmap):
assert beatmap.bpm_min() == 180
def test_bpm_max(beatmap):
assert beatmap.bpm_max() == 195
def test_cs(beatmap):
assert beatmap.cs() == 4
def test_hp(beatmap):
assert beatmap.hp() == 6.5 # issue #57
def test_od(beatmap):
assert beatmap.od() == 9
|
[
"dancefivaella@gmail.com"
] |
dancefivaella@gmail.com
|
bd22ad59dd6d627890c5d26d85d4715723be0995
|
f193fe64b1f71470f8272ececde5520938970eba
|
/rabbit/RBPub.py
|
25da4cbc7ccb4563b60efbaff7d184ad9ea01e19
|
[] |
no_license
|
eduardo-sarmento/Trabalho-2-PPD
|
917bebac9e0b6b241525ea444e70317c227b956a
|
988c243b288480125a58af6a185b825ace822507
|
refs/heads/master
| 2023-08-14T18:33:33.215841
| 2021-10-06T22:26:57
| 2021-10-06T22:26:57
| 406,138,393
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
#!/usr/bin/env python
import pika
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='rsv/hello')
channel.basic_publish(exchange='', routing_key='rsv/hello', body='Hello World!')
print("Sent 'Hello World!'")
connection.close()
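# Note: queue_declare is idempotent, so declaring 'rsv/hello' before publishing
# guarantees the queue exists; with the default '' exchange, the routing_key is
# interpreted directly as the destination queue name.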
|
[
"eduardosarmento49@gmail.com"
] |
eduardosarmento49@gmail.com
|
758f161cf8f14adcbc1f4fe3840d1cf4000f5479
|
428c97701b166c177256cdd510eac9373e75dea8
|
/Activity7.py
|
f8ab7896ba549140951dc70ef46875689ac17bef
|
[] |
no_license
|
mferri17/lstm-text-generator
|
2b240e820dbca6dd047645afa6407bc8956265bf
|
9454dd80c320cf4f476d87752480e8168185b09d
|
refs/heads/master
| 2022-12-11T12:38:02.486497
| 2020-09-07T21:05:18
| 2020-09-07T21:05:18
| 293,628,787
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,684
|
py
|
# --------------------------------
# %tensorflow_version 1.x
import io
import numpy as np
import tensorflow as tf
import tensorflow.python.util.deprecation as deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
# --------------------------------
# from google.colab import drive
# drive.mount('/content/drive')
# --------------------------------
# with open('drive/My Drive/_ USI/Deep Learning Lab/datasets/montecristo.txt', 'r') as f:
# book = f.read()
with open('datasets/montecristo.txt', 'r') as f:
book = f.read()
book = book.lower()
# --------------------------------
### Characters distribution
import pandas
from collections import Counter
from collections import OrderedDict
import string
char_counts = Counter(book)
char_counts_byletter = OrderedDict(sorted(char_counts.items()))
print(f'Characters count ordered alphabetically: {char_counts_byletter}')
df_char_counts_byletter = pandas.DataFrame.from_dict(char_counts_byletter, orient='index')
df_char_counts_byletter.plot(kind='bar')
char_counts_alphabet = dict((i, char_counts_byletter[i]) for i in list(string.ascii_lowercase))
print(f'Alphabet count: {char_counts_alphabet}')
df_char_counts_alphabet = pandas.DataFrame.from_dict(char_counts_alphabet, orient='index')
df_char_counts_alphabet.plot(kind='bar')
top = 20
print(f'Top {top} most common characters')
char_counts.most_common()[:top]
# --------------------------------
#### Handle text to numerical conversion
vocab = sorted(set(book))
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
def text_to_num(text):
return np.array([char2idx[c] for c in text])
def num_to_text(nums):
return ''.join(idx2char[np.array(nums)])
# book = book.lower() # already done before analysis
book_to_num = text_to_num(book)
# --------------------------------
def generate_batches(source, batch_size, sequence_length):
block_length = len(source) // batch_size
batches = []
for i in range(0, block_length, sequence_length):
batch=[]
for j in range(batch_size):
start = j * block_length + i
end = min(start + sequence_length, j * block_length + block_length)
batch.append(source[start:end])
batches.append(np.array(batch, dtype=int))
return batches
# --------------------------------
# ## Little example
# example_text = 'Mi chiamo Marco e sono un gattino.'.lower()
# example_num = text_to_num(example_text)
# print(example_text)
# print(example_num)
# print(generate_batches(example_num, 3, 2))
# --------------------------------
#### Model parameters
batch_size = 16
sequence_length = 256
k = len(char_counts) # Input dimension (unique characters in the text)
hidden_units = 256 # Number of recurrent units
learning_rate = 1e-2
n_epochs = 5
# --------------------------------
### Creating dataset for training
bts = generate_batches(book_to_num, batch_size, sequence_length)
print('Number of batches', len(bts)) # ceiling(len(text) / batch_size / sequence_length)
print('Batch size', len(bts[0]))
print('Sequence length', len(bts[0][0]))
# # Just to show that the last batch is incomplete
# for i in range(len(bts)):
# for j in range(batch_size):
# if len(bts[i][j]) != 256:
# print(len(bts[i][j]), i, j)
bts = np.array(bts[:-1]) # removing last batch because incomplete
print('\nbts shape: ' , bts.shape)
data_X = bts
data_Y = np.copy(data_X)
for batch in range(np.shape(bts)[0]):
for sequence in range(np.shape(bts)[1]):
for character in range(np.shape(bts)[2] - 1):
data_Y[batch][sequence][character] = data_X[batch][sequence][character+1]
data_Y[batch][sequence][np.shape(bts)[2] - 1] = 0 # last character has no target
print('data_X shape: ', data_X.shape)
print('data_Y shape: ', data_Y.shape)
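# data_Y is data_X shifted left by one character: each position's training
# target is the next character in the text, and the last position of every
# sequence gets a dummy target of 0 because its true successor lies outside
# the batch block.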
# --------------------------------
### Model definition
seed = 0
tf.reset_default_graph()
tf.set_random_seed(seed=seed)
X_int = tf.placeholder(shape=[None, None], dtype=tf.int64)
Y_int = tf.placeholder(shape=[None, None], dtype=tf.int64)
lengths = tf.placeholder(shape=[None], dtype=tf.int64)
batch_size_tf = tf.shape(X_int)[0]
max_len = tf.shape(X_int)[1] # TODO
# One-hot encoding X_int
X = tf.one_hot(X_int, depth=k) # shape: (batch_size, max_len, k)
# One-hot encoding Y_int
Y = tf.one_hot(Y_int, depth=k) # shape: (batch_size, max_len, k)
# Recurrent Neural Network
basic_cell = tf.nn.rnn_cell.BasicRNNCell(num_units=hidden_units)
# Long-Short Term Memory Neural Network
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [256, 256]]
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=hidden_units)
init_state = lstm_cell.zero_state(batch_size_tf, dtype=tf.float32)
current_state = lstm_cell.zero_state(batch_size_tf, dtype=tf.float32)
# rnn_outputs shape: (batch_size, max_len, hidden_units)
rnn_outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, X, sequence_length=lengths, initial_state=current_state)
# rnn_outputs_flat shape: ((batch_size * max_len), hidden_units)
rnn_outputs_flat = tf.reshape(rnn_outputs, [-1, hidden_units])
# Weights and biases for the output layer
Wout = tf.Variable(tf.truncated_normal(shape=(hidden_units, k), stddev=0.1))
bout = tf.Variable(tf.zeros(shape=[k]))
# Z shape: ((batch_size * max_len), k)
Z = tf.matmul(rnn_outputs_flat, Wout) + bout
Y_flat = tf.reshape(Y, [-1, k]) # shape: ((batch_size * max_len), k)
# Creates a mask to disregard padding
mask = tf.sequence_mask(lengths, dtype=tf.float32)
mask = tf.reshape(mask, [-1]) # shape: (batch_size * max_len)
# Network prediction
pred = tf.squeeze(tf.random.categorical(Z, 1)) * tf.cast(mask, dtype=tf.int64)
pred = tf.reshape(pred, [-1, max_len]) # shape: (batch_size, max_len)
hits = tf.reduce_sum(tf.cast(tf.equal(pred, Y_int), tf.float32))
hits = hits - tf.reduce_sum(1 - mask) # Disregards padding
# Accuracy: correct predictions divided by total predictions
accuracy = hits/tf.reduce_sum(mask)
# Loss definition (masking to disregard padding)
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_flat, logits=Z)
loss = tf.reduce_sum(loss*mask)/tf.reduce_sum(mask)
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
# --------------------------------
### Training
print('\n\n --- TRAINING --- \n')
session = tf.Session()
session.run(tf.global_variables_initializer())
batches_number = np.shape(data_X)[0]
losses = np.zeros((n_epochs, batches_number))
for e in range(1, n_epochs + 1):
cs = session.run(init_state, {X_int: data_X[0], Y_int: data_Y[0]}) # initial state
for b in range(batches_number):
c_input = data_X[b]
c_target = data_Y[b]
ls = list([np.shape(c_input)[1]] * np.shape(c_input)[0])
feed = {X_int: data_X[b],
Y_int: data_Y[b],
lengths: ls,
current_state.c: cs.c,
current_state.h: cs.h}
l, _, cs = session.run([loss, train, final_state], feed)
print(f'Epoch {e}, Batch {b}. \t Loss: {l}')
losses[e-1][b] = l # saving losses
# --------------------------------
### Loss plot
import matplotlib
import matplotlib.pyplot as plt
print('losses shape', np.shape(losses))
colors = ['#616BB0', '#74C49D', '#FFFF00', '#B02956', '#B3BAFF']
# Total loss
ys = losses.reshape(-1)
xs = np.arange(len(ys))
plt.plot(xs, ys, '-', c=colors[0], label='training loss over all epochs')
plt.legend()
plt.show()
# By epochs
for e in range(len(losses)):
ys_e = losses[e]
xs_e = np.arange(len(ys_e))
plt.plot(xs_e, ys_e, '-', c=colors[0], label=f'training loss (epoch {e+1})')
plt.legend()
plt.show()
# By epochs all together
for e in range(len(losses)):
ys_e = losses[e]
xs_e = np.arange(len(ys_e))
plt.plot(xs_e, ys_e, '-', c=colors[e], label=f'training loss (epoch {e+1})')
plt.legend()
plt.show()
# --------------------------------
### Model saving
saver = tf.train.Saver()
saver.save(session, 'models/Activity7Model_1.ckpt')
# --------------------------------
### Text generation
import random
import itertools
for n in range(20):
ri = random.randrange(sum(char_counts.values()))
starting_char = next(itertools.islice(char_counts.elements(), ri, None))
gen_input = [text_to_num(starting_char)] # starting character
gen_lengths = [1] # generation is done character by character
cs = session.run(init_state, {X_int: gen_input}) # initial state
gen_text = [gen_input[0][0]] # store the generated text
for i in range(255):
cs, gen_input = session.run([final_state, pred], {X_int: gen_input, lengths: gen_lengths, current_state: cs})
gen_text.append(gen_input[0][0])
print(f'\n\n------- EXAMPLE {n+1} -------\n')
print(num_to_text(gen_text))
# --------------------------------
### Restore from model
# https://stackoverflow.com/questions/33759623/tensorflow-how-to-save-restore-a-model
# https://stackoverflow.com/questions/40442098/saving-and-restoring-a-trained-lstm-in-tensor-flow
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, 'models/Activity7Model_1.ckpt')
print('Model restored.')
ri = random.randrange(sum(char_counts.values()))
starting_char = next(itertools.islice(char_counts.elements(), ri, None))
gen_input = [text_to_num(starting_char)] # starting character
gen_lengths = [1] # generation is done character by character
cs = session.run(init_state, {X_int: gen_input}) # initial state
gen_text = [gen_input[0][0]] # store the generated text
for i in range(255):
cs, gen_input = session.run([final_state, pred], {X_int: gen_input, lengths: gen_lengths, current_state: cs})
gen_text.append(gen_input[0][0])
print(f'\n\n------- EXAMPLE -------\n')
print(num_to_text(gen_text))
# --------------------------------
|
[
"m.ferri17@campus.unimib.it"
] |
m.ferri17@campus.unimib.it
|
cbf8b086ac33bdab4294086c67a15e4d00b65498
|
69d9ffc34f6f542bcb6f2f5658d23d8dcd72eb7b
|
/restframework/filtering/api/views.py
|
ca1e1fe9fdfbf457f9d3b09abca2bb369992e9f6
|
[] |
no_license
|
golammahmud/rest-framework
|
9c82254944729d9d669ab38c8bd2cb48f789e66f
|
913e3630469c14fa67edc419d17184e94d19c9b1
|
refs/heads/master
| 2023-08-24T15:59:49.139124
| 2021-10-12T14:54:53
| 2021-10-12T14:54:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,069
|
py
|
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication,TokenAuthentication
from rest_framework.permissions import IsAuthenticated,IsAdminUser,DjangoModelPermissions,IsAuthenticatedOrReadOnly
from .models import Student
from .serializers import StudentSerializer
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from rest_framework.throttling import AnonRateThrottle,UserRateThrottle
class StudentViewSet(viewsets.ModelViewSet):
queryset = Student.objects.all()
serializer_class = StudentSerializer
authentication_classes = [SessionAuthentication,BasicAuthentication]
permission_classes = [IsAuthenticatedOrReadOnly,]
throttle_classes = [AnonRateThrottle,UserRateThrottle]
    # filter on a per-logged-in-user basis
def get_queryset(self):
user=self.request.user
return Student.objects.filter(passby__username=user)
# for a specified throttle rate per class
# from .throttling import JackRateThrottling
# class StudentViewSet(viewsets.ModelViewSet):
# queryset = Student.objects.all()
# serializer_class = StudentSerializer
# authentication_classes = [SessionAuthentication,BasicAuthentication]
# permission_classes = [IsAuthenticatedOrReadOnly,]
# throttle_classes = [AnonRateThrottle,JackRateThrottling]
# for user in User.objects.all():
# Token.objects.get_or_create(user=user)
from rest_framework.authtoken.models import Token
# token = Token.objects.create(user=instance)
# print(token.key)
# signals to create a token for each new user instance
# from django.conf import settings
# from django.db.models.signals import post_save
# from django.dispatch import receiver
# from rest_framework.authtoken.models import Token
#
# @receiver(post_save, sender=settings.AUTH_USER_MODEL)
# def create_auth_token(sender, instance=None, created=False, **kwargs):
# if created:
# Token.objects.create(user=instance)
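# With get_queryset overridden, the class-level queryset mainly lets DRF
# routers derive a default basename; at request time every call re-filters
# Student rows to the logged-in user via the passby__username lookup.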
|
[
"golam.mahmud99@gmail.com"
] |
golam.mahmud99@gmail.com
|
d3b5e91d6fe0172f1293634ddffe96ec82a5abea
|
5c1afc37f583622c820cdc093210dc4122278f8e
|
/mummy/models.py
|
80624c800b41b335ed18ab393be636c57e438d34
|
[] |
no_license
|
jordangallacher/LucasSite
|
681474ee2fd9228b33833f959331846769fefb82
|
9363a702ecf76ade4ecccdc22d8ef4ddfb9d8b3e
|
refs/heads/master
| 2022-01-24T07:08:00.152391
| 2019-07-30T07:37:34
| 2019-07-30T07:37:34
| 198,356,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
from django.db import models
# Create a Blog model here.
# title
# pub_date
# body
# image
class Mummy(models.Model):
image = models.ImageField(upload_to='images/')
title = models.CharField(max_length=255)
pub_date = models.DateTimeField()
body = models.TextField()
def __str__(self):
return self.title # This function names the posts in the admin panel
def summary(self):
return self.body[:100]
def pub_date_pretty(self):
return self.pub_date.strftime('%b %e %Y')
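    # Portability note: '%e' (space-padded day of month) is a POSIX/glibc
    # strftime extension; on platforms without it (e.g. Windows), '%b %d %Y'
    # is the portable, zero-padded equivalent.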
# Add the Blog app to settings
# Create a migration
# Migrate
# Add to the Admin
|
[
"jordan.gallacher@gmail.com"
] |
jordan.gallacher@gmail.com
|
daabf75fa0ee4ee0fca9b3cd286480f785179c3c
|
c42d708c04d510ba34ab4b7d058a16ca8098d8de
|
/nogeo/gis.py
|
f51a1ca2cc43af43f931dba60f5e418ed4979215
|
[] |
no_license
|
kuki-gs/noweapons
|
75bb6764b73b92877fb0090ea42bbc3b10186c99
|
8bbb68d0ffce761f5a556d613babc5bb4a16b1dc
|
refs/heads/master
| 2022-02-22T13:43:24.764841
| 2019-09-19T08:16:01
| 2019-09-19T08:16:01
| 209,465,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,454
|
py
|
# -*- coding: utf-8 -*-
import math
import sys  # needed by the length check in gen_kml_cluster below
import pandas as pd
from lxml import etree
from pykml.factory import KML_ElementMaker as KML
'''
Convert longitude/latitude to planar rectangular coordinates (Miller projection).
Input and output: both are lists carrying longitude/latitude or x/y coordinates.
'''
def GPS_to_XY(gps):
    L = 6381372 * math.pi * 2  # circumference of the Earth
    W = L  # after unfolding to a plane, the x axis equals the circumference
    H = L / 2  # the y axis is roughly half the circumference
    mill = 0.65  # a constant of the Miller projection, ranging roughly within +/-2.3
    x = gps[0] * math.pi / 180  # convert longitude from degrees to radians
    y = gps[1] * math.pi / 180  # convert latitude from degrees to radians
    y = 1.25 * math.log(math.tan(0.25 * math.pi + 0.4 * y), 10)  # the Miller projection transform
    # convert radians to actual distance
    #x = (W / 2) + (W / (2 * math.pi)) * x
    x = (W / (2 * math.pi)) * x
    #y = (H / 2) - (H / (2 * mill)) * y
    y = (H / (2 * mill)) * y
return [int(x),int(y)]
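# Illustrative usage (hypothetical coordinates, not from the source):
# GPS_to_XY([116.4, 39.9]) maps degrees of longitude/latitude to [x, y] in
# metres on the Miller plane; with the offset terms commented out above, the
# origin sits at longitude 0, latitude 0.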
'''
Convert longitude/latitude to planar rectangular coordinates (Mercator projection).
Input and output: both are lists carrying longitude/latitude or x/y coordinates.
Parameter notes:
X: horizontal rectangular coordinate, in metres (m);
Y: vertical rectangular coordinate, in metres (m);
B: latitude, in radians (rad);
L: longitude, in radians (rad);
B0: reference latitude of the projection, B0 = 0, in radians (rad);
L0: longitude of the coordinate origin, L0 = 0, in radians (rad);
a: semi-major axis of the Earth ellipsoid, a = 6378137.0000, in metres (m);
b: semi-minor axis of the Earth ellipsoid, b = 6356752.3142, in metres (m);
e1: first eccentricity;
e2: second eccentricity.
N: radius of curvature in the prime vertical, in metres (m), = a**2/b / math.sqrt(1+e2**2 * math.cos(B0)**2).
K = N(B0)*math.cos(B0)
'''
def GPS_to_XY2(gps):
B0 =0
L0 =0
a=6378137.000
b=6356752.314
e1=math.sqrt(a**2-b**2)/a
e2=math.sqrt(a**2-b**2)/b
K=a**2/b*math.cos(B0)/math.sqrt(1 + e2**2 * math.cos(B0)**2)
B=gps[1]*math.pi/180
L=gps[0]*math.pi/180
x=K*(L-L0)
y=K*math.log(math.tan(math.pi/4+B/2) * math.pow((1-e1*math.sin(B))/(1+e1*math.sin(B)),e1/2))
return [int(x),int(y)]
'''
GCJ2WGS: convert GCJ-02 longitude/latitude to WGS-84 longitude/latitude.
Input and output: both are lists carrying longitude and latitude.
'''
def gcj2gws(location):
    # location format, e.g.: locations[1] = "113.923745,22.530824"
lon = float(location[0])
lat = float(location[1])
    a = 6378245.0  # Krasovsky ellipsoid semi-major axis a
    ee = 0.00669342162296594323  # square of the Krasovsky ellipsoid's first eccentricity
    PI = 3.14159265358979324  # pi
    # the conversion formulas follow
x = lon - 105.0
y = lat - 35.0
    # longitude
dLon = 300.0 + x + 2.0 * y + 0.1 * x * x + 0.1 * x * y + 0.1 * math.sqrt(abs(x));
dLon += (20.0 * math.sin(6.0 * x * PI) + 20.0 * math.sin(2.0 * x * PI)) * 2.0 / 3.0;
dLon += (20.0 * math.sin(x * PI) + 40.0 * math.sin(x / 3.0 * PI)) * 2.0 / 3.0;
dLon += (150.0 * math.sin(x / 12.0 * PI) + 300.0 * math.sin(x / 30.0 * PI)) * 2.0 / 3.0;
    # latitude
dLat = -100.0 + 2.0 * x + 3.0 * y + 0.2 * y * y + 0.1 * x * y + 0.2 * math.sqrt(abs(x));
dLat += (20.0 * math.sin(6.0 * x * PI) + 20.0 * math.sin(2.0 * x * PI)) * 2.0 / 3.0;
dLat += (20.0 * math.sin(y * PI) + 40.0 * math.sin(y / 3.0 * PI)) * 2.0 / 3.0;
dLat += (160.0 * math.sin(y / 12.0 * PI) + 320 * math.sin(y * PI / 30.0)) * 2.0 / 3.0;
radLat = lat / 180.0 * PI
magic = math.sin(radLat)
magic = 1 - ee * magic * magic
sqrtMagic = math.sqrt(magic)
dLat = (dLat * 180.0) / ((a * (1 - ee)) / (magic * sqrtMagic) * PI);
dLon = (dLon * 180.0) / (a / sqrtMagic * math.cos(radLat) * PI);
wgsLon = lon - dLon
wgsLat = lat - dLat
return [wgsLon,wgsLat]
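# Illustrative usage (sample value taken from the comment above):
# gcj2gws([113.923745, 22.530824]) returns the corresponding WGS-84
# [lon, lat]; for mainland-China coordinates the GCJ-02 offset removed here
# is typically on the order of a few hundred metres.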
'''
Convert longitude/latitude columns of a DataFrame.
Input: a DataFrame containing the fields LONGITUDE and LATITUDE.
Output: the input with two added columns, wgs_lon and wgs_lat.
'''
def gcj2wgs_for_df(data_with_gcj):
list_gcj = data_with_gcj[['LONGITUDE','LATITUDE']].values.tolist()
list_wgs = list(map(gcj2gws,list_gcj))
data_with_gcj=pd.concat([data_with_gcj.reset_index(level=0).drop(['index'],axis=1),pd.DataFrame(list_wgs,columns=["wgs_lon","wgs_lat"])],axis=1)
return data_with_gcj
"""
生成tac为单位的kml文件
输入:data类型df,包含字段wgs_lon、wgs_lat、ECI、RSRP、SINR以及TAC、tac_cluster;district_name区县名字, path_result保存路径
输出:kml节点,包含采样点集合的cluser的图层节点
"""
def gen_kml_tac(data, district_name, path_result):
    # create the Google Earth layer file
list_tac = sorted(data['跟踪区'].astype(int).drop_duplicates().tolist())
for tac in list_tac:
df_tac_data = data[data['跟踪区'] == tac]
list_cluster = sorted(df_tac_data['tac_cluster'].drop_duplicates().tolist())
for cluster in list_cluster:
df_cluster_data = df_tac_data[df_tac_data['tac_cluster'] == cluster]
            # if this is the first cluster in the tac, create a tac folder and add the first cluster node
if cluster == list_cluster[0]:
kml_tac = KML.Folder(KML.name("跟踪区=" + str(tac)),
gen_kml_cluster(df_cluster_data, cluster))
            # append the subsequent clusters
else:
kml_tac.append(gen_kml_cluster(df_cluster_data, cluster))
        # if this is the first tac, create the kml document and add the first tac node
if tac == list_tac[0]:
kml_doc= KML.Document(KML.name(district_name), kml_tac)
else:
kml_doc.append(kml_tac)
etree_doc = etree.tostring(etree.ElementTree(kml_doc), pretty_print=True)
#
with open(path_result + district_name + '.kml', 'wb') as fp:
fp.write(etree_doc)
"""
生成一个包含采样点集合的cluser的kml图层节点
pykml是在python2下写的,在导入以后,有些地方可能会出错,所以需要修改pykml,主要是一些print格式有问题
输入:类型df,包含字段wgs_lon、wgs_lat、ECI、RSRP、SINR
输出:kml节点,包含采样点集合的cluser的图层节点
"""
def gen_kml_cluster(data,cluster_name):
lon=data['wgs_lon']
lat=data['wgs_lat']
    if len(lon) != len(lat):
        print('lon != lat nums; please check the longitude/latitude data')
        sys.exit(0)
    # create the folder and add the first point
kml_cluster=KML.Folder(KML.name(str(cluster_name)),
KML.styleUrl("#m_ylw-pushpin"),
KML.Placemark(KML.styleUrl("#m_ylw-pushpin"),
KML.description('ECI:'+str(int(data["ECI"].iloc[0])),
'SINR:'+str(round(data["SINR"].iloc[0],1)),
'RSRP:'+str(round(data["RSRP"].iloc[0],1))),
KML.Point(KML.styleUrl("#m_ylw-pushpin"),
KML.coordinates(str(lon.iloc[0])+','+str(lat.iloc[0])+',0'))))
    # append the remaining sample points
for i in range(1,len(lon)):
kml_cluster.append(KML.Placemark(KML.description('ECI:'+str(int(data["ECI"].iloc[i])),
'SINR:'+str(round(data["SINR"].iloc[i], 1)),
'RSRP:'+str(round(data["RSRP"].iloc[i], 1))),
KML.Point(KML.coordinates(str(lon.iloc[i])+','+str(lat.iloc[i])+',0'))))
# print etree.tostring(etree.ElementTree(kml_file ),pretty_print=True)
return kml_cluster
def gen_kml_enb(data_wgs, level=3):  # level 1 = tac, level 2 = enb, level 3 = cell; data_wgs already contains these fields, so the gongcan_l table is not needed
data_wgs = data_wgs[['p_day', 'CELL_RSRP', 'CELL_SINR', 'CELL_CELLID', 'eNBID', '区县', '频段', '站型', '方位角', 'wgs_lon',
'wgs_lat']]
data_wgs["ECI"] = data_wgs["CELL_CELLID"]
# print(data_wgs.head())
# data_wgs=pd.merge(data_wgs,gongcan_l[['ECI','eNBID','跟踪区']],left_on='CELL_CELLID',right_on="ECI",how="left")
data_wgs.dropna(inplace=True)
    cell_list = data_wgs.CELL_CELLID.astype(int).drop_duplicates()  # used to obtain the co-site, same-direction cells of these cells, whose coverage sample points then generate the Google map file
cell_list=cell_list.to_list()
ENB_list=data_wgs.eNBID.astype(int).drop_duplicates().to_list()
# import os
# filename = os.path.basename(file_csv)
for i,ENB in enumerate(ENB_list):
ENB_data=data_wgs[data_wgs['eNBID']==ENB]
classify_list=ENB_data['CELL_CELLID'].drop_duplicates().to_list()
# classify_list=sorted(list(classify_list))
        for j, clas in enumerate(classify_list):  # points are output per ENB here, so ENB_list replaces cell_list
point_data=ENB_data[ENB_data['CELL_CELLID']==clas]
            if j == 0:  # i.e. clas == classify_list[0]
                cell_kml = KML.Folder(KML.name("ENB:" + str(ENB)),
                                      gen_kml_point(point_data, clas))  # add a document root so the tree structure is not scrambled when appending
else:
cell_kml.append(gen_kml_point(point_data,clas))
if i==0:
ENB_kml=KML.Folder(KML.name('_反向覆盖小区采样点'+str(ENB)),cell_kml)
else:
ENB_kml.append(cell_kml)
return ENB_kml
|
[
"noreply@github.com"
] |
kuki-gs.noreply@github.com
|
0e2e8fb97a3cefd834a2f935fe8284b71ce38843
|
bdbaa26a91b46598a1b77b736639e97e39be5572
|
/MapPro/orders/migrations/0003_order_is_archived.py
|
a93d436121cb24c4eddd09c7b70a0675ea53faba
|
[] |
no_license
|
Armenvardanyan95/mappro_api
|
457c7bb054909150d1cf1e6b74a9e57fd80c99ff
|
30d8acf9560097aad92a8379eb1d0c58384ed351
|
refs/heads/master
| 2020-03-12T02:24:43.928045
| 2018-04-20T18:58:32
| 2018-04-20T18:58:32
| 130,401,977
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-20 18:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0002_auto_20170812_1437'),
]
operations = [
migrations.AddField(
model_name='order',
name='is_archived',
field=models.BooleanField(default=True),
),
]
|
[
"armenvardanyan95@gmail.com"
] |
armenvardanyan95@gmail.com
|
c89f1bd79b26aa7ced8a1a83e589f2abcbd2a9c4
|
6a6b9a3f06e1a5bf80b9313428ced8719aafe644
|
/image_load.py
|
57d2f3d9259b7443b7fd57f87d27e93e1e1c362f
|
[] |
no_license
|
PhilCoker/BnP-YGO
|
fe1b291431313e1d9089c424f28f4a8790ca89ec
|
892ebe7d5311b0ef17fba2bec04cf991a749d6b9
|
refs/heads/master
| 2021-01-20T11:13:44.963048
| 2014-10-13T01:11:27
| 2014-10-13T01:11:27
| 23,847,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
import os
import sys
import urllib
if( len(sys.argv) < 2 ):
print 'Provide the deck name.'
sys.exit(0)
base = 'http://static.api5.studiobebop.net/ygo_data/card_images/'
#See if img dir exists, if not then create it
if 'img' not in os.listdir('.'):
os.mkdir( 'img' )
#Go through all cards in deck and download images, put them in img
with open('deck\\' + sys.argv[1]) as file:
for line in file:
replaced = line.replace('\n', '').replace( ' ', '_' ).replace('-','_')
url = base + replaced + '.jpg'
print 'Looking at url ', url
urllib.urlretrieve( url, 'img\\' + replaced + '.png' )
|
[
"phillip.coker@gmail.com"
] |
phillip.coker@gmail.com
|
da8ed9569251ebd3679be699e72cfe847fd69021
|
3fbaf156377676d0971275dbbecdb1198c0016d6
|
/apps/organization/models.py
|
fe3f16fda9b27443a7a94e65e4df3b9006eab185
|
[] |
no_license
|
handsome-man/MxOnline
|
c171848546b0bdcd1d6fc29f2364f3d9f8803f8b
|
a3253f56552d5cc20b248c39b175815aed297c14
|
refs/heads/master
| 2020-04-29T06:34:58.819180
| 2019-05-17T07:42:42
| 2019-05-17T07:42:42
| 175,921,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,471
|
py
|
# organization/models.py
from datetime import datetime
from django.db import models
class CityDict(models.Model):
name = models.CharField('城市', max_length=20)
desc = models.CharField('描述', max_length=200)
add_time = models.DateTimeField(default=datetime.now)
class Meta:
verbose_name = '城市'
verbose_name_plural = verbose_name
class CourseOrg(models.Model):
ORG_CHOICES = (
("pxjg", u"培训机构"),
("gx", u"高校"),
("gr", u"个人"),
)
name = models.CharField('机构名称', max_length=50)
desc = models.TextField('机构描述')
category = models.CharField(max_length=20, choices=ORG_CHOICES, verbose_name=u"机构类别", default="pxjg")
click_nums = models.IntegerField('点击数', default=0)
tag = models.CharField('机构标签', max_length=10, default='全国知名')
fav_nums = models.IntegerField('收藏数', default=0)
students = models.IntegerField("学习人数", default=0)
course_nums = models.IntegerField("课程数", default=0)
image = models.ImageField('logo', upload_to='org/%Y/%m', max_length=100)
address = models.CharField('机构地址', max_length=150, )
city = models.ForeignKey(CityDict, verbose_name='所在城市', on_delete=models.CASCADE)
add_time = models.DateTimeField(default=datetime.now)
class Meta:
verbose_name = '课程机构'
verbose_name_plural = verbose_name
    # get the number of teachers
def get_teacher_nums(self):
return self.teacher_set.all().count()
class Teacher(models.Model):
org = models.ForeignKey(CourseOrg, verbose_name='所属机构', on_delete=models.CASCADE)
name = models.CharField('教师名', max_length=50)
work_years = models.IntegerField('工作年限', default=0)
work_company = models.CharField('就职公司', max_length=50)
work_position = models.CharField('公司职位', max_length=50)
points = models.CharField('教学特点', max_length=50)
click_nums = models.IntegerField('点击数', default=0)
fav_nums = models.IntegerField('收藏数', default=0)
teacher_age = models.IntegerField('年龄', default=25)
image = models.ImageField(
default='',
upload_to="teacher/%Y/%m",
verbose_name="头像",
max_length=100)
add_time = models.DateTimeField(default=datetime.now)
class Meta:
verbose_name = '教师'
verbose_name_plural = verbose_name
|
[
"lunan.liu@gesion.net"
] |
lunan.liu@gesion.net
|
e6e443870ee55b979e6309a4240301ce1c4bfac1
|
85284870656a0ce68790492d18fac20dd323d5e1
|
/ps2.py
|
9e5745e261a426ce5a3e4b258334b51f842c1d5f
|
[] |
no_license
|
2018EET2555/Assignment_8
|
da448c9a2c717632b179091ff4216412216ac1da
|
f7ef82dfd103bb242e1103f3771662c9391e8c67
|
refs/heads/master
| 2020-07-27T20:53:40.862713
| 2019-09-18T07:04:15
| 2019-09-18T07:04:15
| 209,213,079
| 0
| 0
| null | 2019-09-18T03:54:25
| 2019-09-18T03:54:25
| null |
UTF-8
|
Python
| false
| false
| 3,355
|
py
|
#print welcome message
print("Welcome to the Game!")
#this function enters the value at the position and returns 4 if the game is a draw, 1 if player 1 wins, 2 if player 2 wins, and 0 to continue the game
def enter(p,v,player):
    flag=4 #initialize a flag to check for a draw
    i=(p-1)/3 #array row index from the position
    j=(p-1)%3 #array column index from the position
arr[i][j]=v #assign value
# sum_r[i][player]+=
sum_r[i][0]+=v #add value to row
sum_r[i][1]+=1 #increase count of row
    if (sum_r[i][0]==15 and sum_r[i][1]==3): #if the row sum is 15 with 3 entries, return the outcome
return player
sum_c[j][0]+=v #add value to col
sum_c[j][1]+=1 #increase count of col
    if sum_c[j][0]==15 and sum_c[j][1]==3: #if the column sum is 15 with 3 entries, return the outcome
return player
if(i==j):
sum_d[0][0]+=v #add value to diagional
sum_d[0][1]+=1 #increase count of diagional
if i+j==2:
sum_d[1][0]+=v #add value to diagional
sum_d[1][1]+=1 #increase count of diagional
    if (sum_d[0][0]==15 and sum_d[0][1]==3) or (sum_d[1][0]==15 and sum_d[1][1]==3): #check the diagonals for a sum of 15 and return the outcome
return player
var=0
    for i in range(3): #if all the cells are filled, the game is a draw
        var+=sum_r[i][1]
    if(var==9): #declare the draw
        return flag
    for k in range(3): #this loop checks whether anyone can still win on a row or column
        if sum_r[k][0]<15 or sum_c[k][0]<15:
            flag=0
            return flag
    for k in range(2): #this loop checks whether anyone can still win on a diagonal
        if(sum_d[k][0]<15):
            flag=0
            return flag
return flag #return draw
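# Background: this is a numerical tic-tac-toe variant. Player 1 plays odd
# digits, player 2 even digits, each digit at most once (enforced by isvalid
# below), and a line wins only when its three cells are filled and sum to 15,
# hence every winning check above tests both sum == 15 and count == 3.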
def isvalid(p,v,player): #check validity of user entered input
if p>9 or p<1 or (player==1 and v%2==0) or (player==2 and v%2==1) or v>9 or v<1 or (v in num) or (p in pos):
return 0
return 1
#handles the input and game flow
def game():
print("Player 1's chance")
player=1
    while(1): #loop over turns until someone wins or the game is drawn
print("player {play}'s chance".format(play=player)) #print which players chance
p,v=raw_input("Enter the position and number to be entered: ").split() #take input
        if(not(p.isdigit() and v.isdigit())): #check that digits were entered, else give an error
print("pos or value not valid enter again")
continue
p=int(p)
v=int(v)
if isvalid(p,v,player): #check validity
dic=enter(p,v,player) #enter value in array cell
for i in arr: #prints the array
for j in i:
print("{v} |".format(v=j)),
print("\n")
            num.append(v) #track the numbers entered
            pos.append(p) #track the positions entered
if(dic==4): #print draw
print("Game Draw")
break
if(dic==player): #print if player wins
print("player {play} wins".format(play=player))
break
            if player==1: #change the player each loop
player=2
else:
player=1
else:
print("pos or value not valid enter again") #if not valid [positoin enter again]
#-----------------------------------------------------
flag="1" #to check whether user wants to continue
while flag=="1": #loop till flag 1
sum_r=[[0,0] for i in range(3)] #stores rows sum and elememnt count
sum_c=[[0,0] for i in range(3)] #stores column sum and elememnt count
sum_d=[[0,0] for i in range(2)]#stores diagonal sum and elememnt count
num=[] #contains number enytered
pos=[] #contains posiotion entered
arr=[[0 for i in range(3)] for j in range(3)] #game array
game() #call game
flag=str(raw_input("enter 1 for continue enter 0 other key for exit ")) #ask if user wants to continue
|
[
"eet182555@ee.iitd.ac.in"
] |
eet182555@ee.iitd.ac.in
|
434186f4c4cad82219aec208733f761601a3a107
|
db73aac3a075276f6917250417dcfa08964bbecd
|
/src/tests/conftest.py
|
e05d911358b8777aeb3fb535b1db4f65a78188d8
|
[] |
no_license
|
alexandershov/aio
|
3000af9cc699dcf95a6d7bec05b6dfbd7a2d4def
|
da50e258e8416700ff4d6eff3921b3e95e49ce28
|
refs/heads/master
| 2020-04-05T17:41:24.498274
| 2018-12-02T16:54:34
| 2018-12-02T16:54:55
| 157,071,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
import logging
import pytest
import aio
@pytest.fixture(name='future')
def future_fixture(request):
del request # unused
return aio.Future()
@pytest.fixture(name='loop', autouse=True)
def loop_fixture(request):
del request # unused
loop = aio.new_event_loop()
aio.set_event_loop(loop)
yield loop
loop.close()
aio.set_event_loop(None)
@pytest.fixture(scope='session', autouse=True)
def logging_fixture(request):
logger = logging.getLogger('aio')
level = _get_logging_level(request)
if level is not None:
logger.setLevel(level)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_logging_level(request):
level_name = request.config.getoption('--log-level')
if level_name is None:
return None
if not hasattr(logging, level_name):
raise RuntimeError(f'Unknown log level: {level_name}')
return getattr(logging, level_name)
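# loop_fixture is autouse, so every test runs against a fresh aio event loop:
# the code before the yield is pytest fixture setup, and the code after it is
# teardown that closes the loop and unsets the global event loop.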
|
[
"codumentary.com@gmail.com"
] |
codumentary.com@gmail.com
|
52fb348cf10da06e9826fe37f4a38f2cdf289d0e
|
53405777b878efa8b2200f73455bf47701227dd8
|
/answer_desc2.py
|
13de4a8aea39397b335c4ec09c13a8a788732022
|
[] |
no_license
|
shraddhansahula/AutomaticQA
|
912ac31e9fc78094900af9987af75effe994315f
|
fe89d64871b4e5a5cc2622e099641484343cae3a
|
refs/heads/master
| 2020-12-30T13:07:54.801108
| 2017-05-28T19:13:34
| 2017-05-28T19:13:34
| 91,323,824
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,716
|
py
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('./LCS')
sys.path.append('./headRelated')
sys.path.append('./ngramOverlap')
sys.path.append('./skipBigram')
sys.path.append('./synHypOverlap')
sys.path.append('./treeKernel')
import os
import re
import pickle
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from lxml import etree
from lcs import lcs_wlcs #returns lcs, wlcs
from head import head_related #returns relHeadScore, exactHeadScore
from ngram import ngram_overlap #returns 1gram score
from skip import skip_bigram #returns skip score
from syn import syn_hyp_overlap #returns synOverlap, hypOverlap, glossOverlap
from synTreeKernel import syn_tree_kernel #returns treekernel score
import multiprocessing as mp
from nltk.corpus import wordnet as wn
"""
This code requires a chapter number and a question as first two arguments. Also needs a stop_words.txt.
"""
# iORj = 0 #0 if i else 1
# chapNum = int(sys.argv[1])
# if chapNum <=8:
# iORj = 0
# else:
# iORj = 1
# chapNum = chapNum - 8
# print chapNum, iORj
def extract_sentences(chapNum):
# sentences = []
# os.chdir("/home/shraddhan/Honors/DUC Dataset/DUC2006_Summarization_Documents/duc2006_docs/D0601A")
# listOfFiles = os.listdir(".")
# for file in listOfFiles:
# print file
# inp = etree.parse(file)
# root = inp.getroot()
# for child in root.iter():
# if child.tag == "P":
# text = child.text.split(".")
# for i,j in enumerate(text):
# text[i] = text[i].replace("\n", " ")
# text[i] = text[i].replace("\t", " ")
# if text[i] and not text[i].isspace():
# sentences.append(text[i])
# return sentences
classIdentifier = ""
if chapNum <= 8:
classIdentifier = "i"
else:
chapNum = chapNum - 8
classIdentifier = "j"
file = open("./Dataset_NCERT/Dataset-txt/"+classIdentifier+"ess30"+str(chapNum)+".txt")
sentences = file.read()
file.close()
sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', sentences)
for i, s in enumerate(sentences):
s = s.replace("\n", " ")
sentences[i] = s
return sentences
# def extract_sentences_anurag(query):
# f = open("stop_words.txt", "r")
# stopWords = [word for word in f.readlines()]
# f.close()
# index = {}
# with open("inverted_index") as f:
# for line in f:
# line = line.strip("\n").split("=")
# index[line[0]] = line[1].split("||")
# queryWords = word_tokenize(query)
# q = [word for word in queryWords if word not in stopWords]
# queryRel = q[:]
# for word in q:
# for i, j in enumerate(wn.synsets(word)):
# for l in j.lemmas():
# queryRel.append(l.name())
# queryRel = list(set(queryRel))
# sentenceIDs = []
# for i in queryRel:
# if i in index:
# sentenceIDs += index[i]
# sentenceIDs = [int(i) for i in sentenceIDs]
# relevantSent = [i for i in sorted(sentenceIDs) if i > 211 and i < 520]
# f = open("sentences.txt", "r")
# sentence_list = [sent.strip("\n") for sent in f.readlines()]
# f.close()
# final_list = [sentence_list[i] for i in relevantSent]
# return final_list
score_feature_candidate = []
global i
i = 0
def extract_features(candidate):
feature_vector = []
global i
i += 1
print "finding features for", i
try:
feature_vector += list(lcs_wlcs(query, candidate))
feature_vector += list(head_related(query, candidate))
feature_vector.append(ngram_overlap(query, candidate))
feature_vector.append(skip_bigram(query, candidate))
feature_vector += list(syn_hyp_overlap(query, candidate))
feature_vector.append(syn_tree_kernel(query, candidate))
except:
feature_vector = [0,0,0,0,0,0,0,0,0,0]
#score_feature_candidate.append((0,feature_vector,candidate
print "processed", i
print feature_vector
return (0, feature_vector, candidate)
with open("question.txt") as f:
for line in f:
line = line.split("|")
chapNum = int(line[0])
query = str(line[1])
print chapNum, query
candidates = extract_sentences(chapNum)
print len(candidates)
pool = mp.Pool(processes=12)
features = pool.map(extract_features, candidates)
features = [(x[0], x[1], unicode(x[2], "utf-8")) for x in features]
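        # Hand-tuned weights, one per feature, in the order the feature vector
        # is built in extract_features: [lcs, wlcs, relHead, exactHead, 1-gram,
        # skip-bigram, synOverlap, hypOverlap, glossOverlap, tree kernel].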
weights = [3.0000000000000013, 6.999999999999991, 6.799999999999992, 0.1, 0.2, 0.1, 1.6000000000000003, 1.0999999999999999, 33.90000000000021, 0.30000000000000004]
for k, f in enumerate(features):
f = list(f)
score = 0
#print f
for i,j in enumerate(f[1]):
score += weights[i]*j
#print score
f[0] = score
#print f
f[:0] = [k]
f = tuple(f)
features[k] = f
lenFeatures = len(features)
windowFeature = []
windowSize = 4
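        # Slide a fixed window of 4 consecutive sentences over the chapter and
        # score each window by summing its sentences' weighted scores; the
        # summary is built from the highest-scoring windows below.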
for k, f in enumerate(features):
if k > lenFeatures - windowSize:
break
windowScore = 0
windowSentence = ""
for i in xrange(0, windowSize):
windowScore += features[k+i][1]
windowSentence += features[k+i][3]+" "
windowFeature.append((windowScore, windowSentence.strip()))
windowFeature = sorted(windowFeature, key=lambda x: -x[0])
# for i in xrange(0,5):
# print windowFeature[i]
wordCount = 0
i = 0
summary = []
#generating summary
while(1):
sentence = windowFeature[i][1]
i += 1
wordList = nltk.word_tokenize(sentence)
wordCount += len(wordList)
summary.append(sentence)
if wordCount>450:
break
new_summary = []
for sent in summary:
# print sent_tokenize(sent)
# print ""
temp_list = sent_tokenize(sent)
for s in temp_list:
if s not in new_summary:
new_summary.append(s)
# new_summary = list(set(new_summary))
answer = ""
for s in new_summary:
answer += s
fileName = open("answers.txt", "a+")
fileName.write(query + "\n")
fileName.write(answer + "\n\n")
fileName.close()
|
[
"gupta.anu1995@gmail.com"
] |
gupta.anu1995@gmail.com
|
87ddf8d6595c8889f6590dc836af99201a598e30
|
46dc310cf50c41bd909c22d26060e6e2b525a844
|
/reports.py
|
eac1991fd3d4f2e5c0f857836ed7d9c7963e18d0
|
[] |
no_license
|
mderamus19/student-exercise-reports
|
090fd224c81975c840314f7e986a35b59ffd28cf
|
6d45f0e0b3c2f08bdf95b20fd066d11dc7cd3f9b
|
refs/heads/master
| 2020-07-26T04:13:02.726894
| 2019-09-16T18:11:22
| 2019-09-16T18:11:22
| 208,530,656
| 0
| 0
| null | 2019-09-16T18:11:16
| 2019-09-15T02:24:15
|
Python
|
UTF-8
|
Python
| false
| false
| 7,154
|
py
|
import sqlite3
class Student():
def __init__(self, first, last, handle, cohort):
self.first_name = first
self.last_name = last
self.slack_handle = handle
self.student_cohort = cohort
def __repr__(self):
return f'{self.first_name} {self.last_name} is in {self.student_cohort}'
class Cohort():
def __init__(self, cohortName):
self.Name = cohortName
def __repr__(self):
return f'{self.Name}'
class Exercise():
def __init__(self, id, name, language):
self.id = id
self.name = name
self.language = language
def __repr__(self):
return f'{self.name} {self.language}'
class Javascript():
def __init__(self, id, name, language):
self.id = id
self.name = name
self.language = language
def __repr__(self):
return f'{self.name} {self.language}'
class Python():
def __init__(self, id, name, language):
self.id = id
self.name = name
self.language = language
def __repr__(self):
return f'{self.name} {self.language}'
class Student_Cohort():
def __init__(self, id, first_name, last_name, cohort):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.cohort = cohort
def __repr__(self):
        return f'{self.first_name} {self.last_name} {self.cohort}'
class Instructor_Cohort():
def __init__(self, id, first_name, last_name, cohort):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.cohort = cohort
def __repr__(self):
        return f'{self.first_name} {self.last_name} {self.cohort}'
class StudentExerciseReports():
"""Methods for reports on the Student Exercises database"""
def __init__(self):
self.db_path = "/Users/misty/workspace/python/StudentExercises/studentexercises.db"
def all_students(self):
"""Retrieve all students with the cohort name"""
with sqlite3.connect(self.db_path) as conn:
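            # row_factory turns each fetched row into a Student instance
            # (columns: id, first, last, handle, cohort id, cohort name).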
conn.row_factory = lambda cursor, row: Student(row[1], row[2], row[3], row[5])
db_cursor = conn.cursor()
db_cursor.execute("""
select student_Id,
s.first_name,
s.last_name,
s.slack_handle,
s.student_cohort_Id,
c.Name
from Student s
join Cohort c on s.student_cohort_Id = c.Id
order by s.student_cohort_Id
""")
all_students = db_cursor.fetchall()
for student in all_students:
print(student)
def all_cohorts(self):
'''Retrieve all cohorts'''
with sqlite3.connect(self.db_path) as conn:
conn.row_factory = lambda cursor, row: Cohort(row[0])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT c.Name
FROM Cohort c
""")
all_cohorts = db_cursor.fetchall()
for cohort in all_cohorts:
print(cohort)
def all_exercises(self):
'''Retrieve all exercises'''
with sqlite3.connect(self.db_path) as conn:
conn.row_factory = lambda cursor, row: Exercise(row[0], row[1], row[2])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT exercise_Id,
exercise_name,
exercise_language
FROM exercise
""")
all_exercises = db_cursor.fetchall()
for exercise in all_exercises:
print(exercise)
def all_js_exercises(self):
'''Retrieve all javascript exercises'''
with sqlite3.connect(self.db_path) as conn:
conn.row_factory = lambda cursor, row: Javascript(row[0], row[1], row[2])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT exercise_Id,
exercise_name,
exercise_language
FROM exercise
WHERE exercise_language = "Javascript"
""")
all_js_exercises = db_cursor.fetchall()
for javascript in all_js_exercises:
print(javascript)
def all_py_exercises(self):
'''Retrieve all python exercises'''
with sqlite3.connect(self.db_path) as conn:
conn.row_factory = lambda cursor, row: Python(row[0], row[1], row[2])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT exercise_Id,
exercise_name,
exercise_language
FROM exercise
WHERE exercise_language = "Python"
""")
all_py_exercises = db_cursor.fetchall()
for python in all_py_exercises:
print(python)
def all_csharp_exercises(self):
'''Retrieve all C# exercises'''
with sqlite3.connect(self.db_path) as conn:
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT exercise_name,
exercise_language
FROM exercise
WHERE exercise_language = "C#"
""")
# conditional to check the length of all csharp exercises
all_csharp_exercises = db_cursor.fetchall()
if len(all_csharp_exercises) == 0:
print("There are no C# exercises!")
else:
for csharp in all_csharp_exercises:
print(csharp)
def all_students_cohorts(self):
'''Retrieve all students and cohort names'''
with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = lambda cursor, row: Student_Cohort(row[0], row[1], row[2], row[3])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT student_Id,
first_name,
last_name,
c.Name
FROM Cohort c
JOIN student s
ON c.Id = s.student_cohort_Id
""")
all_students_cohorts = db_cursor.fetchall()
for studentCohort in all_students_cohorts:
print(studentCohort)
def all_instructors_cohorts(self):
'''Retrieve all instructors and cohort names'''
with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = lambda cursor, row: Instructor_Cohort(row[0], row[1], row[2], row[3])
db_cursor = conn.cursor()
db_cursor.execute("""
SELECT instructor_Id,
first_name,
last_name,
c.Name
FROM Cohort c
JOIN instructor i
ON c.Id = i.instructor_cohort_Id
""")
all_instructors_cohorts = db_cursor.fetchall()
for instructorCohort in all_instructors_cohorts:
print(instructorCohort)
reports = StudentExerciseReports()
reports.all_students()
reports.all_cohorts()
reports.all_exercises()
reports.all_js_exercises()
reports.all_py_exercises()
reports.all_csharp_exercises()
reports.all_students_cohorts()
reports.all_instructors_cohorts()
|
[
"mistyderamus@gmail.com"
] |
mistyderamus@gmail.com
|
bfcec09fb2a9c0b6cc6a755d255269a6957f56b4
|
61f12e69b3d7a11f9e0e6cf9cedb43c9af07c245
|
/Graph_Algorithm/Breadth_First.py
|
bafe54d3d78ef76eea5bdc2edf0ea9cd3dbc0638
|
[] |
no_license
|
ananabh/Graph_Algorithm
|
7bc6c35031fb82638239adb394fa41d27a632e14
|
c9d3c4b5ee720701d76d65eace00eeab484d4187
|
refs/heads/master
| 2021-07-24T13:40:21.312361
| 2020-01-03T10:19:08
| 2020-01-03T10:19:08
| 101,926,480
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from queue import Queue
import numpy as np
def search(graph, start=0):
queue = Queue()
queue.put(start)
visited = np.zeros(graph.numVertices)
while not queue.empty():
vertex = queue.get()
if visited[vertex] == 1:
continue
print("Visit :", vertex)
visited[vertex] = 1
for v in graph.get_adjacent_vertices(vertex):
if visited[v] != 1:
queue.put(v)
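
# A minimal usage sketch. `AdjacencyListGraph` is a hypothetical stand-in for
# whatever graph class this project supplies elsewhere; it only implements the
# `numVertices` attribute and `get_adjacent_vertices(v)` method that search()
# relies on above.
if __name__ == '__main__':
    class AdjacencyListGraph:
        def __init__(self, num_vertices):
            self.numVertices = num_vertices
            self._adjacency = [[] for _ in range(num_vertices)]

        def add_edge(self, u, v):
            # Undirected edge: record it in both adjacency lists.
            self._adjacency[u].append(v)
            self._adjacency[v].append(u)

        def get_adjacent_vertices(self, v):
            return self._adjacency[v]

    g = AdjacencyListGraph(5)
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 3)
    g.add_edge(3, 4)
    search(g, start=0)  # visits 0, 1, 2, 3, 4 in breadth-first order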
|
[
"abhianan@users.noreply.github.com"
] |
abhianan@users.noreply.github.com
|
677c1989fdeda9af47f28bd069d7fa5eb7a950dd
|
8e035733fa236599e7af5ad4819882cf07b38689
|
/djangoquill/djangorestquill/tests.py
|
d50f1c8c7871322b46cf521820a65189926ade13
|
[] |
no_license
|
suhjohn/Django-Rest-Quill-Test
|
ee0a3a60484c71e73c861c118c6ac60e7e53a9eb
|
5d7e3075af1e9d6dd16071f0dd7fc86074cecd1f
|
refs/heads/master
| 2021-09-03T06:08:24.659637
| 2018-01-06T06:35:58
| 2018-01-06T06:35:58
| 116,368,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
from django.test import TestCase
# Create your tests here.
# Test for Problems such as
# What if content doesn't exist?
# What if content is in wrong format?
# What if content length is 0?
# What if content length is super long?
# What happens when we update?
from rest_framework.test import APITestCase
class QuillRelatedModelCreateTest(APITestCase):
def test_quillpost_related_model_create(self):
url = "/answer/"
|
[
"johnsuh94@gmail.com"
] |
johnsuh94@gmail.com
|
5d5711105462042d67bf0ef658d2b857aeadfc20
|
b62ba4f33cca622e78da298afa33f9dcf431e4b3
|
/server/expenses/migrations/0003_auto_20190920_1259.py
|
471777671ed4a1e3134dd6ec92966ed2c5c756d3
|
[
"MIT"
] |
permissive
|
cristicismas/top-budget
|
98c7c96288a5df1dece195d0e953247188dad509
|
d61db578287b2f77c12032045fca21e58c9ae1eb
|
refs/heads/master
| 2021-10-30T17:26:48.957692
| 2020-09-29T11:57:18
| 2020-09-29T11:57:18
| 189,410,116
| 0
| 0
|
MIT
| 2021-10-05T22:08:17
| 2019-05-30T12:30:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 733
|
py
|
# Generated by Django 2.2.4 on 2019-09-20 12:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('expenses', '0002_auto_20190829_1335'),
]
operations = [
migrations.AlterField(
model_name='expense',
name='location',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.Location'),
),
migrations.AlterField(
model_name='expense',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='expenses.Source'),
),
]
|
[
"cristicismas99@gmail.com"
] |
cristicismas99@gmail.com
|
05d8440c2736146da4ffc999819c9caf857631f5
|
6cf2b45ee8516c7b65fbe362928bfee90ff50779
|
/2-EstruturaDeDecisao/exercício11.py
|
79cbe12f143385a1a86b576402093055d87b9ff0
|
[] |
no_license
|
nralex/Python
|
1d719179ebf22507132584b8285380c55499d9da
|
17cbb5544265aec715c8959699a2f34ccc1f9b01
|
refs/heads/main
| 2023-02-17T10:20:10.155394
| 2021-01-09T23:21:20
| 2021-01-09T23:21:20
| 319,101,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
# Organizações Tabajara decided to give their employees a raise
# and hired you to develop the program that computes the adjustments.
# Write a program that reads an employee's salary and adjusts it according to the following criteria, based on the current salary:
# * salaries up to R$ 280.00 (inclusive): 20% raise
# * salaries between R$ 280.00 and R$ 700.00: 15% raise
# * salaries between R$ 700.00 and R$ 1500.00: 10% raise
# * salaries of R$ 1500.00 and above: 5% raise. After the raise is applied, print:
# * the salary before the adjustment;
# * the raise percentage applied;
# * the amount of the raise;
# * the new salary, after the raise.
salário = float(input('Informe o seu salário: R$'))
if salário <= 280:
percentual = 0.2
elif 280 < salário < 700:
percentual = 0.15
elif 700 <= salário < 1500:
percentual = 0.1
else:
percentual = 0.05
acrescimo = salário * percentual
print(f'Salário antes do reajuste R${salário:.2f}')
print(f'Percentual de aumento aplicado {percentual * 100}%')
print(f'Valor do aumento R${acrescimo:.2f}')
print(f'Novo salário R${salário + acrescimo:.2f}')
|
[
"alexserno@gmail.com"
] |
alexserno@gmail.com
|
8c6def7114c99b3c611a71cb5478f0f823098672
|
119c82b3c1719753b93bd925e157b10e72cdd11f
|
/blog/urls.py
|
693ba80b5090141a7be0bcb3b844403ab0b7fdac
|
[] |
no_license
|
siddharth007-singh/Blog-Website-
|
bee83ac276ff4fffda4f6c6a98f93da74e6e15af
|
ed7a544b0afa2c5df651fdac6fe91745c747e9fd
|
refs/heads/main
| 2023-04-12T04:56:18.035098
| 2021-05-12T08:00:17
| 2021-05-12T08:00:17
| 366,699,889
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
from django.contrib import admin
from django.urls import path
from datawork import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name="homepage"),
path('detail/<int:post_id>', views.detail, name="detailpage"),
path('meet_author', views.meet_author, name="meet_author"),
path('searchbar', views.searchbar, name="searchbar"),
path('category_filter/<int:cat_id>', views.category_filter, name="category_filter"),
path('login', views.login, name="login"),
path('signin', views.signin, name='signin'),
path('user_dashboard', views.user_dashboard, name="user_dashboard"),
path('user_insert_cat', views.user_insert_cat, name="insert_category"),
path('user_insert_topic', views.user_insert_topic, name="insert_topic"),
path('user_insert_post', views.user_insert_post, name="insert_post"),
path('user_manage_post', views.user_manage_post, name="manage_post"),
path('user_report', views.user_report, name="report"),
path('user_profile', views.user_profile, name="profile"),
path('user_edit_image/<int:nu_id>', views.user_edit_image, name="user_edit_image"),
path('user_edit_info/<int:nu_id>', views.user_edit_info, name="user_edit_info"),
path('user_view_post/<int:post_id>', views.user_view_post, name="user_view_post"),
path('user_edit_post/<int:post_id>', views.user_edit_post, name="user_edit_post"),
path('admin_login/', views.admin_login, name="admin_login"),
path('secure_dashboard', views.secure_dashboard, name="secure_dashboard"),
path('secure_manage_post', views.secure_manage_post, name="secure_manage_post"),
path('secure_viewpost/<int:post_id>', views.secure_viewpost, name="secure_viewpost"),
path('secure_edit/<int:post_id>', views.secure_edit, name="secure_edit"),
path('secure_manage_user', views.secure_manage_user, name="secure_manage_user"),
path('secure_manage_report', views.secure_manage_report, name="secure_manage_report"),
path('secure_manage_profile', views.secure_manage_profile, name="secure_manage_profile"),
path('like_deslike/<int:id>', views.like_dislike, name="like_deslike"),
path('delete_post/<int:post_id>', views.delete_post, name="delete_post"),
path('logout', views.logout, name="logout"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"siddharth.code.cws@gmail.com"
] |
siddharth.code.cws@gmail.com
|
86ffea4bdb21794f082509a02de64082e37e21f9
|
e9b6e2318d537830b848b9c1c67d6954865fe0c3
|
/pet/petenv/bin/tkconch
|
e7c7d8dd4ef6ecced5216302f28bedb98ba94527
|
[] |
no_license
|
lvkunpeng/webspider
|
3c59b67a8e9274605366640cd20d4293cbc7232d
|
681a77586974f4c17673763f8c6f42421cd694b5
|
refs/heads/master
| 2022-12-14T04:20:12.921858
| 2018-02-11T08:28:07
| 2018-02-11T08:28:07
| 113,159,632
| 0
| 1
| null | 2022-11-16T23:57:17
| 2017-12-05T09:14:34
|
Python
|
UTF-8
|
Python
| false
| false
| 252
|
#!/root/Desktop/pet/petenv/bin/python3.5
# -*- coding: utf-8 -*-
import re
import sys
from twisted.conch.scripts.tkconch import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"‘lvkunpeng@163.com’"
] |
‘lvkunpeng@163.com’
|
|
73c8395ae9161ae8bc73887438f449575c6db641
|
d055b5225381a86b00a7373440b248e5b016e37c
|
/flaskforum/users/forms.py
|
255d85c672c4d85a221e634de885140a4b890f40
|
[] |
no_license
|
HariRam1998/flask-forum-plant-disease-prediction
|
a9220d6fff1648c20ae57e514f676ca55532fc38
|
dd8f61fd2fa74e5c6c670eae89bed43506c93ec8
|
refs/heads/master
| 2023-04-10T18:47:09.891687
| 2021-04-23T11:31:49
| 2021-04-23T11:31:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, EqualTo, ValidationError
from flask_login import current_user
from flaskforum.models import User
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired()])
password = PasswordField('Password', validators = [DataRequired()])
remember = BooleanField('Remember me')
submit = SubmitField('Log in')
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired()])
password = PasswordField('Password', validators = [DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators = [DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class UpdateAccountForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email', validators=[DataRequired()])
picture = FileField('Update Profile Picture', validators = [FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
def validate_username(self, username):
        if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
        if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class ChangeAccountForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Change Password')
class ForgotAccountForm(FlaskForm):
email = StringField('Email', validators=[DataRequired()])
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if not user:
            raise ValidationError('There is no account with that email.')
submit = SubmitField('Send Mail')
|
[
"HariRam1998@users.noreply.github.com"
] |
HariRam1998@users.noreply.github.com
|
b26813fea036ee40ff9c130c6296d8e1f3fcd9f7
|
1e182b7cbedc5bf3696ecaa236990c57983fcc0b
|
/tests/test_tensor_maps.py
|
7fcb52c2f635ed4bc6478c68f9d6f05be306bc93
|
[
"BSD-3-Clause"
] |
permissive
|
mit-ccrg/ml4c3-mirror
|
c1c87230c5e9f18164bc1d9085328ad0117a1c6e
|
0e25886083ccefc6cbb6250605c58f018f70a2e9
|
refs/heads/master
| 2023-02-28T15:15:36.481784
| 2021-02-08T15:01:57
| 2021-02-08T15:01:57
| 337,133,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
# Imports: third party
import pytest
from tensorflow.keras.losses import logcosh
# Imports: first party
from tensormap.TensorMap import TensorMap
# pylint: disable=no-member
class TestTensorMaps:
"""
Class to test ECG tensor maps.
"""
@staticmethod
def test_tensor_map_equality():
tensor_map_1a = TensorMap(
name="tm",
loss="logcosh",
channel_map={"c1": 1, "c2": 2},
metrics=[],
tensor_from_file=pytest.TFF,
)
tensor_map_1b = TensorMap(
name="tm",
loss="logcosh",
channel_map={"c1": 1, "c2": 2},
metrics=[],
tensor_from_file=pytest.TFF,
)
tensor_map_2a = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c1": 1, "c2": 2},
metrics=[],
tensor_from_file=pytest.TFF,
)
tensor_map_2b = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c2": 2, "c1": 1},
metrics=[],
tensor_from_file=pytest.TFF,
)
tensor_map_3 = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c1": 1, "c2": 3},
metrics=[],
tensor_from_file=pytest.TFF,
)
tensor_map_4 = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c1": 1, "c2": 3},
metrics=[all],
tensor_from_file=pytest.TFF,
)
tensor_map_5a = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c1": 1, "c2": 3},
metrics=[all, any],
tensor_from_file=pytest.TFF,
)
tensor_map_5b = TensorMap(
name="tm",
loss=logcosh,
channel_map={"c1": 1, "c2": 3},
metrics=[any, all],
tensor_from_file=pytest.TFF,
)
assert tensor_map_1a == tensor_map_1b
assert tensor_map_2a == tensor_map_2b
assert tensor_map_1a == tensor_map_2a
assert tensor_map_5a == tensor_map_5b
assert tensor_map_2a != tensor_map_3
assert tensor_map_3 != tensor_map_4
assert tensor_map_3 != tensor_map_5a
assert tensor_map_4 != tensor_map_5a
|
[
"noreply@github.com"
] |
mit-ccrg.noreply@github.com
|
c8b3f6213dc2511271bb7c53d95522854e18f936
|
5700b2c6bfe55291444d77db74e23e03b2b03681
|
/MergeSort.py
|
bbec45bb6ddc236a1dfc6c3384025a0633fa303f
|
[] |
no_license
|
IrsyadMakarim/TubesAKA
|
024a579441aa1b3ea43c5e0f9adeb1a28c19c52d
|
d96838459077bb638e7d3d6162ff4638e3689daf
|
refs/heads/master
| 2023-02-20T12:59:49.811746
| 2020-12-26T09:10:33
| 2020-12-26T09:10:33
| 324,094,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,451
|
py
|
import pygame
import random
import time
pygame.font.init()
startTime = time.time()
n = 151
screen = pygame.display.set_mode((900, 650))
pygame.display.set_caption("SORTING VISUALISER")
run = True
width = 900
length = 600
array = [0] * n
arr_clr = [(0, 204, 102)] * n
clr_ind = 0
clr = [(0, 204, 102), (255, 0, 0),
(0, 0, 153), (255, 102, 0)]
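# Colour roles: clr[0] idle bars, clr[1] bars currently being compared,
# clr[2] bars just written back during a merge, clr[3] bars in final sorted position.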
fnt = pygame.font.SysFont("comicsans", 30)
fnt1 = pygame.font.SysFont("comicsans", 20)
def generate_arr():
for i in range(1, n):
arr_clr[i] = clr[0]
array[i] = random.randrange(1, 100)
generate_arr()
def refill():
screen.fill((255, 255, 255))
draw()
pygame.display.update()
pygame.time.delay(20)
def mergesort(array, l, r):
mid = (l + r) // 2
if l < r:
mergesort(array, l, mid)
mergesort(array, mid + 1, r)
merge(array, l, mid,
mid + 1, r)
def merge(array, x1, y1, x2, y2):
i = x1
j = x2
temp = []
pygame.event.pump()
while i <= y1 and j <= y2:
arr_clr[i] = clr[1]
arr_clr[j] = clr[1]
refill()
arr_clr[i] = clr[0]
arr_clr[j] = clr[0]
if array[i] < array[j]:
temp.append(array[i])
i += 1
else:
temp.append(array[j])
j += 1
while i <= y1:
arr_clr[i] = clr[1]
refill()
arr_clr[i] = clr[0]
temp.append(array[i])
i += 1
while j <= y2:
arr_clr[j] = clr[1]
refill()
arr_clr[j] = clr[0]
temp.append(array[j])
j += 1
j = 0
for i in range(x1, y2 + 1):
pygame.event.pump()
array[i] = temp[j]
j += 1
arr_clr[i] = clr[2]
refill()
if y2 - x1 == len(array) - 2:
arr_clr[i] = clr[3]
else:
arr_clr[i] = clr[0]
def draw():
txt = fnt.render("PRESS" \
" 'ENTER' TO PERFORM SORTING.", 1, (0, 0, 0))
screen.blit(txt, (20, 20))
txt1 = fnt.render("PRESS 'R' FOR NEW ARRAY.",
1, (0, 0, 0))
screen.blit(txt1, (20, 40))
txt2 = fnt1.render("ALGORITHM USED: " \
"MERGE SORT", 1, (0, 0, 0))
screen.blit(txt2, (600, 60))
txt3 = fnt1.render("RUNNING TIME(sec): "+ \
str(int(time.time() - startTime)), \
1, (0, 0, 0))
screen.blit(txt3, (600, 20))
element_width = (width - 150) // 150
boundry_arr = 900 / 150
boundry_grp = 550 / 100
pygame.draw.line(screen, (0, 0, 0),
(0, 95), (900, 95), 6)
for i in range(1, 100):
pygame.draw.line(screen,
(224, 224, 224),
(0, boundry_grp * i + 100),
(900, boundry_grp * i + 100), 1)
for i in range(1, n):
pygame.draw.line(screen, arr_clr[i], \
(boundry_arr * i - 3, 100), \
(boundry_arr * i - 3, array[i] * boundry_grp + 100), \
element_width)
while run:
screen.fill((255, 255, 255))
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
generate_arr()
if event.key == pygame.K_RETURN:
mergesort(array, 1, len(array) - 1)
draw()
pygame.display.update()
pygame.quit()
|
[
"1rsy4d.m@gmail.com"
] |
1rsy4d.m@gmail.com
|
2fe1fd45dcf963302d499f8cd39946fc538d073b
|
aaf772cc4830a98839f6bbba28fc0e7523fdc341
|
/mme_ave_msk.py
|
e344d5c0f4f7ab8e747822d48d34dcd48b87bcc5
|
[] |
no_license
|
eguil/Density_bining
|
a01df60b79ec3382399062b3f7dfe35f5641f695
|
a450ecaa784965e4d09453a1b255874bb5a82318
|
refs/heads/master
| 2021-09-20T03:05:20.099344
| 2021-09-15T09:06:38
| 2021-09-15T09:06:38
| 18,653,021
| 4
| 3
| null | 2019-05-21T17:12:38
| 2014-04-10T21:48:22
|
Python
|
UTF-8
|
Python
| false
| false
| 15,302
|
py
|
import os,glob,sys,resource,socket
from libDensityPostpro import mmeAveMsk1D,mmeAveMsk2D, mmeAveMsk3D
from modelsDef import defModels
from correctBinFiles import correctFile
from string import replace
import warnings
import time as timc
warnings.filterwarnings("ignore")
# ----------------------------------------------------------------------------
#
# Perform model ensemble mean and other statistics for density binning output
# run with 'pythoncd mme_ave_msk.py' (cdms python)
#
# April 2016 : add ToE computation support (for 2D files only)
# May 2016 : add obs support
# Nov 2016 : add 3D files support
# Jan 2017 : add picontrol and 1pctCo2 support
#
# TODO : add arguments to proc for INIT part (exper, raw, fullTS, test, keepfiles, oneD/twoD, mm/mme, ToE...) or per step
#
# ----------------------------------------------------------------------------
tcpu0 = timc.clock()
#
# ----------------------------
# !!! Compulsory work order !!!
# ----------------------------
# 0) create ptopsigmaxy and correct grid interpolation issues (hist and histNat)
# 0.1) raw, oneD, mm, fullTS = T, correctF = F
# 0.2) for file in cmip5.* ; do ncks -A -v ptopsigmaxy $file ../$file ; echo $file; done
# 0.3) raw, oneD, mm, fullTS = F, correctF = T
# 1) run oneD first (mm and mme) for historical and histNat
# 2) run twoD mm for histNat
# 3) run twoD + ToE mm for historical (or better use Yona's calculation)
# 4) run twoD mme for historical (still to implement for ToE)
#
# ===============================================================================================================
# INIT - work definition
# ===============================================================================================================
#raw = True
raw = False
# fullTS = True # to compute for the full range of time (used for raw/oneD to compute ptopsigmaxy)
fullTS = False
#testOneModel = True
testOneModel = False
# Initial correction of Raw binned files (longitude interpolation and bowl issues)
correctF = False # only active if Raw = True
# Keep existing files or replace (if True and file present, ignores the model mm or mme computation)
# Use False for testing
keepFiles = True
oneD = False
twoD = False
#oneD = True
twoD = True
mm = False
mme = True
# experiment
#exper = 'historical'
#exper = 'historicalNat'
#exper = 'piControl'
#exper = '1pctCO2'
exper = 'rcp85'
#exper = 'obs'
# Time mean/max bowl calculation used to mask out bowl
timeBowl = 'max'
if twoD:
correctF = False # already done for oneD
# ToE
#ToE = True
ToE = False
ToeType = 'histnat' # working from hist and histnat
#ToeType = 'picontrol' # working from hist and picontrol
if not ToE:
ToeType ='F'
# Select range of MME
#selMME = 'All' # select all models for MME
#selMME = 'Hist' # select only models for which there are rcp85 and hist and simulations
selMME = 'Nat' # select only models for which there are hist AND histNat simulations
#selMME = '1pct' # select only models for which there are piControl AND 1pctCO2 simulations
# ===============================================================================================================
hostname = socket.gethostname()
if 'locean-ipsl.upmc.fr' in hostname:
baseDir = '/Volumes/hciclad/data/Density_binning/'
#baseDir = '/Volumes/hciclad2/data/Density_binning/'
elif 'waippo.local' in hostname or 'canalip.upmc.fr' in hostname or 'waippo-3.local' in hostname:
if raw:
        baseDir = '/Volumes/hciclad/data/Density_binning/'
else:
        #baseDir = '/Users/ericg/Projets/Density_bining/'
        baseDir = '/Volumes/hciclad/data/Density_binning/'
elif 'private.ipsl.fr' in hostname:
baseDir = '/data/ericglod/Density_binning/'
elif 'crunchy.llnl.gov' in hostname:
baseDir = '/work/guilyardi/'
else:
print hostname
sys.exit('Unknown hostname')
if exper != 'obs':
# define all models
models = defModels()
# Years interval for difference reference
iniyear = 1861
peri1 = (1861-iniyear)+1
peri2 = (1950-iniyear)+2
# I/O directories
#rootDir = '/Users/ericg/Projets/Density_bining/Prod_density_april15/'
#rootDir = '/Volumes/hciclad/data/Density_binning/Prod_density_april15/Raw/'
#rootDir = '/data/ericglod/Density_binning/Prod_density_april15/Raw/'
#rootdir = '/work/guilyardi/Prod_density_april15/Raw'
if raw:
rootDir =baseDir+'Prod_density_april15/Raw/'
else:
rootDir =baseDir+'Prod_density_april15/'
histDir = rootDir+'historical'
histNatDir = rootDir+'historicalNat'
piControlDir = rootDir+'piControl'
pctCO2Dir = rootDir+'1pctCO2'
rcp85Dir = rootDir+'rcp85'
histMMEOut = rootDir+'mme_hist'
histNatMMEOut = rootDir+'mme_histNat'
picMMEOut = rootDir+'mme_piControl'
pctMMEOut = rootDir+'mme_1pctCO2'
rcp85MMEOut = rootDir+'mme_rcp85'
ToeNatOut = rootDir+'toe_histNat'
# output name
outroot = 'cmip5.multimodel'
inroot = 'cmip5'
else:
# Specific variables for observations
obsm = {'name':'EN4' ,'props':[1,0,0,114], 'picontrol':[0]}
#obsm = {'name':'Ishii' ,'props':[1,0,0,67], 'picontrol':[0]}
models = [obsm]
if models[0]['name'] == 'EN4': # 1900.01 - 2015.04 (115 time steps, ignore last year) Good et al.
iniyear = 1900
peri1 = (2014-iniyear)+1
peri2 = (1900-iniyear)+2
idxtime = [0,114]
elif models[0]['name'] == 'Ishii': # 1945.01 - 2012.12 (68 time steps)
iniyear = 1945
peri1 = (2012-iniyear)+1
peri2 = (1945-iniyear)+2
idxtime = [0,67]
#rootDir = '/Users/ericg/Projets/Density_bining/Prod_density_obs_april16/'
rootDir ='/Volumes/hciclad/data/Density_binning/Prod_density_obs_april16/'
ObsMMEOut = rootDir+'mme_obs'
outroot = models[0]['name']
inroot = 'obs'
mm = True
mme = False
#
nmodels = len(models)
# perform a selection of a few models (for testing or updating)?
modelSel = range(nmodels)
# modelSel = [3,10,18,19,25,27,28]
#modelSel = [22,23]
if testOneModel:
modelSel = [19]
if mme:
fullTS = False
correctF = False
if ToE:
if ToeType == 'histnat':
selMME = 'Nat' # force if ToE & histnat used
if exper == 'historical':
indir = [histDir]
outdir = histMMEOut
idxtime=[0,145]
elif exper == 'historicalNat':
indir = [histNatDir]
outdir = histNatMMEOut
idxtime=[0,145]
elif exper == 'piControl':
indir = [piControlDir]
outdir = picMMEOut
idxtime=[0,-140] # last 140 years are used for mme
selMME = '1pct' # select on runs that also have a 1pctCO2
elif exper == '1pctCO2':
indir = [pctCO2Dir]
outdir = pctMMEOut
idxtime=[0,140]
selMME = 'piCtl' # select on runs that also have a piControl
elif exper == 'rcp85':
indir = [rcp85Dir]
outdir = rcp85MMEOut
idxtime=[0,95]
elif exper == 'obs':
indir = [rootDir]
outdir = ObsMMEOut
if ToE:
if ToeType == 'histnat':
indir = [histDir, histNatMMEOut]
outdir = ToeNatOut
if raw:
dim = 2
appendDim1d='2D'
appendDim2d='3D'
if mme:
if exper == 'historical':
indir = [rootDir+'mme_hist']
outdir = rootDir+'mme_hist'
if mme:
if exper == 'historicalNat':
indir = [rootDir+'mme_histNat']
outdir = rootDir+'mme_histNat'
else:
dim = 1
appendDim1d='zon1D'
appendDim2d='zon2D'
if raw & twoD :
outdir = outdir+'/mme'
if mme:
indir[0] = indir[0]+'/mme'
if mme:
indir[0] = outdir
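# timeInt holds the 1-based indices of the reference period used for
# differences downstream (the 1861/1950 offsets for model runs, the
# observation-specific offsets for obs).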
timeInt=[peri1,peri2]
listens = []
listens1 = []
print
print '-----------------------------------------------------------------------------------------------'
print ' Enter mme_ave_mask.py for multi-model ensemble averaging for density bins'
print '-----------------------------------------------------------------------------------------------'
if oneD:
print ' -> work on 1D files'
if twoD:
print ' -> work on 2D files (using 1D files)'
if raw:
print ' -> work on raw 4D data'
if correctF:
print ' -> Correct files for longitude and bowl issues'
if ToE:
print ' -> computing ToE for type = ',ToeType
if mm:
print ' -> Performing ensemble(s) for',exper
print ' -> Type of time selection on bowl (mean or max):',timeBowl
if mme:
print ' -> Performing MME for',selMME, 'models for', exper
if exper == 'piControl':
        print ' -> Using the last 140 years of each piControl run'
print
print ' --> indir = ',indir
print ' --> outdir = ',outdir
print '-----------------------------------------------------------------------------------------------'
print
os.chdir(indir[0])
for i in modelSel:
mod = models[i]['name']
years = [models[i]['props'][3],models[i]['props'][4]]
if exper == 'historical':
nens = models[i]['props'][0]
chartest = exper
elif exper == 'historicalNat':
nens = models[i]['props'][1]
chartest = exper
elif exper == 'piControl':
nyears = models[i]['picontrol'][0]
nens = 1
years=[0,nyears]
if selMME == '1pct' and nyears < 140 and nyears > 0:
nens = 1
print ' TOO SHORT: IGNORE model', mod
chartest = exper
elif exper == '1pctCO2':
nens = models[i]['props'][2]
years=[0,140]
chartest = exper
elif exper == 'rcp85':
nens = models[i]['props'][5]
years=[0,95]
chartest = exper
elif exper == 'obs':
nens = models[i]['props'][0]
chartest = 'historical'
if ToE:
if ToeType == 'histnat':
nens = models[i]['props'][1]
    if years[1] != 0: # do not ignore model
if nens > 0: # only if 1 member or more
if raw:
listf = glob.glob(inroot+'.'+mod+'.*.nc')
listf1 = listf
else:
listf = glob.glob(inroot+'.'+mod+'.*zon2D*')
listf1 = glob.glob(inroot+'.'+mod+'.*zon1D*')
if len(listf) == 0:
print i, mod
sys.exit('### no such file !')
start = listf[0].find(chartest)+len(chartest)
end = listf[0].find('.an.')
rip = listf[0][start:end]
if raw:
outFile = replace(listf[0],rip,'.ensm')
outFile1 = outFile
if mm & correctF: # correct file in indir+'/correct' and change indir
idxcorr = models[i]['correctFile']
outDirc = indir[0]+'/correct'
print ' -> correct',len(listf),'files towards', outDirc
for filec in listf:
# test if file is here before creating
if os.path.isfile(outDirc+'/'+filec):
print ' -> corrected file present: ',filec
else:
print ' -> correct ',filec
correctFile(idxcorr, 1, filec, indir[0], filec, outDirc)
            #indirnew = outDirc
else:
outFile = replace(listf[0],rip,'.ensm')
outFile1 = replace(outFile,'2D','1D')
# Create lists for mme
if mme:
if selMME == 'All':
listens.append(outFile)
listens1.append(outFile1)
print ' Add ',i,mod, '(slice', years, nens, 'members) to MME'
if selMME == 'Nat': # only select model if histNat mm is present
if models[i]['props'][1] > 0:
listens.append(outFile)
listens1.append(outFile1)
print ' Add ',i,mod, '(slice', years, nens, 'members) to MME'
if selMME == '1pct': # only select model if 1pctCO2 mm is present
if models[i]['props'][2] > 0:
listens.append(outFile)
listens1.append(outFile1)
print ' Add ',i,mod, '(slice', years, nens, 'members) to MME'
if selMME == 'piCtl': # only select model if piCtl mm is present
if models[i]['picontrol'][0] > 0:
listens.append(outFile)
listens1.append(outFile1)
print ' Add ',i,mod, '(slice', years, nens, 'members) to MME'
if selMME == 'Hist': # only select model if hist mm is present
if models[i]['props'][0] > 0:
listens.append(outFile)
listens1.append(outFile1)
print ' Add ',i,mod, '(slice', years, nens, 'members) to MME'
# Perform model ensemble
if mm:
if twoD:
if os.path.isfile(outdir+'/'+outFile) & keepFiles:
print ' -> File exists - IGNORE mm of',outFile,'already in',outdir
else:
print ' -> working on: ', i,mod, 'slice', years, nens, 'members'
if dim == 1:
mmeAveMsk2D(listf,years,indir,outdir,outFile,timeInt,mme,timeBowl,ToeType)
elif dim == 2:
mmeAveMsk3D(listf,years,indir,outdir,outFile,timeInt,mme,ToeType)
print 'Wrote ',outdir+'/'+outFile
if oneD:
if os.path.isfile(outdir+'/'+outFile1) & keepFiles:
print ' -> File exists - IGNORE mm of',outFile1,'already in',outdir
else:
print ' -> working on: ', i,mod, 'slice', years, nens, 'member(s)'
mmeAveMsk1D(listf1,dim,years,indir,outdir,outFile1,timeInt,mme,ToeType,fullTS)
print 'Wrote ',outdir+'/'+outFile1
if mme:
# run 1D MME first
if twoD:
outFile = outroot+'_'+selMME+'.'+exper+'.ensm.an.ocn.Omon.density_'+appendDim2d+'.nc'
if os.path.isfile(outdir+'/'+outFile) & keepFiles:
print ' -> IGNORE: mme of',outFile,'already in',outdir
else:
if dim == 1:
mmeAveMsk2D(listens,idxtime,indir,outdir,outFile,timeInt,mme,timeBowl,ToeType)
elif dim ==2:
mmeAveMsk3D(listens,idxtime,indir,outdir,outFile,timeInt,mme,ToeType)
print 'Wrote ',outdir+'/'+outFile
if oneD:
outFile1 = outroot+'_'+selMME+'.'+exper+'.ensm.an.ocn.Omon.density_'+appendDim1d+'.nc'
if os.path.isfile(outdir+'/'+outFile1) & keepFiles:
print ' -> IGNORE: mme of',outFile1,'already in',outdir
else:
mmeAveMsk1D(listens1,dim,idxtime,indir,outdir,outFile1,timeInt,mme,ToeType,False)
print 'Wrote ',outdir+'/'+outFile1
tcpu1 = timc.clock()
print ' Max memory use',resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1.e6,'GB'
print ' CPU use',tcpu1-tcpu0
# ---------------------------
#modelsurf = ['ACCESS1-0','ACCESS1-3','CMCC-CESM','CMCC-CM','CMCC-CMS','CNRM-CM5','CSIRO-Mk3-6-0','EC-EARTH','FGOALS-s2','GFDL-ESM2G','GISS-E2-R-CC','GISS-E2-R','MIROC5','MIROC-ESM-CHEM','MIROC-ESM','MPI-ESM-LR','MPI-ESM-MR','MPI-ESM-P','NorESM1-ME','NorESM1-M']
|
[
"Eric.Guilyardi@locean-ipsl.upmc.fr"
] |
Eric.Guilyardi@locean-ipsl.upmc.fr
|
741da4aa788642759e3693ac73deec10287f9425
|
66c917eeefbd2eb0d7c2de16c67b1211518cc74e
|
/k-means.py
|
5a7d5822410f9f0af41be575c3db2fdad9f97060
|
[] |
no_license
|
yz-chen18/AndrewML
|
201d993ee9f834f37dac6d59b896ab8f9c933239
|
b10e6dffb2f92f8193a1d2a15cdddb72a4529be0
|
refs/heads/master
| 2020-07-14T03:48:17.109479
| 2019-09-10T03:01:16
| 2019-09-10T03:01:16
| 205,230,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,255
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def loadData(fpath):
mat = []
labels = []
with open(fpath, 'r') as f:
for line in f.readlines():
label = line.split()[-1]
line = line.split()[:-1]
line = [float(num) for num in line]
mat.append(line)
labels.append(int(label))
mat = np.array(mat)
return mat, labels
def choose2rand(m):
p1 = m
p2 = m
while (p1 == p2):
p1 = np.random.randint(0, m)
p2 = np.random.randint(0, m)
return p1, p2
def k_means(inX, maxIter, K = 2, looptimes = 1):
m = len(inX)
times = 0
labelList = []
costList = []
labelMat = [1 for i in range(m)]
p0, p1 = choose2rand(m)
p0 = inX[p0]
p1 = inX[p1]
for l in range(looptimes):
p0, p1 = choose2rand(m)
p0 = inX[p0]
p1 = inX[p1]
templabel = [0 for i in range(m)]
for i in range(maxIter):
for j in range(m):
d0 = np.multiply(inX[j] - p0, inX[j] - p0).sum()
d1 = np.multiply(inX[j] - p1, inX[j] - p1).sum()
if (d0 > d1):
templabel[j] = 1
p1_sum = np.zeros(inX[0].shape)
p1_num = 0
p2_sum = np.zeros(inX[0].shape)
p2_num = 0
for p in range(m):
if (templabel[p] == 0):
p1_sum += inX[p]
p1_num += 1
else:
p2_sum += inX[p]
p2_num += 1
p0 = p1_sum / p1_num
p1 = p2_sum / p2_num
cost = 0
for i in range(m):
if (templabel[i] == 0):
d = np.multiply(inX[i] - p0, inX[i] - p0).sum()
else:
d = np.multiply(inX[i] - p1, inX[i] - p1).sum()
cost += d
labelList.append(templabel)
costList.append(cost)
    mincost = costList[0]
    minIndex = 0
    for i in range(len(costList)):
        if (costList[i] < mincost):
            mincost = costList[i]  # track the running minimum, or minIndex picks the wrong restart
            minIndex = i
    labelMat = labelList[minIndex]
    # Note: p0/p1 are the centroids from the last restart, not necessarily the
    # lowest-cost one kept in labelMat.
    return labelMat, p0, p1
def draw(fpath):
inX, labels = loadData(fpath)
labelMat, p1, p2 = k_means(inX, 100, 2, 4)
fig = plt.figure()
m = len(labelMat)
error = 0
rate = 0
ax = fig.add_subplot(1, 1, 1)
ax.scatter(p1[0], p1[1], color = 'k', s = 50)
ax.scatter(p2[0], p2[1], color = 'k', s = 50)
for i in range(m):
if (labelMat[i] != labels[i]):
error += 1
if (labelMat[i] == 1):
ax.scatter(inX[i][0], inX[i][1], color='r', s = 20)
else:
ax.scatter(inX[i][0], inX[i][1], color='b', s = 20)
#print(labelMat)
#print(labels)
plt.show()
rate = error / m
if (rate > 0.5):
rate = 1 - rate
print(rate)
'''
for j in range(4):
labelMat = labelList[j]
axj = fig.add_subplot(2, 2, j + 1)
for i in range(m):
if (labelMat[i] == 1):
axj.scatter(inX[i][0], inX[i][1], color = 'r')
else:
axj.scatter(inX[i][0], inX[i][1], color = 'b')
plt.show()
'''
draw("C:\\Users\\91969\\Desktop\\testSet.txt")
|
[
"yz_chen1999@163.com"
] |
yz_chen1999@163.com
|
7d4a92cde5a395b02c3428e9df8b95c0e88be56a
|
6f6205a6be1377b1f765cd7f752be6d40709e7e2
|
/core/infer.py
|
33cc2ee70ce17022f478cd5d42d8f9ac5f04e42d
|
[] |
no_license
|
imounish/HCR_Flask
|
5793f6ce2bf98aa995151a49d521b51e0d0026b8
|
92d4f29f1536bd98a1158b7e644f4d750bcf458c
|
refs/heads/master
| 2022-06-19T17:31:10.499946
| 2020-05-09T17:10:08
| 2020-05-09T17:10:08
| 262,615,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
import os
import cv2
from core.Model import Model, DecoderType
from core.SamplePreprocessor import preprocess
from core.DataLoader import Batch
class FilePaths:
fnCharList = os.path.abspath("../hcr_flask/model/charList.txt")
fnAccuracy = os.path.abspath("../hcr_flask/model/accuracy.txt")
# fnInfer = os.path.abspath("../static/data/test.png")
def infer(imgPath):
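    # Best-path (greedy) CTC decoding: take the most probable character at
    # each time step, then collapse repeats and blanks.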
decoderType = DecoderType.BestPath
model = Model(open(FilePaths.fnCharList).read(), decoderType, mustRestore = True, dump = False)
img = preprocess(cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE), Model.imgSize)
batch = Batch(None, [img])
(recognized, probability) = model.inferBatch(batch, True)
return (recognized[0], probability[0])
|
[
"ish.mp98@gmail.com"
] |
ish.mp98@gmail.com
|
7d9d64e560d6b1485394ab64f0bb032a2d1fc7ca
|
ac8a93c0917945b9a1dc7e0343b016d518424f05
|
/frexp/test_util.py
|
715167cb0fb8947120c46e175ac492470d742b78
|
[
"MIT"
] |
permissive
|
brandjon/frexp
|
f90af9a7268b191e3ef11c05515dfeaab1d80676
|
246ca0d9c4f4e7a849fcd0709748a7f7ac0961bc
|
refs/heads/master
| 2021-01-18T23:01:00.746997
| 2014-11-26T04:28:47
| 2014-11-26T04:28:47
| 18,230,534
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,265
|
py
|
"""Unit tests for util.py."""
import unittest
from frexp.util import *
class UtilCase(unittest.TestCase):
def test_stopwatch(self):
n = 10
def dummytimer():
return n
t = StopWatch(dummytimer)
# Init.
self.assertEqual(t.elapsed, 0)
# Basic start/stop/elapsed usage.
t.start()
n = 13
v = t.elapsed
t.stop()
self.assertEqual(v, 3)
# Context manager usage.
with t:
n = 15
self.assertEqual(t.elapsed, 5)
# Consume while timing.
with t:
n = 17
v = t.consume()
self.assertEqual(v, 7)
self.assertEqual(t.elapsed, 0)
# Consume outside of timing.
with t:
n = 18
v = t.consume()
self.assertEqual(v, 1)
self.assertEqual(t.elapsed, 0)
# No double start/stop.
t = StopWatch(dummytimer)
with self.assertRaises(AssertionError):
t.start()
t.start()
t = StopWatch(dummytimer)
with self.assertRaises(AssertionError):
t.stop()
t.stop()
if __name__ == '__main__':
unittest.main()
|
[
"jon.brandvein@gmail.com"
] |
jon.brandvein@gmail.com
|
7a9b82e11189361007fff141564ebe00b71a73c4
|
65993766064d0973b055103272a5630bf3662ada
|
/dataset/taskset.py
|
6ed50019f17bebc315d4d734563780d6d90956b2
|
[] |
no_license
|
unlimitlife/Temporal-Ensembling
|
ea0e477345f52bc58b4dce46d7de6aa8b3f8c8df
|
839857033bdf54e2d80930226913e501a394baf7
|
refs/heads/master
| 2022-09-05T19:08:17.192724
| 2020-06-01T05:46:53
| 2020-06-01T05:46:53
| 268,435,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,923
|
py
|
import os
import pickle
import random
import copy
import numpy as np
from torchvision.datasets import ImageFolder
from torch.utils.data import IterableDataset
from PIL import Image
import bisect
import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
from .TinyImageNet import TinyImageNet
from .SubImageNet import SubImageNet
from .zca_bn import ZCA
_datasets = {'cifar10': torchvision.datasets.CIFAR10,
'cifar100': torchvision.datasets.CIFAR100,
'mnist': torchvision.datasets.MNIST,
'stl10': lambda data_path, train, download: torchvision.datasets.STL10(data_path,
split='train' if train else 'test',
download=download),
'tiny_image': TinyImageNet,
'sub_image': SubImageNet}
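# Map a flat ZCA-whitened float vector back to a displayable 8-bit RGB image
# via min-max rescaling into [0, 255].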
def zca_to_image(x):
x = x.reshape((32,32,3))
m,M = x.min(), x.max()
x = (x - m) / (M - m)
return Image.fromarray(np.uint8(x*255))
def preprocess(data_path, dataset, ZCA_=False):
""" If the dataset does not exist, download it and create a dataset.
Args:
data_path (str): root directory of dataset.
dataset (str): name of dataset.
"""
il_data_path = os.path.join(data_path, 'zca_' + dataset)
train_path = os.path.join(il_data_path, 'train')
val_path = os.path.join(il_data_path, 'val')
if os.path.isdir(il_data_path):
return
os.makedirs(train_path)
os.makedirs(val_path)
train_set = _datasets[dataset](data_path, train=True, download=True)
val_set = _datasets[dataset](data_path, train=False, download=True)
images = {}
labels = {}
    if ZCA_:
for tag, cur_set, cur_path in [['train', train_set, train_path], ['test', val_set, val_path]]:
for idx, item in enumerate(cur_set):
images.setdefault(tag,[])
images[tag].append(np.asarray(item[0],dtype='float32').reshape(-1,3,32,32) / np.float32(255))
labels.setdefault(tag,[])
labels[tag].append(np.asarray(item[1],dtype='int32'))
images[tag] = np.concatenate(images[tag])
labels[tag] = np.asarray(labels[tag])
#import pdb; pdb.set_trace()
whitener = ZCA(x=images['train'])
#import sys; sys.exit()
for tag, cur_path in [['train', train_path],['test', val_path]]:
###images[tag] = whitener.apply(images[tag])
# Pad according to the amount of jitter we plan to have.
for idx, (img, label) in enumerate(zip(images[tag], labels[tag])):
img = zca_to_image(img)
item = (img, label)
if not os.path.exists(os.path.join(cur_path, str(label))):
os.makedirs(os.path.join(cur_path, str(label)))
with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:
pickle.dump(item, f)
# dump pickles for each class
else:
for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:
for idx, item in enumerate(cur_set):
label = item[1]
if not os.path.exists(os.path.join(cur_path, str(label))):
os.makedirs(os.path.join(cur_path, str(label)))
with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:
pickle.dump(item, f)
class Taskset(data.Dataset):
def __init__(self, root, train=True, transform=None, num_labels=4000, num_classes=10):
"""
Args:
root (str): root directory of dataset prepared for incremental learning (by preper_for_IL)
task (list): list of classes that are assigned for the task
task_idx (int): index of the task, ex) 2nd task among total 10 tasks
train (bool): whether it is for train or not
transform (callable) : transforms for dataset
target_transform (callable) : transforms for target
"""
if train:
self.root = os.path.expanduser(root) + '/train'
else:
            self.root = os.path.expanduser(root) + '/val'
if not os.path.isdir(self.root):
print('Exception: there is no such directory : {}'.format(self.root))
self.train = train # training set or test set
self.num_labels = num_labels
self.transform = transform
self.targets = []
self.filenames = []
self.data = []
self.un_data = []
if self.train:
for cls in os.listdir(self.root):
chk = 0
file_path = self.root + '/' + str(cls)
files = os.listdir(file_path)
random.shuffle(files)
#for file in os.listdir(file_path):
for file in files:
with open(file_path + '/' + file, 'rb') as f:
if chk < int(self.num_labels/num_classes):
entry = pickle.load(f)
self.data.append(entry[0])
self.targets.append(entry[1])
self.filenames.append(file)
else :
entry = pickle.load(f)
self.un_data.append(entry[0])
self.filenames.append(file)
chk += 1
else:
for cls in os.listdir(self.root):
file_path = self.root + '/' + str(cls)
for file in os.listdir(file_path):
with open(file_path + '/' + file, 'rb') as f:
entry = pickle.load(f)
self.data.append(entry[0])
self.targets.append(entry[1])
self.filenames.append(file)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target, soft_label) where target is index of the target class.
"""
idx = index
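        # Indices below num_labels address the labelled pool; larger indices
        # are shifted into the unlabelled pool and returned with target -1.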
if self.train:
if idx < self.num_labels:
return self.transform(self.data[idx]), self.targets[idx], idx
else:
return self.transform(self.un_data[idx-self.num_labels]), -1, idx
else:
img, target = self.data[idx], int(self.targets[idx])
img = self.transform(img)
return img, target, idx
def __len__(self):
return len(self.data) + len(self.un_data)
if __name__ == "__main__":
import sys
sys.path.append(os.getcwd())
from config import config
for dataset in _datasets:
preprocess(config['data_path'], dataset)
|
[
"unlimitlife@naver.com"
] |
unlimitlife@naver.com
|
2e90384cfa5f35cd00ef16bf0072d0a2d0e3fd30
|
5e14dbed4139925544a24cdf299d3db73c31f960
|
/books/migrations/0001_initial.py
|
f3f3daf19c9684d6e303193e0f81f26b5e342feb
|
[] |
no_license
|
JJuniJJuni/book_management
|
aa0867328f81e4641d5266b0cf2af454ca101962
|
7068ab109c3477740fb3e1b7fadbdc8d79494272
|
refs/heads/master
| 2022-12-16T14:35:52.375170
| 2019-09-16T14:28:01
| 2019-09-16T14:28:01
| 139,111,221
| 0
| 0
| null | 2021-06-10T21:08:14
| 2018-06-29T06:40:32
|
Tcl
|
UTF-8
|
Python
| false
| false
| 1,939
|
py
|
# Generated by Django 2.0.6 on 2018-06-11 05:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=45)),
('isbn10', models.CharField(max_length=10)),
('isbn13', models.CharField(max_length=13)),
('book_type', models.CharField(max_length=45)),
('publisher', models.CharField(max_length=45)),
('author', models.CharField(max_length=45)),
],
),
migrations.CreateModel(
name='BookInfo',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('publication_at', models.DateField(blank=True,
null=True)),
('registration_at', models.DateTimeField(auto_now_add=True)),
('book_writing', models.TextField(blank=True)),
('book_contents', models.TextField(blank=True)),
('star_point', models.PositiveSmallIntegerField(blank=True,
null=True)),
('author_writing', models.TextField(blank=True)),
                ('book', models.OneToOneField(on_delete=models.CASCADE,  # on_delete expects a deletion handler such as CASCADE, not a bool
                                              to='books.Book')),
],
),
]
|
[
"gud305@gmail.com"
] |
gud305@gmail.com
|
921fdfb2ed89dff1c23de5a2ea5c4ddd22590652
|
b88b6fd3e7965b1df338589f4d7d1db205eb649a
|
/kindadmin/pager.py
|
caaac006cd1b8a279e1bcd4e30b88f1a73e9df4f
|
[] |
no_license
|
xieyousheng/Kindadmin
|
b47747b0bf65da576f3e7c122285d313be45a3eb
|
fe65727ec290260a2b20a6972f7b660fcd564d5f
|
refs/heads/master
| 2020-11-28T20:58:49.244902
| 2019-12-24T14:41:28
| 2019-12-24T14:41:28
| 229,918,477
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,127
|
py
|
class Pagination:
def __init__(self,items,getargs=None,url='',pageItemLs=1,maxPageNum=11):
        '''
        :param items: queryset returned by the database query
        :param currenPageNum: current page number --- curren_page_num
        :param pageItemLs: how many items to show per page --- page_item_list
        :param maxPageNum: maximum number of page links to show --- max_page_num
        :param url: the page on which pagination is rendered --- url
        :param getargs: GET parameters of the url to preserve --- url
        '''
self.url = url
self.items = items
self.page_items_max = items.count()
self.page_item_list = pageItemLs
self.curren_page_num = None
        # If the maximum number of displayed page links exceeds the total page
        # count, cap it at the total page count.
self.max_page_num = maxPageNum if self.total_page_num > maxPageNum else self.total_page_num
        '''
        If the supplied current page number is not an integer, fall back to 1.
        If it is less than or equal to 0, fall back to 1.
        If it exceeds the highest page number the data can produce, clamp it
        to that highest page number; otherwise use it as-is.
        '''
self.get_args(getargs)
try:
v = int(self.curren_page_num)
if v <= 0 :
self.curren_page_num = 1
elif v > self.total_page_num:
self.curren_page_num = self.total_page_num
else:
self.curren_page_num = v
except Exception as e:
self.curren_page_num = 1
def get_args(self,getargs):
result = ''
        for k, v in (getargs or {}).items():  # tolerate the default getargs=None
if k != 'p':
if v:
result += '&%s=%s' % (k,v)
else:
self.curren_page_num = v
self.url_args = result
def get_item(self):
        '''
        Return the slice of items that belongs to the current page.
        :return:
        '''
return self.items[self.start:self.end]
@property
def total_page_num(self):
        '''
        Compute the total number of pages.
        :return:
        '''
total,b = divmod(self.page_items_max,self.page_item_list)
total = total + 1 if b != 0 else total
return total
@property
def start(self):
        '''Compute the start index for slicing the data.'''
return ( self.curren_page_num -1 ) * self.page_item_list
@property
def end(self):
        '''Compute the end index for slicing the data.'''
return self.curren_page_num * self.page_item_list
def pagenum_range(self):
        '''
        Dynamically generate the page numbers to display.
        :return:
        '''
        # Use half the number of displayed page links as the pivot.
page = self.max_page_num // 2
if self.curren_page_num <= page:
            # Current page at or below the pivot: show pages 1 .. max_page_num.
return range(1,self.max_page_num+1)
        # If (current page + pivot) exceeds the total page count, show the last max_page_num pages: (total - max_page_num + 1) .. total.
if (self.curren_page_num + page) > self.total_page_num :
return range(self.total_page_num - self.max_page_num + 1 ,self.total_page_num +1 )
        # Otherwise centre on the current page: show (current - pivot) .. (current + pivot).
return range(self.curren_page_num - page,self.curren_page_num + page + 1)
def item_list(self,type='http'):
        '''
        Return the pager markup as an HTML string.
        :return:
        '''
if self.page_items_max:
item = ['<nav aria-label="..." ><ul class="pagination">',]
if type == 'http':
item.append( '<li><a href="%s?p=1%s">首页</a></li>' % (self.url,self.url_args))
if self.curren_page_num == 1:
item.append('<li class="disabled"><a>上一页</a></li>')
else:
item.append('<li><a href="%s?p=%s%s">上一页</a></li>' % (self.url, self.curren_page_num - 1,self.url_args))
for i in self.pagenum_range():
if i == self.curren_page_num:
item.append('<li class="active"><a href="%s?p=%s%s">%s</a></li>' % (self.url, i,self.url_args, i))
else:
item.append('<li><a href="%s?p=%s%s">%s</a></li>' % (self.url, i,self.url_args, i))
if self.curren_page_num == self.total_page_num:
item.append('<li class="disabled"><a>下一页</a></li>')
else:
item.append('<li><a href="%s?p=%s%s">下一页</a></li>' % (self.url, self.curren_page_num + 1,self.url_args))
item.append('<li><a href="%s?p=%s%s">尾页</a></li>' % (self.url, self.total_page_num,self.url_args))
elif type == 'ajax':
item.append('<li><a pager=1>首页</a></li>')
if self.curren_page_num == 1:
item.append('<li class="disabled"><a>上一页</a></li>')
else:
item.append('<li><a pager=%s>上一页</a></li>' % (self.curren_page_num - 1))
for i in self.pagenum_range():
if i == self.curren_page_num:
item.append('<li class="active"><a pager=%s>%s</a></li>' % ( i, i))
else:
item.append('<li><a pager=%s>%s</a></li>' % ( i, i))
if self.curren_page_num == self.total_page_num:
item.append('<li class="disabled"><a>下一页</a></li>')
else:
item.append('<li><a pager=%s>下一页</a></li>' % (self.curren_page_num + 1))
item.append('<li><a pager=%s>尾页</a></li>' % (self.total_page_num))
item.append(' </ul></nav>')
return ''.join(item)
else:
return ''
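
if __name__ == '__main__':
    # Minimal self-test sketch. `FakeQuerySet` is a hypothetical stand-in for
    # a Django queryset: Pagination only relies on count() and slicing.
    class FakeQuerySet(list):
        def count(self):
            return len(self)

    pager = Pagination(FakeQuerySet(range(25)), getargs={'p': '2'},
                       url='/demo/', pageItemLs=5)
    print(pager.get_item())   # items 5..9, i.e. page 2
    print(pager.item_list())  # Bootstrap-style pager HTML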
|
[
"noreply@github.com"
] |
xieyousheng.noreply@github.com
|
c1b97a227b543d07d9d07e860a85c5260abd350c
|
9db46d4400247152a3e203f3ea911fb380987cc8
|
/waits.py
|
19c6292b19d6b3afbf5924096c6148350590199b
|
[] |
no_license
|
kren1504/Python_Selenium_DDT_project
|
354f778f27e19d296309c20c350369e55b487cfa
|
693ec05c5525767e6b0e65feada44a2e8ca49d1c
|
refs/heads/main
| 2023-05-11T15:15:01.196162
| 2021-06-07T02:20:40
| 2021-06-07T02:20:40
| 350,905,512
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
import unittest
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class NavigationTest(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path=r'C:\Users\kren1\Documents\curso\Python_con_Selenium\chromedriver.exe')
        driver = self.driver
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('http://demo-store.seleniumacademy.com/')
def test_browse_navigation(self):
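        # Explicit wait: poll for up to 10 seconds until the language <select>
        # element reports 3 options via its "length" attribute.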
WebDriverWait(self.driver, 10).until(lambda s: s.find_element_by_id('select-language').get_attribute('length') == '3')
account =WebDriverWait(self.driver,10).until(EC.visibility_of_element_located((By.LINK_TEXT,'ACCOUNT')))
account.click()
def test_create_new_customer(self):
self.driver.find_element_by_link_text('ACCOUNT').click()
my_account = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located((By.LINK_TEXT, "My Account")))
my_account.click()
create_account_button = WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable((By.LINK_TEXT,'CREATE AN ACCOUNT')))
create_account_button.click()
WebDriverWait(self.driver, 10).until(EC.title_contains('Create New Customer Account'))
def tearDown(self):
self.driver.close()
if __name__ =="__main__":
unittest.main(verbosity=2)
|
[
"kren1504@hotmail.com"
] |
kren1504@hotmail.com
|
ef1344159a67b2724a5f45c55b235d2af12ec257
|
b53b1f4d3741570c8ad42c1bedd2f3598980730b
|
/exercise7.py
|
b44bd190ca11d3596d7d65328f6f63733481d690
|
[
"MIT"
] |
permissive
|
nikhadif/Advanced-Programming
|
575912aee51fb59359cfd63b329cf43b10cd2767
|
e5bf4f5014c17c252bc0cb93c9d44c7c615b79c0
|
refs/heads/main
| 2023-01-29T17:57:38.117060
| 2020-11-21T21:22:35
| 2020-11-21T21:22:35
| 314,827,244
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 87
|
py
|
data_file = open("example.txt", 'r')
for line_str in data_file:
print(line_str)
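# A context-manager variant of the same loop (a sketch, assuming the same
# example.txt): the handle is closed automatically, and end='' avoids the
# doubled newlines, since each line already ends with one.
with open("example.txt", 'r') as fh:
    for line_str in fh:
        print(line_str, end='')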
|
[
"hadif.nik@gmail.com"
] |
hadif.nik@gmail.com
|
5ba1360e974bb9d3dcb3cbff20b1de8ea546185d
|
93c94374aa97e7cfac87506270f1064bc406339c
|
/Webdriver Commands.py
|
586617f66f6ece1942df88538552663c4da411bf
|
[] |
no_license
|
Ravjot03/Selenium-with-Python
|
66af3f3c05838c03551501e6873acd87efe05b30
|
9b2ed606252f60385f5478a89e2d6ddce9843eec
|
refs/heads/master
| 2022-04-18T11:44:28.344079
| 2020-04-20T19:06:06
| 2020-04-20T19:06:06
| 257,086,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
from selenium import webdriver
driver = webdriver.Chrome(r"D:\Chrome Driver\chromedriver.exe")  # raw string keeps the backslashes literal
driver.get("https://www.makemytrip.com/")
# Returns the title of the page
print(driver.title)
# Returns the URL of the page
print(driver.current_url)
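# The original leaves the session open; a quit() call (added here as a sketch,
# not in the original) releases the browser and the chromedriver process.
driver.quit()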
|
[
"noreply@github.com"
] |
Ravjot03.noreply@github.com
|
4727a1221f0dc2188075fd7838b1d1f0dc1f9c19
|
a76803a441b76595372329a54e84e2b2d8fd5c6b
|
/typesan/__init__.py
|
6dfd5f3f0e862b059bb8c896eb3e868b3ffcd09c
|
[] |
no_license
|
securesystemslab/sanitizing-for-security-benchmarks
|
ac66b1e6bd67a954a88e48751df8ea98b2b400b9
|
c2bf9d922ec8564208a7f926dce56e3a2dfc8355
|
refs/heads/master
| 2020-03-19T07:34:23.463598
| 2018-06-05T06:04:35
| 2018-10-04T04:28:34
| 136,126,254
| 20
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22
|
py
|
from typesan import *
|
[
"dokyung.song@gmail.com"
] |
dokyung.song@gmail.com
|
ec7f116ef7b00dc8259fa4a5002bc9309c32e10b
|
f101fe75892da8d7b5258d22bd31534d47f39ec1
|
/ConvertNpzToMat.py
|
e137f11a7b1d3f999076ff7217fb1d4f89bf77eb
|
[] |
no_license
|
xianjunxia/Acoustic-event-detection-with-feature-space-attention-based-convolution-recurrent-neural-network
|
2ae9d4d0148f5082cc6739f753bf750e1940ecfb
|
d2a7b36700e798e0da02d3efebb27cd340235f36
|
refs/heads/master
| 2020-03-22T17:11:53.028900
| 2018-07-10T05:15:32
| 2018-07-10T05:15:32
| 140,379,734
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
import numpy as np
import scipy.io as sio
feat_file_fold = '/data/users/21799506/Data/PRL2018/Evaluation/feat/' + 'mbe_mon_fold0' + '.npz'
dmp = np.load(feat_file_fold)
_X_train, _Y_train, _X_test, _Y_test = dmp['arr_0'], dmp['arr_1'], dmp['arr_2'], dmp['arr_3']
name = '/data/users/21799506/Data/PRL2018/Evaluation/Test.mat'
sio.savemat(name,{'testX':_X_test,'testY':_Y_test})
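# Round-trip sanity check (a sketch, assuming the paths above are reachable):
# scipy can read the .mat file back, so the saved shapes can be verified.
check = sio.loadmat(name)
print(check['testX'].shape, check['testY'].shape)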
|
[
"noreply@github.com"
] |
xianjunxia.noreply@github.com
|
324846bdc1a33530a491dd793d925c60de0513e6
|
90f7f011a3071a5116e5eca293a892b87ca7f9cd
|
/rich/_log_render.py
|
b5fb6c26a4f2cbbe10536b126c3057de4b2994b8
|
[
"MIT"
] |
permissive
|
gkando/rich
|
862d53b410d4603362de0e8cc7fa88716f2f4be1
|
9e6ac1e0ee6dc8938daab5f86028aa52ecd6ed05
|
refs/heads/master
| 2022-11-15T04:28:12.928279
| 2020-07-02T19:00:33
| 2020-07-02T19:00:33
| 276,956,105
| 1
| 0
|
MIT
| 2020-07-03T17:40:32
| 2020-07-03T17:40:32
| null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
from datetime import datetime
from typing import Iterable, List, Optional, TYPE_CHECKING, Union
from .text import Text, TextType
if TYPE_CHECKING:
from .console import Console, ConsoleRenderable, RenderableType
from .table import Table
class LogRender:
def __init__(
self,
show_time: bool = True,
show_level: bool = False,
show_path: bool = True,
time_format: str = "[%x %X]",
) -> None:
self.show_time = show_time
self.show_level = show_level
self.show_path = show_path
self.time_format = time_format
self._last_time: Optional[str] = None
def __call__(
self,
console: "Console",
renderables: Iterable["ConsoleRenderable"],
log_time: datetime = None,
time_format: str = None,
level: TextType = "",
path: str = None,
line_no: int = None,
link_path: str = None,
) -> "Table":
from .containers import Renderables
from .table import Table
output = Table.grid(padding=(0, 1))
output.expand = True
if self.show_time:
output.add_column(style="log.time")
if self.show_level:
output.add_column(style="log.level", width=8)
output.add_column(ratio=1, style="log.message")
if self.show_path and path:
output.add_column(style="log.path")
row: List["RenderableType"] = []
if self.show_time:
if log_time is None:
log_time = datetime.now()
log_time_display = log_time.strftime(time_format or self.time_format)
if log_time_display == self._last_time:
row.append(Text(" " * len(log_time_display)))
else:
row.append(Text(log_time_display))
self._last_time = log_time_display
if self.show_level:
row.append(level)
row.append(Renderables(renderables))
if self.show_path and path:
path_text = Text()
path_text.append(
path, style=f"link file://{link_path}" if link_path else ""
)
if line_no:
path_text.append(f":{line_no}")
row.append(path_text)
output.add_row(*row)
return output
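# A minimal usage sketch (assumed, not part of the original module): render one
# log line through LogRender and print the resulting grid table. Because of the
# relative imports it must run with package context, e.g. python -m rich._log_render
if __name__ == "__main__":
    from rich.console import Console
    console = Console()
    render = LogRender(show_time=True, show_path=True)
    console.print(render(console, [Text("hello, world")], path="example.py", line_no=1))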
|
[
"willmcgugan@gmail.com"
] |
willmcgugan@gmail.com
|
3a0e5f00f550ad757fd4e79463d9e1b6f71dba5e
|
98a435a3d25cc14c6ca807d38012e42f32e3b073
|
/PythonFile/TestStatus.py
|
2b380be2669b43f8abbce42171714b1f14412f54
|
[] |
no_license
|
congnguyen53/FantasticFour
|
9b0780e284638e1774b9adec3d9131e428e508e3
|
8576d30e53a97695e1c18fcea16ea595b977f8d8
|
refs/heads/master
| 2021-03-05T15:19:00.762662
| 2020-04-18T02:56:46
| 2020-04-18T02:56:46
| 246,130,223
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,319
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'TestStatus.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Test(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(400, 500)
MainWindow.setMinimumSize(QtCore.QSize(400, 500))
MainWindow.setMaximumSize(QtCore.QSize(400, 500))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(50, 180, 161, 51))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(50, 80, 151, 51))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(50, 280, 171, 51))
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.in_patid = QtWidgets.QTextEdit(self.centralwidget)
self.in_patid.setGeometry(QtCore.QRect(220, 90, 161, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.in_patid.setFont(font)
self.in_patid.setObjectName("in_patid")
self.in_teststatus = QtWidgets.QTextEdit(self.centralwidget)
self.in_teststatus.setGeometry(QtCore.QRect(220, 190, 161, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.in_teststatus.setFont(font)
self.in_teststatus.setObjectName("in_teststatus")
self.in_dateorder = QtWidgets.QTextEdit(self.centralwidget)
self.in_dateorder.setGeometry(QtCore.QRect(220, 290, 161, 41))
font = QtGui.QFont()
font.setPointSize(14)
self.in_dateorder.setFont(font)
self.in_dateorder.setObjectName("in_dateorder")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 400, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Test Status:"))
self.label_2.setText(_translate("MainWindow", "Patient ID:"))
self.label_3.setText(_translate("MainWindow", "Date Order:"))
self.in_patid.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:14pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">jdoe01</p></body></html>"))
self.in_teststatus.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:14pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">In Progress</p></body></html>"))
self.in_dateorder.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:14pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">2020-01-01</p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_Test()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
congnguyen53.noreply@github.com
|
277ead3ec5a5cbba60b9ea0253dfa9ef4caa72ba
|
b44d20c67f9acd5842a1b33245829df2c56477e9
|
/lesson07/vector.py
|
8eae1f01d2697bcf52e9ebbeab9fb907a0eec7d2
|
[] |
no_license
|
yxdragon/python_study
|
c6f344b2ab0776e97a4527a21b7b78a39dabf7e0
|
0fdea17598c8d1338919aa66e61a6a0f507bdc83
|
refs/heads/master
| 2021-09-03T20:12:36.808985
| 2018-01-11T17:09:13
| 2018-01-11T17:09:13
| 113,181,903
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 654
|
py
|
import numpy as np
import matplotlib.pyplot as plt
def vectordot():
v1 = np.array([3,0])
v2 = np.array([2,3])
print(np.dot(v1, v2))
def vectorcross():
v1 = np.array([3,0])
v2 = np.array([2,3])
print(np.cross(v2, v1))
def triarea(p1, p2, p3):
v1 = np.array(p2)-np.array(p1)
v2 = np.array(p3)-np.array(p1)
print(np.abs(np.cross(v1,v2))/2)
def polyarea(xys):
    # shoelace formula: half the absolute sum of the cross products of
    # consecutive vertex vectors
    print(np.abs(np.cross(xys[:-1], xys[1:]).sum())/2)
#triarea((0,0),(5,0),(0,-2),(0,0))
#xys = np.array([(0,0),(5,0),(0,-2),(0,0)])
#polyarea(xys)
# trace a unit circle and feed it to polyarea; the printed area should be close to pi
a = np.linspace(0, np.pi*2, 10000)
xs = np.cos(a) * 1
ys = np.sin(a) * 1
polyarea(np.array([xs, ys]).T)
|
[
"imagepy@sina.com"
] |
imagepy@sina.com
|
b58ca56d1b2430ff3129ed9bdc14dbf57d872bc1
|
11a57d6fb851762ef22248f4c0a83b2bf84734fa
|
/node_modules/app.js/node_modules/jsdom/node_modules/contextify/build/config.gypi
|
ae4f56bb1cc85e5f4c33269039cbfe72a4df7202
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
diennguyen90/MongodbBlog
|
094ebef2eeb893c12219dfd97fa2d3f19d1081b8
|
e6b4c92871786ba8e119b8ebe3b593d58d162ccb
|
refs/heads/master
| 2020-05-21T13:16:12.473729
| 2015-04-07T10:20:10
| 2015-04-07T10:20:10
| 33,200,089
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,200
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/diennguyen/.node-gyp/0.10.35",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"prefeix": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/diennguyen/.npm-init.js",
"userconfig": "/Users/diennguyen/.npmrc",
"node_version": "0.10.35",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/diennguyen/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/1.4.28 node/v0.10.35 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/g3/cx1qxkrs4_51vpltyj_5psx80000gn/T",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"diennguyen90@yahoo.com"
] |
diennguyen90@yahoo.com
|
5557027ea330017c972d13f21cffde827f4eb448
|
2825bf6479e08dfead428ff9f29f28d5c23d953e
|
/24_2/24_11.py
|
304ba9bae57ecb98b831ca046b177afe5ea24486
|
[] |
no_license
|
zedaster/ImaevIntensive
|
bc459187dace7946d8ad75a04e058748134aeac4
|
b91760fa23f25ce2d19778781f35416c177ab881
|
refs/heads/main
| 2023-06-22T00:24:47.039208
| 2021-07-20T10:40:54
| 2021-07-20T10:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
file = open('24-153.txt')
line = file.readline().replace('\n', '')
data = {}
# count how often each symbol appears right after a doubled character
for i in range(2, len(line)):
if line[i-2] == line[i-1]:
symb = line[i]
if symb not in data:
data[symb] = 1
else:
data[symb] += 1
print(data)
|
[
"serzh.kazantseff@gmail.com"
] |
serzh.kazantseff@gmail.com
|
6158c49a8c266213f56fbcfaba232e8b7c064876
|
e386b999ffb35c267ce237faed9d9a5a444836aa
|
/import requests.py
|
45ac6df220e3e4e9d36eaba748c1dbff5ccf9981
|
[] |
no_license
|
jgotting/test_requests
|
e7946d8741671d092499e164704a9f4d98e99ab5
|
c32c27c89e3356e21e93ff90f4be6aacebd2d942
|
refs/heads/main
| 2023-07-14T20:36:14.516938
| 2021-08-26T12:09:58
| 2021-08-26T12:09:58
| 397,298,573
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
import requests
response = requests.get("https://jsonplaceholder.typicode.com/todos/2")
print(response.status_code)
res = (response.json()) # Transform JSON to a python dictionary
print(res)
print(type(res)) # Check the type
for key in res: # Iterate through the Dict
print(key, " --> ", res[key])
|
[
"johan@gotting.com"
] |
johan@gotting.com
|
2e1f1061f771d1f7f74627ae140395e510728bee
|
6c721f3cfce6dc88396cd3b5f6a59d65a2ea5033
|
/some_learn/Data_Set_handle/Caltech-Dateset/param_copy/copy_param.py
|
3b86ec1b6b3778a1e258d9286b759fc6c22fe8a2
|
[
"MIT"
] |
permissive
|
unicoe/PycharmProjects
|
20a3dabe88c7874da54451c7bb16999afc0eee35
|
23ff314eb5ac9bfa01a8278089d722b5d0061751
|
refs/heads/master
| 2020-03-23T09:16:25.907188
| 2019-12-21T03:10:49
| 2019-12-21T03:10:49
| 141,377,686
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
# -*- coding: utf-8 -*-
# @Time : 19-3-4 8:31 PM
# @Author : unicoe
# @Email : unicoe@163.com
# @File : copy_param.py
# @Software: PyCharm
source_ls = []
target_ls = []
for source_i in open("/home/user/PycharmProjects/some_learn/Data_Set_handle/Caltech-Dateset/param_copy/source.txt"):
source_ls.append(source_i.strip("\n"))
# NOTE: the original read source.txt here as well, which leaves target_ls an
# exact copy of source_ls; "target.txt" is the assumed intended file.
for target_i in open("/home/user/PycharmProjects/some_learn/Data_Set_handle/Caltech-Dateset/param_copy/target.txt"):
target_ls.append(target_i.strip("\n"))
print(source_ls)
print(target_ls)
for i in range(len(source_ls)):
print("""if k == '"""+source_ls[i] +"""' :\n\tpretrained_dict1['"""+target_ls[i]+"""'] = v""")
|
[
"unicoe@163.com"
] |
unicoe@163.com
|
10dd7a08250f2c7156f886c74ef886a92bf41c9b
|
5419d8d02a7fdec15f542498840d19af939fa130
|
/LeetCode_June_Challenge/Day_2_Delete_Node_in_a_Linked_List.py
|
06bd7812d8148323ec6c59a2a6fd2cd9fb5c8a74
|
[] |
no_license
|
foolchauhan/DataStructureAndAlgorithms
|
3cf1d4927b6b11592789cb8d1a6800c9de4822e2
|
d53281a724a8b58ccc67ebe8c0e0af6c4ae63c4a
|
refs/heads/master
| 2022-10-10T08:29:14.461248
| 2020-06-10T16:18:36
| 2020-06-10T16:18:36
| 244,270,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
'''
Delete Node in a Linked List
Write a function to delete a node (except the tail) in a singly linked list, given only access to that node.
Given linked list -- head = [4,5,1,9], which looks like following:
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.
Note:
The linked list will have at least two elements.
All of the nodes' values will be unique.
The given node will not be the tail and it will always be a valid node of the linked list.
Do not return anything from your function.
'''
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
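# A minimal sketch (not part of the original solution) exercising deleteNode:
# build 4 -> 5 -> 1 -> 9, delete the node holding 5, then walk the list.
if __name__ == "__main__":
    head = ListNode(4)
    head.next = ListNode(5)
    head.next.next = ListNode(1)
    head.next.next.next = ListNode(9)
    Solution().deleteNode(head.next)  # delete the node with value 5
    node = head
    while node:
        print(node.val)  # prints 4, then 1, then 9
        node = node.next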
|
[
"chauhanchetan82@gmail.com"
] |
chauhanchetan82@gmail.com
|
bcf8cc6ce6fe953c3fafda864ffdde4f0dc32dae
|
3b384fd6e34c8df6514a7c22349cb7ba997b2aa8
|
/prendi.py
|
97fd4762347ef249a0cd611003a1eff9f9aff0b6
|
[] |
no_license
|
raspberryveronica/termostato
|
e428dc819b3ab136a576ee4a61ee8298c64b555d
|
85ca7b330d94a7d28416c653a112fa7e3d2a50b8
|
refs/heads/master
| 2020-03-28T01:23:19.253860
| 2018-09-05T10:35:06
| 2018-09-05T10:35:06
| 146,267,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
#!/usr/bin/env python
import MySQLdb
db = MySQLdb.connect("localhost", "monitor", "password", "temps")
curs = db.cursor()
#curs.execute ("SELECT * FROM tempdat")
curs.execute ("SELECT * FROM tempdat ORDER BY tdate DESC, ttime DESC LIMIT 1")
print "\nDate Time Zone Temperature Termostato"
print "======================================================================================"
for reading in curs.fetchall():
print str(reading[0])+" "+str(reading[1])+" "+\
          reading[2]+" "+str(reading[3])+" "+str(reading[4])
|
[
"vsmacchia@dg.gostec.it"
] |
vsmacchia@dg.gostec.it
|
bd3157a476e2b4f85fd51c0484bf86d06c75c84f
|
0a1ed4a884ebc5789a2e6513eba7ee5f1eeb1f6d
|
/nuxeo-tools-hooks/nxtools/hooks/tests/webhooks/github_handlers/test_push_notify_mail.py
|
bb316a21543c01c967530c4db9c4a0e0f90fcba6
|
[
"Apache-2.0"
] |
permissive
|
pombredanne/nuxeo-tools-hooks
|
0ea4d447d7fa4e61850a35c5d703d9495985cb0f
|
9f96f91a28c1e42da9e4786efbad1c294a3d0c57
|
refs/heads/master
| 2021-01-18T12:48:44.534835
| 2016-07-21T14:05:33
| 2016-07-21T14:05:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,633
|
py
|
# -*- coding: utf-8 -*-
"""
(C) Copyright 2016 Nuxeo SA (http://nuxeo.com/) and contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Pierre-Gildas MILLON <pgmillon@nuxeo.com>
"""
from multiprocessing import Process
from mock.mock import Mock, patch
from nxtools import services
from nxtools.hooks.entities.github_entities import PushEvent
from nxtools.hooks.entities.github_entities import RepositoryWrapper
from nxtools.hooks.services.config import Config
from nxtools.hooks.services.mail import EmailService
from nxtools.hooks.tests.webhooks.github_handlers import GithubHookHandlerTest
from nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail import GithubPushNotifyMailHandler
class MockedProcess(Process):
def start(self):
self.run()
class GithubNotifyMailHandlerTest(GithubHookHandlerTest):
def setUp(self):
super(GithubNotifyMailHandlerTest, self).setUp()
patcher = patch("nxtools.hooks.services.mail.EmailService.sendemail", Mock())
patcher.start()
self.addCleanup(patcher.stop)
patcher2 = patch("nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail.Process", MockedProcess)
patcher2.start()
self.addCleanup(patcher2.stop)
def get_event_from_body(self, body):
"""
:rtype: nxtools.hooks.entities.github_entities.PushEvent
"""
return PushEvent(None, None, body, True)
@property
def handler(self):
"""
:rtype: nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail.GithubPushNotifyMailHandler
"""
return services.get(GithubPushNotifyMailHandler)
@property
def email_service(self):
"""
:rtype: nxtools.hooks.services.mail.EmailService
"""
return services.get(EmailService)
@property
def config(self):
"""
:rtype: nxtools.hooks.services.config.Config
"""
return services.get(Config)
def test_bad_branch_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["ref"])
body["ref"] = "refs/wrong/anything"
self.assertTrue(self.handler.is_bad_ref(self.get_event_from_body(body)))
self.assertTupleEqual((400, GithubPushNotifyMailHandler.MSG_BAD_REF % body["ref"]),
self.handler.handle(body))
self.email_service.sendemail.assert_not_called()
def test_ignored_stable_branch_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.config._config.set(self.handler.config_section, "ignored_branches", "stable")
self.config._config.set(self.handler.config_section, "ignore_checks",
"nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail."
"branch_ignore")
self.assertTrue(body["ref"])
body["ref"] = "refs/heads/stable"
event = self.get_event_from_body(body)
branch = event.ref[11:]
self.assertTupleEqual((False, True, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
self.assertFalse(self.handler.is_jenkins(event))
email = self.handler.get_commit_email(event, event.commits[0], True)
self.assertRegexpMatches(email.body, 'WARNING: only Jenkins should commit on this branch')
self.assertRegexpMatches(email.body, 'Branch: ' + branch)
self.assertRegexpMatches(email.subject, '^\[WARN\] %s: %s \(branch@%s\)$' % (
event.repository.name,
event.commits[0].message,
branch))
def test_ignored_snapshot_branch_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.config._config.set(self.handler.config_section, "ignored_branch_suffixes", "-SNAPSHOT")
self.config._config.set(self.handler.config_section, "ignore_checks",
"nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail."
"suffix_ignore")
self.assertTrue(body["ref"])
body["ref"] = "refs/heads/5.7-SNAPSHOT"
event = self.get_event_from_body(body)
branch = event.ref[11:]
self.assertTupleEqual((False, True, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
self.assertFalse(self.handler.is_jenkins(event))
email = self.handler.get_commit_email(event, event.commits[0], True)
self.assertRegexpMatches(email.body, 'WARNING: only Jenkins should commit on this branch')
self.assertRegexpMatches(email.body, 'Branch: ' + branch)
self.assertRegexpMatches(email.subject, '^\[WARN\] %s: %s \(branch@%s\)$' % (
event.repository.name,
event.commits[0].message,
branch))
def test_jenkins_ignored_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.config._config.set(self.handler.config_section, "ignored_branches", "stable")
self.assertTrue(body["ref"])
self.assertTrue(body["pusher"])
body["ref"] = "refs/heads/stable"
body["pusher"] = {
"name": self.handler.jenkins_username,
"email": self.handler.jenkins_email,
}
event = self.get_event_from_body(body)
response = GithubPushNotifyMailHandler.MSG_IGNORE_BRANCH % event.ref[11:]
self.assertTupleEqual((True, False, response), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, response), self.handler.handle(body))
self.email_service.sendemail.assert_not_called()
self.assertTrue(self.handler.is_jenkins(event))
def test_jenkins_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["pusher"])
body["pusher"] = {
"name": self.handler.jenkins_username,
"email": self.handler.jenkins_email,
}
event = self.get_event_from_body(body)
branch = event.ref[11:]
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertEqual(email.sender, "Pierre-Gildas MILLON via Jenkins <%s>" % self.handler.sender)
self.assertEqual(email.reply_to, "Pierre-Gildas MILLON via Jenkins <pgmillon@nuxeo.com>")
self.assertRegexpMatches(email.body, 'Branch: ' + branch)
self.assertRegexpMatches(email.body, 'Author: Pierre-Gildas MILLON via Jenkins <pgmillon@nuxeo.com>')
self.assertRegexpMatches(email.body, 'Pusher: %s <%s>' % (
self.handler.jenkins_username,
self.handler.jenkins_email
))
self.assertRegexpMatches(email.subject, '^%s: %s \(branch@%s\)$' % (
event.repository.name,
event.commits[0].message,
branch))
def test_jenkins_payload_via_jenkins(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["pusher"])
body["pusher"] = {
"name": self.handler.jenkins_username,
"email": self.handler.jenkins_email,
}
self.assertTrue(body["commits"][0]["author"])
self.assertTrue(body["commits"][0]["committer"])
body["commits"][0]["author"] = {
"name": self.handler.jenkins_name,
"email": self.handler.jenkins_email,
"username": self.handler.jenkins_username
}
body["commits"][0]["committer"] = body["commits"][0]["author"]
event = self.get_event_from_body(body)
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertEqual(email.sender, "%s <%s>" % (self.handler.jenkins_name, self.handler.sender))
self.assertEqual(email.reply_to, "%s <%s>" % (self.handler.jenkins_name, self.handler.jenkins_email))
self.assertRegexpMatches(email.body, 'Branch: ' + event.ref[11:])
self.assertRegexpMatches(email.body, 'Author: Jenkins Nuxeo <jenkins@nuxeo.com>')
def test_payload_with_accents(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["commits"][0])
body["commits"][0]["message"] += u" héhé"
body["commits"][0]["committer"]["name"] += u" héhé"
event = PushEvent(None, None, body, True)
self.assertFalse(self.handler.is_bad_ref(event))
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertEqual(email.sender, "Pierre-Gildas MILLON hehe <noreply@nuxeo.com>")
self.assertEqual(email.reply_to, "Pierre-Gildas MILLON hehe <pgmillon@nuxeo.com>")
self.assertEqual(email.subject, "%s: %s (branch@%s)" % (
event.repository.name,
"NXBT-1074: better comments hehe",
event.ref[11:]))
def test_private_repository(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["repository"])
body["repository"]["private"] = True
event = PushEvent(None, None, body, True)
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertEqual(email.to, "interne-checkins@lists.nuxeo.com")
def test_diff_retriever(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
event = PushEvent(None, None, body, True)
self.mocks.requester.requestJsonAndCheck.side_effect = Exception
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertRegexpMatches(email.body, 'Could not read diff - see %s.diff for raw diff' %
event.commits[0].url)
def test_jira_regexp(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["commits"][0])
body["commits"][0]["message"] = "check regexp for NXP-8238 and nxp-666 and also NXS-1234 as well as " \
"NXCONNECT-1234"
event = PushEvent(None, None, body, True)
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertRegexpMatches(email.body, 'JIRA: https://jira.nuxeo.com/browse/NXP-8238')
self.assertRegexpMatches(email.body, 'JIRA: https://jira.nuxeo.com/browse/NXP-666')
self.assertRegexpMatches(email.body, 'JIRA: https://jira.nuxeo.com/browse/NXS-1234')
self.assertRegexpMatches(email.body, 'JIRA: https://jira.nuxeo.com/browse/NXS-1234')
def test_jenkins_payload_with_ignore(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.config._config.set(self.handler.config_section, "ignored_repositories",
"qapriv.nuxeo.org-conf")
self.config._config.set(self.handler.config_section, "ignore_checks",
"nxtools.hooks.endpoints.webhook.github_handlers.push_notify_mail."
"repository_ignore")
event = PushEvent(None, None, body, True)
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
self.assertTrue(body["pusher"])
self.assertTrue(body["commits"][0])
self.assertTrue(body["repository"])
body["commits"].append(body["commits"][0].copy())
body["commits"][0]["message"] = "NXP-8238: updated by SYSTEM."
body["commits"][1]["message"] = "NXP-8238: yo"
body["repository"]["name"] = "qapriv.nuxeo.org-conf"
body["pusher"] = {
"name": self.handler.jenkins_username,
"email": self.handler.jenkins_email,
}
event = PushEvent(None, None, body, True)
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
body["commits"][1]["message"] = "NXP-8238: updated by SYSTEM."
event = PushEvent(None, None, body, True)
response = GithubPushNotifyMailHandler.MSG_IGNORE_COMMITS % ", ".join([
event.commits[0].url, event.commits[1].url
])
self.assertTupleEqual((True, False, response), self.handler.check_branch_ignored(event))
self.assertTupleEqual((200, response), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
def test_standard_payload(self):
with GithubHookHandlerTest.payload_file('github_push') as payload:
body = self.get_json_body_from_payload(payload)
self.assertTrue(body["commits"][0])
event = PushEvent(None, None, body, True)
self.assertTupleEqual((False, False, None), self.handler.check_branch_ignored(event))
with open('nxtools/hooks/tests/resources/github_handlers/github_push.commit.diff') as diff_file, \
open('nxtools/hooks/tests/resources/github_handlers/github_push.email.txt') as email_file:
self.mocks.requester.requestJsonAndCheck.return_value = ({}, {'data': diff_file.read()})
self.mocks.repository_url.return_value = event.repository.url
self.assertTupleEqual((200, GithubPushNotifyMailHandler.MSG_OK), self.handler.handle(body))
self.email_service.sendemail.assert_called_once()
email = self.handler.get_commit_email(event, event.commits[0], False)
self.assertEqual(email.sender, "%s <%s>" % (event.commits[0].author.name, self.handler.sender))
self.assertEqual(email.reply_to, "%s <%s>" % (event.commits[0].author.name, event.commits[0].author.email))
self.assertMultiLineEqual(email_file.read(), email.body)
self.assertEqual(email.to, "ecm-checkins@lists.nuxeo.com")
self.mocks.requester.requestJsonAndCheck.assert_called_with("GET", event.repository.url + '/commits/' + event.commits[0].id, None,
RepositoryWrapper.GITHUB_DIFF_ACCEPT_HEADER, None)
|
[
"pgmillon@nuxeo.com"
] |
pgmillon@nuxeo.com
|
072e2c0969f1deda38a2d236caa3579400bd9720
|
32d1578c73b8aaf1fa4efe85e6645bf7fd0c13fc
|
/_WIP/circleDetect.py
|
21af9cac11da35357c5119a8dac91dd7b7ca517f
|
[] |
no_license
|
arelroche/AutoCheckers
|
25a6bd8e290a0d4a56f09692608c879a0c110c65
|
042d428eb8107af7918a6d3fe7a15695c06ebb11
|
refs/heads/master
| 2016-08-12T04:02:06.433157
| 2016-04-07T22:12:00
| 2016-04-07T22:12:00
| 50,674,520
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import cv2
import numpy as np
#CvCapture * camera = cvCaptureFromCAM(CV_CAP_ANY)
#camera = cv2.CaptureFromCAM(CV_CAP_ANY)
#cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH, 1920) # width of viewport of camera
#cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT, 1080) # height of ...
#img = cv2.QueryFrame(camera)
cam = cv2.VideoCapture(0)
ret_val, img = cam.read()
cv2.imshow('WHADDAP', img)
#img = cv2.imread('opencv_logo.png',0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # camera frames are BGR; HoughCircles needs one channel
img = cv2.medianBlur(img,5)
cimg = cv2.cvtColor(img,cv2.COLOR_GRAY2BGR)
circles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
if circles is not None:  # HoughCircles returns None when no circle is found
    circles = np.uint16(np.around(circles))
    for i in circles[0,:]:
        # draw the outer circle
        cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
        # draw the center of the circle
        cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
cv2.imshow('detected circles',cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"ashisghosh@live.com"
] |
ashisghosh@live.com
|
74798b159143bc4185b94ec639cc3f2139af5665
|
d8c4f1c25cc3d574b730abf11d7276af71378445
|
/main.py
|
cf70cc72c2157591bde6ea955863ac97f669db54
|
[] |
no_license
|
TeppeiIwaoka/myPortfolio
|
72f91127b4925864c1bd9de9ddd286bb89926154
|
4528002974cdb373a99344da1f6fe8971b322e59
|
refs/heads/master
| 2023-03-06T15:57:09.093496
| 2021-02-20T06:43:21
| 2021-02-20T06:43:21
| 340,577,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
from flask import Flask, render_template, redirect, url_for, flash
from flask_bootstrap import Bootstrap
import pandas as pd
from portfolio import Portfolio
app = Flask(__name__)
app.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'
Bootstrap(app)
portfolio_list = []
portfolio_df = pd.read_csv("data.csv")
for index, row in portfolio_df.iterrows():
portfolio_list.append(Portfolio(row["id"], row["name"], row["img_name"], row["detail"]))
@app.route('/')
def get_all_posts():
return render_template("index.html", portfolio_list=portfolio_list)
@app.route('/portfolio/<int:id>', methods=["GET", "POST"])
def detail_portfolio(id):
return render_template("detail.html", portfolio=portfolio_list[id-1])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
|
[
"tep731kaizi@gmail.com"
] |
tep731kaizi@gmail.com
|
ba8731a70c64645bc17fa5ba6a5c3c163d06baf6
|
6daa8327721172d133c95535037ba6105265f74a
|
/other chapters/reverselist.py
|
db91792734539abb100a8ee53198062a72d03851
|
[] |
no_license
|
shaheershantk/Anand-Python
|
9f4a9a226caada1e9e180131d19c02f88571b059
|
d522b386fbe3ca5ddef6150f2b67a05f4a8adb79
|
refs/heads/master
| 2019-03-13T20:17:26.750850
| 2014-11-10T15:17:19
| 2014-11-10T15:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
a = 'apple'
# 'in' on strings is a substring test, so this prints True
if a in 'applearrsnncx':
    print True
else:
    print False
|
[
"shaheer.shan@gmail.com"
] |
shaheer.shan@gmail.com
|
1d0d9f547edc97c5d5bdfdb642f35d61dfa39938
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/web/v20210115/static_site_user_provided_function_app_for_static_site.py
|
c88e320226209cca78deb66fd5230b802c3d0415
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,672
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['StaticSiteUserProvidedFunctionAppForStaticSiteArgs', 'StaticSiteUserProvidedFunctionAppForStaticSite']
@pulumi.input_type
class StaticSiteUserProvidedFunctionAppForStaticSiteArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
function_app_name: Optional[pulumi.Input[str]] = None,
function_app_region: Optional[pulumi.Input[str]] = None,
function_app_resource_id: Optional[pulumi.Input[str]] = None,
is_forced: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a StaticSiteUserProvidedFunctionAppForStaticSite resource.
:param pulumi.Input[str] name: Name of the static site.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] function_app_name: Name of the function app to register with the static site.
:param pulumi.Input[str] function_app_region: The region of the function app registered with the static site
:param pulumi.Input[str] function_app_resource_id: The resource id of the function app registered with the static site
:param pulumi.Input[bool] is_forced: Specify <code>true</code> to force the update of the auth configuration on the function app even if an AzureStaticWebApps provider is already configured on the function app. The default is <code>false</code>.
:param pulumi.Input[str] kind: Kind of resource.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if function_app_name is not None:
pulumi.set(__self__, "function_app_name", function_app_name)
if function_app_region is not None:
pulumi.set(__self__, "function_app_region", function_app_region)
if function_app_resource_id is not None:
pulumi.set(__self__, "function_app_resource_id", function_app_resource_id)
if is_forced is not None:
pulumi.set(__self__, "is_forced", is_forced)
if kind is not None:
pulumi.set(__self__, "kind", kind)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the static site.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="functionAppName")
def function_app_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the function app to register with the static site.
"""
return pulumi.get(self, "function_app_name")
@function_app_name.setter
def function_app_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_app_name", value)
@property
@pulumi.getter(name="functionAppRegion")
def function_app_region(self) -> Optional[pulumi.Input[str]]:
"""
The region of the function app registered with the static site
"""
return pulumi.get(self, "function_app_region")
@function_app_region.setter
def function_app_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_app_region", value)
@property
@pulumi.getter(name="functionAppResourceId")
def function_app_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of the function app registered with the static site
"""
return pulumi.get(self, "function_app_resource_id")
@function_app_resource_id.setter
def function_app_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "function_app_resource_id", value)
@property
@pulumi.getter(name="isForced")
def is_forced(self) -> Optional[pulumi.Input[bool]]:
"""
Specify <code>true</code> to force the update of the auth configuration on the function app even if an AzureStaticWebApps provider is already configured on the function app. The default is <code>false</code>.
"""
return pulumi.get(self, "is_forced")
@is_forced.setter
def is_forced(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_forced", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
class StaticSiteUserProvidedFunctionAppForStaticSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
function_app_name: Optional[pulumi.Input[str]] = None,
function_app_region: Optional[pulumi.Input[str]] = None,
function_app_resource_id: Optional[pulumi.Input[str]] = None,
is_forced: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Static Site User Provided Function App ARM resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] function_app_name: Name of the function app to register with the static site.
:param pulumi.Input[str] function_app_region: The region of the function app registered with the static site
:param pulumi.Input[str] function_app_resource_id: The resource id of the function app registered with the static site
:param pulumi.Input[bool] is_forced: Specify <code>true</code> to force the update of the auth configuration on the function app even if an AzureStaticWebApps provider is already configured on the function app. The default is <code>false</code>.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the static site.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StaticSiteUserProvidedFunctionAppForStaticSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Static Site User Provided Function App ARM resource.
:param str resource_name: The name of the resource.
:param StaticSiteUserProvidedFunctionAppForStaticSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StaticSiteUserProvidedFunctionAppForStaticSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
function_app_name: Optional[pulumi.Input[str]] = None,
function_app_region: Optional[pulumi.Input[str]] = None,
function_app_resource_id: Optional[pulumi.Input[str]] = None,
is_forced: Optional[pulumi.Input[bool]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StaticSiteUserProvidedFunctionAppForStaticSiteArgs.__new__(StaticSiteUserProvidedFunctionAppForStaticSiteArgs)
__props__.__dict__["function_app_name"] = function_app_name
__props__.__dict__["function_app_region"] = function_app_region
__props__.__dict__["function_app_resource_id"] = function_app_resource_id
__props__.__dict__["is_forced"] = is_forced
__props__.__dict__["kind"] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["created_on"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:web:StaticSiteUserProvidedFunctionAppForStaticSite"), pulumi.Alias(type_="azure-native:web/v20201201:StaticSiteUserProvidedFunctionAppForStaticSite"), pulumi.Alias(type_="azure-native:web/v20210101:StaticSiteUserProvidedFunctionAppForStaticSite"), pulumi.Alias(type_="azure-native:web/v20210201:StaticSiteUserProvidedFunctionAppForStaticSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StaticSiteUserProvidedFunctionAppForStaticSite, __self__).__init__(
'azure-native:web/v20210115:StaticSiteUserProvidedFunctionAppForStaticSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StaticSiteUserProvidedFunctionAppForStaticSite':
"""
Get an existing StaticSiteUserProvidedFunctionAppForStaticSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StaticSiteUserProvidedFunctionAppForStaticSiteArgs.__new__(StaticSiteUserProvidedFunctionAppForStaticSiteArgs)
__props__.__dict__["created_on"] = None
__props__.__dict__["function_app_region"] = None
__props__.__dict__["function_app_resource_id"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return StaticSiteUserProvidedFunctionAppForStaticSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> pulumi.Output[str]:
"""
The date and time on which the function app was registered with the static site.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="functionAppRegion")
def function_app_region(self) -> pulumi.Output[Optional[str]]:
"""
The region of the function app registered with the static site
"""
return pulumi.get(self, "function_app_region")
@property
@pulumi.getter(name="functionAppResourceId")
def function_app_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource id of the function app registered with the static site
"""
return pulumi.get(self, "function_app_resource_id")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
f36b7b0cab0b8035a993b971e4007ba116125864
|
25418d09c0355c4f4536b87614f09c2d81c14ffc
|
/snake.py
|
c1c5a617b54b24f2936dc16e19c8f879d588a41f
|
[] |
no_license
|
yufangwen/snake
|
b34da9ceb8b94074e9fd44d2b5cb35489474dfb8
|
04ec4ee692f412b836ddc6d789ae63d115ea5534
|
refs/heads/master
| 2021-01-06T20:40:03.755631
| 2017-08-07T05:49:11
| 2017-08-07T05:49:11
| 99,540,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
import random
import select
import sys
import termios
import time
import tty
current_milli_time = lambda: int(round(time.time() * 1000))
class Snake:
def __init__(self):
self.body = [(5, 5), (6, 5), (7, 5)] # tail to head
self.direction = 'right'
def setdir(self, direction): # cannot turn back
dirs = ['left', 'up', 'down', 'right']
if (dirs.index(self.direction) + dirs.index(direction)) != 3:
self.direction = direction
def move(self, food):
direction = self.direction
x, y = self.body[-1]
if direction == 'right':
x += 1
elif direction == 'left':
x -= 1
elif direction == 'up':
y -= 1
elif direction == 'down':
y += 1
newhead = (x, y)
self.body.append(newhead)
if newhead != (food.x, food.y): # not eating
self.body.pop(0)
return newhead
class Food:
def __init__(self, xmax, ymax):
self.xmax = xmax
self.ymax = ymax
def feed(self):
self.x = random.randint(0, self.xmax)
self.y = random.randint(0, self.ymax)
class Game:
def __init__(self, height, width):
self.board = (height, width) # height and width
self.snake = Snake()
self.food = Food(width-1, height-1)
self.food.feed()
def render(self):
sys.stdout.write("\x1b[2J\x1b[H")
board = [[' ' for x in range(self.board[1])] for y in range(self.board[0])]
for c in self.snake.body:
board[c[1]][c[0]] = 'x'
board[self.food.y][self.food.x] = '*'
for line in board:
sys.stdout.write(''.join(line) + '<' + '\r\n')
sys.stdout.write('^' * (self.board[1] + 1) + '\r\n')
sys.stdout.flush()
def loop(self, old):
while True:
# t = threading.Timer(1.0, self.move_by_itself, [old])
# t.start()
self.render()
ch = None
rl, _, _ = select.select([sys.stdin], [], [], 0.15)
if rl:
ch = sys.stdin.read(1)
if ch == 'l':
self.snake.setdir('right')
elif ch == 'j':
self.snake.setdir('left')
elif ch == 'k':
self.snake.setdir('down')
elif ch == 'i':
self.snake.setdir('up')
elif ch == 'q':
self.quit(old)
return
else:
self.snake.setdir(self.snake.direction)
newhead = self.snake.move(self.food)
if not self.check(newhead):
self.quit(old)
return
if newhead == (self.food.x, self.food.y): # it is eating
self.food.feed()
def quit(self, oldsetting):
sys.stdout.write('life is too long' + '\r\n')
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldsetting)
sys.stdout.write("\033[?25h")
def check(self, newhead):
x, y = newhead
if 0 <= x < self.board[1] and 0 <= y < self.board[0]:
return True
else:
return False
def main(self):
old = termios.tcgetattr(sys.stdin)
tty.setraw(sys.stdin)
sys.stdout.write("\033[?25l")
self.loop(old)
if __name__ == '__main__':
g = Game(20, 40)
g.main()
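# Controls handled in Game.loop above: i = up, k = down, j = left, l = right, q = quit.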
|
[
"yynyygy@gmail.com"
] |
yynyygy@gmail.com
|
34b7687e7ffa5870bb2abf402f4245d6f5a00b60
|
0636aa9b74672a522d56fa7d3985f6958062caa7
|
/Solutions/Euler Prob 9.py
|
4ff18fd8d7f6848257721e0ea04d5440500a0904
|
[] |
no_license
|
LeonidasRex/Project_Euler
|
34ad4bcfc123ae3da21589bb42f18f7c86f9dcf7
|
010faf852db5254661cb16faf0766a8e3b5c93ab
|
refs/heads/master
| 2020-08-06T01:59:26.139374
| 2013-06-05T16:52:05
| 2013-06-05T16:52:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
'''
Project Euler Problem #9:
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
'''
for a in range(1,1001):
for b in range(1,1001):
if a<b:
c=1000-a-b
if a**2+b**2 == c**2:
print("Pythagorean Triplet: ", a,b,c)
print("Product thereof: ", a*b*c)
|
[
"stilessc@gmail.com"
] |
stilessc@gmail.com
|
28d108589b0bccabf7c29553384b607a5d6be272
|
358145111dcc031668e1b3ed9c3b2e0ca874c58a
|
/复习_python/ProxyPool/run.py
|
7120f9ec38c6d1fc5863186ca1ebf4dab09e074f
|
[] |
no_license
|
leibushi/venv_test
|
63179ed4085bf39719380b225ec99b53cbc36529
|
2461b0e09726c473b825bad74ba69e5f39ae3404
|
refs/heads/master
| 2023-04-08T04:29:51.927671
| 2021-04-16T09:54:29
| 2021-04-16T09:54:29
| 256,421,826
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/3/9 14:52
# @Author : Mqz
# @FileName: run.py
from proxypool.scheduler import Scheduler
import argparse
parser = argparse.ArgumentParser(description="ProxyPool")
parser.add_argument('--processor', type=str, help='processor to run')
args = parser.parse_args()
if __name__ == '__main__':
    if args.processor:
        # runs Scheduler().run_<processor>(); the name must match a run_* method
        getattr(Scheduler(), f'run_{args.processor}')()
else:
Scheduler().run()
|
[
"MeI17771982161"
] |
MeI17771982161
|
b416966ec3aa7ac7d57149617c4caa3c750b3ad3
|
8f75acdcaa44b7b6e7fc614bc097dd5752daeeaf
|
/app/view.py
|
7b04a0a284bf73f2d0562b2a04f8a2f085386d9c
|
[
"MIT"
] |
permissive
|
williamHuang5468/PuppyPicture
|
f4e97f315d936629239dddeca8219355f3e126d0
|
c869305f3e499292af2199bda5b6788854fd5732
|
refs/heads/master
| 2021-01-22T08:47:46.468871
| 2017-02-14T16:26:00
| 2017-02-14T16:26:00
| 81,920,048
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
from app import app
from flask import Flask, jsonify
import sqlmodel
@app.route("/<slug>")
def get_puppy(slug):
result = sqlmodel.read(slug)
index, name, full, url = result
name, url = name.strip(), url.strip()
    output = {
        "name": name,
        "image_url": url,
    }
return jsonify(output)
|
[
"chobits5468@gmail.com"
] |
chobits5468@gmail.com
|
d6ef8085f2bdcf5ee8a5ac5b1b7805d136bfe8e1
|
d07070b37eb23a89dd9d697f45a9508f3d5a6290
|
/app.py
|
564fcc19160e9c8c4b64e1abec76344dbbd29153
|
[] |
no_license
|
klazarz/autonomousdata
|
123bf25dd83f446814607ee9e751129a2d59c1a5
|
453aced1a9989cf73a81b35c32a28dbd5b1e85c4
|
refs/heads/main
| 2023-08-23T14:31:18.081291
| 2021-10-08T21:00:41
| 2021-10-08T21:00:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
import socket
from bs4 import BeautifulSoup
import requests
import login
url ='https://www.dfb.de/3-liga/spieltagtabelle/?no_cache=1&spieledb_path=%2Fde%2Fcompetitions%2F3-liga%2Fseasons%2F2021-22%2Fmatchday%2Fcurrent'
r = requests.get(url)
soup = BeautifulSoup(r.content, "lxml")
print(soup.prettify())
|
[
"kevin.lazarz@oracle.com"
] |
kevin.lazarz@oracle.com
|
141f08f366c353ae4acbc123cb2b8bdca0494601
|
4ffa0ea3526482615da3e4f65d4018706d45e49d
|
/terraing/settings.py
|
409c903ed01b0001a20d061aa37b367ceb1eb8c5
|
[] |
no_license
|
GisHel/pagina
|
d7afa6ec6d3fce4d5b3d188fba6e6df80a7ebe33
|
d3cb1b03f964ade59013c9345005a1ec826c4dc3
|
refs/heads/master
| 2021-01-15T20:13:14.089104
| 2015-03-25T18:06:46
| 2015-03-25T18:06:46
| 31,960,139
| 1
| 0
| null | 2015-03-10T13:53:42
| 2015-03-10T13:53:42
| null |
UTF-8
|
Python
| false
| false
| 2,253
|
py
|
"""
Django settings for terraing project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'byvdks(cp-wg#r0*0p%ko8ow3^mdl94o&(feus=(fs=$t&$062'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_markdown',
'pagina',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'terraing.urls'
WSGI_APPLICATION = 'terraing.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'media'),
)
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
|
[
"vandermicra@hotmail.com"
] |
vandermicra@hotmail.com
|
560c4a9c0649fceddd4ad231faa46bda18a72dd5
|
407c3ee381ef44d36d6d82330b1e77fa16bf6122
|
/distance.py
|
23f1bec5e35590392c05799578d15bf72bbd8d2f
|
[] |
no_license
|
AllenShielder1994/MyCode
|
d8abcf6492915a68d7fffe90d166104a0123aa1a
|
e01384bcc89e69506c0acf4c098b792dedafbc60
|
refs/heads/master
| 2021-03-25T19:52:52.988153
| 2020-03-20T04:46:14
| 2020-03-20T04:46:14
| 247,642,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,696
|
py
|
import numpy as np
import cv2
from cv2 import cv2
import imutils
#Green
#lower = np.array([35,43,46])
#upper = np.array([77,255,255])
#RED
#lower = np.array([156,43,46])
#upper = np.array([180,255,255])
#YELLOW
#lower = np.array([26,43,46])
#upper = np.array([34,255,255])
#BLUE
lower = np.array([78,43,46])
upper = np.array([124,255,255])
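# Note on the bounds above: OpenCV's 8-bit HSV uses H in [0, 179] (half of
# the usual 0-359 degrees) with S and V in [0, 255], which is why the blue
# hue window is 78-124 rather than degree values.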
green = (0, 255, 0)
blue = (255, 0, 0)
red = (0, 0, 255)
# Open the laptop's built-in camera (index 0); use 1, 2, ... for other cameras
cap=cv2.VideoCapture(0)
while True:
    success, img = cap.read()  # read one frame from the camera
# cv2.imshow("img",img)
    hsv_frame = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)  # convert the frame to HSV color space
    mask = cv2.inRange(hsv_frame,lower,upper)
    #cv2.imshow ("mask", mask)
    conts,hier = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)  # find the contours
    cv2.drawContours(img,conts,-1,blue,1)  # draw the contour outlines
    dst = cv2.bitwise_and(img,img,mask=mask)  # bitwise-AND the frame with the mask to keep only the tracked color
# cv2.imshow ("dst",dst)
#cv2.imshow ("img2",img)
for i in range(0,len(conts)):
x, y, w, h = cv2.boundingRect(conts[i])
cv2.rectangle(img, (x,y), (x+w,y+h), (153,153,0), 2)
cv2.imshow("img",img)
#print (conts)
cv2.imwrite("image2.jpg",img)
    k=cv2.waitKey(1)  # keep the window refreshing
    if k == 27:
        # press Esc to stop capturing
        cv2.destroyAllWindows()
        break
    elif k==ord("s"):
        # press 's' to save the frame and exit
        cv2.imwrite("image2.jpg",img)
        cv2.destroyAllWindows()
        break
# release the camera
cap.release()
|
[
"AllenShielder1994@gmail"
] |
AllenShielder1994@gmail
|
d1b24a7fefdf62d576205a051ce817d63fb5715b
|
3d4a813ed74451f99d28d5697511f797c600aa33
|
/cracking_coding/7_OOP/singleton.py
|
2fece46f6ef6be0d6e96335f6404c74bf3984529
|
[] |
no_license
|
921kiyo/algorithms
|
6af48900c7479f8318df10169c22eee4bb229837
|
94d27453e5b6ab88f519c405fcb49fedbd12234c
|
refs/heads/master
| 2021-10-09T23:51:19.845248
| 2019-01-04T17:53:28
| 2019-01-04T17:53:28
| 76,173,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
class Singleton:
def __init__(self, decorated):
print("decorated ", decorated)
self._decorated = decorated
def instance(self):
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
print("self._decorated()", self._decorated())
return self._instance
def __call__(self):
        raise TypeError("Singletons must be accessed through 'instance()'.")
def __instancecheck__(self, inst):
print("instancecheck")
return isinstance(inst, self._decorated)
@Singleton
class Foo:
def __init__(self):
print("foo created")
try:
    f = Foo()  # direct instantiation is rejected once __call__ raises
except TypeError as e:
    print(e)
f = Foo.instance()
g = Foo.instance()
print(f == g)
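# Hedged usage note: Foo is rebound to a Singleton wrapper by the decorator,
# so isinstance checks route through __instancecheck__ above and are
# delegated to the original decorated class.
print(isinstance(f, Foo))  # -> True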
|
[
"kk3317@vm-shell4.doc.ic.ac.uk"
] |
kk3317@vm-shell4.doc.ic.ac.uk
|
477e0bd62704234c4b45393d26431dae1fb08ac0
|
af147e3493938eba7fbf2001aff7e56520b297e7
|
/LeetCode-Easy/Most Common Word.py
|
2cb06cce3932790a5bc77e83cfe4b5c4b1df20cb
|
[] |
no_license
|
pranita-s/Python-codes
|
c4f8fa44a07e080953a646f5af9bbb4dd6dddefd
|
c2b3d19de222c6f7ae135f8c677902b7f7ea701b
|
refs/heads/master
| 2021-09-24T04:58:17.312686
| 2018-10-03T14:04:57
| 2018-10-03T14:04:57
| 109,510,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
# TIME - O(m + n)
# SPACE - O(m + n)
import collections
def mostCommonWord(paragraph, banned):
lookup = set(banned)
    counts = collections.Counter(word.strip("!?',;.") for word in paragraph.lower().split())
result = ''
for word in counts:
if (not result or counts[word] > counts[result]) and \
word not in lookup:
result = word
return result
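# Quick check with the canonical example for this problem; expected output: 'ball'
print(mostCommonWord("Bob hit a ball, the hit BALL flew far after it was hit.", ["hit"]))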
|
[
"noreply@github.com"
] |
pranita-s.noreply@github.com
|
bf71fd4bc7905f9e95be2083dbfc243497e3a581
|
1433054bc2cd0d6f6f0010cfd5bc9536d64f7de0
|
/schedule/models.py
|
b6ad3998a9243cdda967a928528bbb8fbf6f0c62
|
[] |
no_license
|
Pinaz993/SymposiumTimer
|
72b3a560394c6c1a327e22b89e37ffe2a6d91b49
|
c322a388d261c7b9aadd29ae464b21c635519341
|
refs/heads/master
| 2023-05-05T14:59:02.324198
| 2021-05-24T18:59:12
| 2021-05-24T18:59:12
| 357,275,692
| 0
| 0
| null | 2021-05-11T19:13:11
| 2021-04-12T17:06:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,015
|
py
|
from datetime import timedelta
from django.db import models
from django.utils.timezone import now
class Program(models.Model):
"""
A model to represent a program of timers that all have a duration and a label. The program will have a name so that
it can be selected in a drop down menu.
"""
name = models.CharField(max_length=254)
def __str__(self):
return self.name
def to_dict(self):
fields = {'start_time', 'name'}
return {key: self.__dict__[key] for key in self.__dict__.keys() & fields}
class Meta:
ordering = ('name',)
class Timer(models.Model):
"""
A model to represent a single timer to be displayed in a program. Each will have a duration in seconds and a
label to be displayed on the screen along with the timer itself. Each timer is tied exclusively to
one program via a foreign key.
"""
duration = models.DurationField()
label = models.CharField(max_length=62)
program = models.ForeignKey(Program, on_delete=models.CASCADE)
order = models.IntegerField("Drag to Reorder", default=0, blank=False, null=False,)
def __init__(self, *args, actual_duration=0, start_time=0, **kwargs):
if isinstance(kwargs.get('duration', None), int):
d = kwargs.pop('duration')
kwargs['duration'] = timedelta(seconds=d)
print(type(kwargs))
super().__init__(*args, **kwargs)
self.actual_duration = actual_duration
if start_time == 'now':
self.start_time = round(now().timestamp())
elif isinstance(start_time, int):
self.start_time = start_time
def __str__(self):
return str(self.label)
def to_dict(self):
fields = {'duration', 'label', 'start_time', 'actual_duration'}
rtn = {key: self.__dict__[key] for key in self.__dict__.keys() & fields}
rtn['duration'] = rtn.get('duration').total_seconds()
return rtn
class Meta:
ordering = ('order', 'label')
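# Hedged usage sketch (values are illustrative; assumes a Program row exists):
#   p = Program.objects.create(name='Opening Session')
#   t = Timer(duration=90, start_time='now', label='Welcome', program=p)
#   t.to_dict()  # -> {'label': 'Welcome', 'duration': 90.0, 'start_time': <unix ts>, 'actual_duration': 0}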
|
[
"pinaz993@gmail.com"
] |
pinaz993@gmail.com
|
70769fad60b605dbcb303dbc89427d8eccf77179
|
543e4a93fd94a1ebcadb7ba9bd8b1f3afd3a12b8
|
/maza/modules/creds/routers/technicolor/telnet_default_creds.py
|
980ff68f95cac218e0a1233c2b8c3486f099c9e9
|
[
"MIT"
] |
permissive
|
ArturSpirin/maza
|
e3127f07b90034f08ff294cc4afcad239bb6a6c3
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
refs/heads/master
| 2020-04-10T16:24:47.245172
| 2018-12-11T07:13:15
| 2018-12-11T07:13:15
| 161,144,181
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
from maza.core.exploit import *
from maza.modules.creds.generic.telnet_default import Exploit as TelnetDefault
class Exploit(TelnetDefault):
__info__ = {
"name": "Technicolor Router Default Telnet Creds",
"description": "Module performs dictionary attack against Technicolor Router Telnet service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Technicolor Router",
),
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(23, "Target Telnet port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:admin,admin:password,admin:1234,Administrator:", "User:Pass or file with default credentials (file://)")
|
[
"a.spirin@hotmail.com"
] |
a.spirin@hotmail.com
|
63a65db3990b1ce57f1e19b376c76051673b6a47
|
4958810ad94f12731e271ac930ee75046284c0a0
|
/ImageAI/digitclassifier.py
|
04851b74af25d990d1db1ccf541ecc102d45fff9
|
[] |
no_license
|
tensorcoder/AI
|
366d8b188c994d2d1d57c0dad9de225a74fa5ada
|
84bc152b6e4d511e519495cc08f70427419b7c70
|
refs/heads/master
| 2021-01-25T07:54:30.295346
| 2017-07-18T18:32:02
| 2017-07-18T18:32:02
| 93,680,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,734
|
py
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784]) # not a specific value but a placeholder that we'll input when we ask TF to run a computation
y_ = tf.placeholder(tf.float32, shape=[None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.matmul(x, W) + b
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#Weight Initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
#Convolution and Pooling
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1,2,2,1], padding='SAME')
#First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
#Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
#Densely connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) +b_fc1)
#dropout layer
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#Readout layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = mnist.train.next_batch(50)
if i%100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" %(i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
|
[
"m.kedziera@gmail.com"
] |
m.kedziera@gmail.com
|
dac9d82403d8aeb3bb3a2476cb84c8b617198acd
|
ce558d70bb9e6a084f3c29c904a5b38ba4b9574c
|
/script/greg_model.py
|
2049316dfb17802df2900c6927ef4945a790b8ad
|
[] |
no_license
|
lamsking/Intership
|
964b19dcd647d656d2a996a67410dbdd08ebf79b
|
20958153a6b615a9e411f45ac86256f0ded0679d
|
refs/heads/master
| 2022-07-06T23:58:41.153905
| 2020-05-18T15:09:52
| 2020-05-18T15:09:52
| 262,166,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,318
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 12 15:49:07 2019
@author: pam
"""
import warnings
warnings.filterwarnings("ignore")
import os, cv2, random
import numpy as np
import pandas as pd
#%pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib import ticker
import seaborn as sns
#%matplotlib inline
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Input, Dropout, Flatten, Convolution2D, MaxPooling2D, Dense, Activation, SpatialDropout2D, BatchNormalization
from keras.optimizers import RMSprop
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
from keras.utils import np_utils
import os
import sys
# loading labels for each image from csv
data = pd.read_csv('data.csv')
labels = data.iloc[:,0:2]
########################
labels_1 = labels[labels['WD']==1]
labels_0 = labels[labels['WD']==0][:200]
f = [labels_1,labels_0]
######################################################
os.chdir("/home/inra-cirad/Bureau/MonDossier/")
X= labels.iloc[:, :1].values
y = labels.iloc[:, 1:3].values
# print(len(y))
Xs = []
print(cv2.imread(X[0][0],-1).shape)
for p in X:
Xs.append(cv2.imread(p[0],-1))
Xs = np.array(Xs)
# print(len(p))
#dataset_size = len(Xs)
# z = np.random.permutation(len(Xs))
# Xs = Xs[z]
#k = np.random.permutation(len(y))
# y = y[z]
#Xs2dim = Xs.reshape(dataset_size,-1)
# print(Xs)
Xs_train, Xs_test, y_train, y_test = train_test_split(Xs, y, test_size=0.3, shuffle=True)
Xs_train = Xs_train.reshape(Xs_train.shape[0],1,256,320)
Xs_test = Xs_test.reshape(Xs_test.shape[0],1,256,320)
#train_data = pd.concat([X_train, y_train]).drop_duplicates(keep=False)
#test_data=pd.concat([X_test,y_test]).drop_duplicates(keep=False)
# print(y_test)
unique, counts = np.unique(y_test, return_counts=True)
# print(dict(zip(unique, counts)))
# sys.exit(0)
'''
# Separating WD labels
wd_data = labels[labels['WD'] == 1]
print(wd_data)
wd_data.head()
# Splitting WD data into train and test
test_wd_data = wd_data.iloc[-4:,:]
train_wd_data = wd_data.iloc[:-4,:]
print(len(test_wd_data))
# Separating female labels
other_data = labels[labels['WD'] == 0]
other_data.head()
'''
'''
# Splitting male data into train and test
test_other_data = other_data.iloc[-16:,:]
train_other_data = other_data.iloc[:-16,:]
# total test data
test_indices = test_other_data.index.tolist() + test_wd_data.index.tolist()
test_data = labels.iloc[test_indices,:]
test_data.head()
# total train data
train_data = pd.concat([labels, test_data, test_data]).drop_duplicates(keep=False)
train_data.head()
'''
# train and test with image name along with paths
path = ''
#train_image_name = [path+each for each in train_data['image'].values.tolist()]
#test_image_name = [path+each for each in test_data['image'].values.tolist()]
# preparing data by processing images using opencv
ROWS =256
COLS = 320
CHANNELS = 1
def read_image(file_path):
img = cv2.imread(file_path, cv2.IMREAD_COLOR) #cv2.IMREAD_GRAYSCALE
return cv2.resize(img, (ROWS, COLS), interpolation=cv2.INTER_CUBIC)
def prep_data(images):
count = len(images)
data = np.ndarray((count, CHANNELS, ROWS, COLS), dtype=np.uint8)
for i, image_file in enumerate(images):
image = read_image(image_file)
data[i] = image.T
if i%5 == 0: print('Processed {} of {}'.format(i, count))
return data
'''
train = prep_data(train_image_name)
test = prep_data(test_image_name)
# checking count of male and females
sns.countplot(labels['WD'])
# plotting female and male side by side
def show_wd_and_other():
other = read_image(train_image_name[0])
wd = read_image(train_image_name[2])
pair = np.concatenate((other, wd), axis=1)
plt.figure(figsize=(10,5))
plt.imshow(pair)
plt.show()
show_wd_and_other()
# splitting path of all images
train_wd_image = []
train_other_image = []
for each in train_image_name:
if each in train_wd_data['image'].values:
train_wd_image.append(each)
else:
train_other_image.append(each)
'''
#Creating VGG 16 model for training
optimizer = RMSprop(lr=1e-4)
#optimizer = 'adam'
objective = 'binary_crossentropy'
def wd_other():
model = Sequential()
model.add(SpatialDropout2D(0.2, input_shape=(CHANNELS, ROWS, COLS)))
model.add(BatchNormalization())
model.add(Convolution2D(32, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
model.add(Convolution2D(16, 3, 3, border_mode='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
# model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
# model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))
# #model.add(Convolution2D(256, 3, 3, border_mode='same', activation='relu'))#enlever
# model.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
model.add(Flatten())
# model.add(Dense(256, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(256, activation='softmax'))
model.add(Dropout(0.5))
model.add(Dense(18, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# model.add(Activation('sigmoid'))
#model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer='rmsprop')
return model
model = wd_other()
model.summary()
#nb_epoch = 500
#batch_size = 4
#labs = labels.iloc[:,1].values.tolist()
#print(Xs2dim)
## Callback for loss logging per epoch
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.val_losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
early_stopping = EarlyStopping(monitor='val_loss', patience=250, verbose=1, mode='auto')
history = LossHistory()
print(y_train)
model.fit(Xs_train,y_train,validation_data=(Xs_test,y_test), epochs=10, batch_size=500,
verbose=2, shuffle=True, callbacks=[history, early_stopping])
#########
#prediction et matrice de confusion
predictions = model.predict(Xs_test, verbose=1)
predict = model.predict(Xs.reshape(Xs.shape[0],1,256,320))
np.array(np.c_[predict,y])
np.savetxt("classif_test.csv", np.c_[predict,y], delimiter=";")
# print(predictions)
# for i in range(0,len(y_test)):
# print(predictions[i][0],' = ',y_test[i][0])
loss = history.losses
val_loss = history.val_losses
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.title('VGG-16 Loss Trend')
# plt.plot(loss, 'blue', label='Training Loss')
# plt.plot(val_loss, 'green', label='Validation Loss')
# plt.xticks(range(0,10)[0::2])
# plt.legend()
# plt.show()
# import seaborn as sn
# y_pred = []
# y_actuel = []
# mat_conf = []
# mat_conf.append(y_test)
# #print(len(predictions))
# for i in range(0,len(y_test)):
# y_actuel.append(y_test[i][0])
# if predictions[i, 0] >= 0.4450765:
# print('It is {:.7%} sure this is a WD'.format(predictions[i][0]))
# y_pred.append(1)
# else:
# print('It is {:.7%} sure this is a other'.format(1-predictions[i][0]))
# y_pred.append(0)
# data = {'yactuel':y_actuel,'ypred':y_pred}
# #print(data)
# df_mat = pd.DataFrame(data,columns = ['yactuel','ypred'])
# confusion_mat = pd.crosstab(df_mat['yactuel'],df_mat['ypred'],rownames=['Actuel'],colnames=['Predictiion'])
# print(confusion_mat)
# sn.heatmap(confusion_mat,annot=True)
# #######################################
# #enregistement des poids et du model
# model.save_weights('/home/pam/apprentissage/model/lundi08/wd_5000_other.h5')
# model.save('/home/pam/apprentissage/model/lundi08/model_5000.model')
#predictions = model.predict(test, verbose=0)
#print(predictions)
#loss = history.losses
#val_loss = history.val_losses
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.title('VGG-16 Loss Trend')
# plt.plot(loss, 'blue', label='Training Loss')
# plt.plot(val_loss, 'green', label='Validation Loss')
# plt.xticks(range(0,nb_epoch)[0::2])
# plt.legend()
# plt.show()
# for i in range(0,12):
# if predictions[i, 0] >= 0.5:
# print('It is {:.2%} sure this is a other'.format(predictions[i][0]))
# else:
# print('It is {:.2%} sure this is a wd'.format(1-predictions[i][0]))
# print(test_image_name[i])
# plt.imshow(test[i].T)
# plt.show()
|
[
"oumaroulamine98@gmail.com"
] |
oumaroulamine98@gmail.com
|
b45d434d1d89a5b013673fd130656faaaa82b14f
|
b1a68e09ba4a24518ce5ad715b7caf88e7428d56
|
/schema.py
|
b171243c3978583d4ae6367209a59375b818de7e
|
[] |
no_license
|
BEaStia/sanic-graphql-pg
|
271160945f213775492210ef97f01a6f21472035
|
4280913399a3f320782823b5b925ff624ccb25c6
|
refs/heads/master
| 2021-01-23T22:57:48.525231
| 2017-09-09T11:10:36
| 2017-09-09T11:10:36
| 102,949,372
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import graphene
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyConnectionField, SQLAlchemyObjectType
from models import User as UserModel
from models import Entity as EntityModel
class User(SQLAlchemyObjectType):
class Meta:
model = UserModel
interfaces = (relay.Node, )
class Entity(SQLAlchemyObjectType):
class Meta:
model = EntityModel
interfaces = (relay.Node, )
class Query(graphene.ObjectType):
node = relay.Node.Field()
user = graphene.Field(User)
users = graphene.List(User)
def resolve_users(self, info):
query = User.get_query(info) # SQLAlchemy query
return query.all()
schema = graphene.Schema(query=Query, types=[User, Entity])
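# Hedged usage sketch (the selection set depends on the SQLAlchemy models,
# so the field below is illustrative only):
#   result = schema.execute('{ users { id } }')
#   print(result.data)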
|
[
"gophan1992@gmail.com"
] |
gophan1992@gmail.com
|
0f5a13ac6079eefa1bb891e6945ac036114eebc9
|
95f585b11475132c5f7b74a48a9bf08dc939af5a
|
/blog/migrations/0005_remove_blog_read_num.py
|
493790a7e730c9d5d09100cafa3f569a6f1bc63f
|
[] |
no_license
|
yuyukunn/myblog
|
4c0814c33934b8570e8855f9ea2368eaa51780d9
|
987383042f587fac4f1ef40adf51e681378006ba
|
refs/heads/master
| 2023-01-10T22:16:08.651579
| 2019-09-28T09:19:44
| 2019-09-28T09:19:44
| 209,440,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# Generated by Django 2.2.4 on 2019-08-29 06:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_blog_read_num'),
]
operations = [
migrations.RemoveField(
model_name='blog',
name='read_num',
),
]
|
[
"983249451@qq.com"
] |
983249451@qq.com
|
2fa5e37d3c202e2ae621b5537919c525b196bbe7
|
663a91c519ec83a452fc09e9a03a0486c4439110
|
/app/blueprints/base/encryption.py
|
fc4b10b302274a53d45cb886d6ca67d3f32bfc28
|
[] |
no_license
|
carthach/schedulr
|
b6ba695c77f71d71e3ebf9ce8b9efae3afff6b31
|
e8b933bcfbc4f307d0c37b5eb9f9267cc8ae51e3
|
refs/heads/main
| 2023-03-22T16:52:35.845345
| 2021-03-11T06:12:25
| 2021-03-11T06:12:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
import os
from simplecrypt import encrypt, decrypt
def encrypt_string(plaintext):
key = os.environ.get('SECRET_KEY')
ciphertext = encrypt(key, plaintext)
return ciphertext
def decrypt_string(cipher):
key = os.environ.get('SECRET_KEY')
plaintext = decrypt(key, cipher).decode('utf-8')
return plaintext
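# Round-trip sketch (assumes SECRET_KEY is set in the environment):
#   token = encrypt_string('hello')   # bytes ciphertext
#   decrypt_string(token)             # -> 'hello'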
|
[
"rickycharpentier@gmail.com"
] |
rickycharpentier@gmail.com
|
fda80d7c7873e181ef37a8d5bc9b6fcbf3cde78f
|
9a7964447d05e98dfc6ec1f2266fd87d3f9f0075
|
/app/api_1_0/authentication.py
|
88697043e2ac151688b1afc2c51952eba5f8595b
|
[
"MIT"
] |
permissive
|
thisisshrocit/ztool-backhend
|
2932d779eee8b509bf42a1c81f06640dba869d26
|
65c9f700d245ac434ca22d5aee052a302beeb502
|
refs/heads/master
| 2021-01-19T09:55:51.429940
| 2017-02-16T07:35:14
| 2017-02-16T07:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,759
|
py
|
from flask import g, jsonify, request
from flask_httpauth import HTTPBasicAuth
from ..models import User, AnonymousUser
from . import api_1_0
from .errors import unauthorized
from .constant import login_required_list
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(email_or_token, password):
if email_or_token == '':
g.current_user = AnonymousUser()
return True
if password == '':
g.current_user = User.verify_auth_token(email_or_token)
g.token_used = True
return g.current_user is not None
user = User.query.filter_by(email=email_or_token).first()
if not user:
return False
g.current_user = user
g.token_used = False
return user.verify_password(password)
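# Hedged summary of the three paths above: an empty email/token yields an
# anonymous user; a token with an empty password triggers token auth;
# otherwise the credentials are treated as email + password. Over HTTP Basic
# auth that maps to, e.g.:
#   curl -u '<token>:' ...            (token login)
#   curl -u 'user@example.com:pw' ... (password login)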
@auth.error_handler
def auth_error():
return unauthorized('Invalid credentials')
@api_1_0.before_app_request
@auth.login_required
def before_request():
if request.method != 'OPTIONS':
if g.current_user.is_anonymous and request.endpoint:
if '.' in request.endpoint and request.endpoint.startswith('api_1_0') and request.endpoint.split('.')[1] in login_required_list:
return unauthorized('Unauthorized account')
else:
pass
@api_1_0.route('/token')
def get_token():
if g.current_user.is_anonymous or g.token_used:
return unauthorized('Invalid credentials')
return jsonify(token=g.current_user.generate_auth_token(expiration=86400), expiration=86400, email=g.current_user.email)
@api_1_0.route('/test')
@auth.login_required
def login_test():
if g.current_user == AnonymousUser():
return jsonify(status='error', data='Anonymous user!'), 401
else:
return jsonify(email=g.current_user.email, status='success')
|
[
"me@jack003.com"
] |
me@jack003.com
|
7dc4f5c7179d89692de7be2c86522194de1cfa3e
|
e6fef51adfce8cbf70fe1b54bfb71a5bc1b4b4d3
|
/django/loginRegistration/loginRegistration/settings.py
|
57efaf0fa2fc9552b34f8a882e85ef2a1fc60ca8
|
[] |
no_license
|
py1-10-2017/nathan-m-python1
|
dee4e3ecc4b079929a5da63104507f099271a0de
|
73bef2f826422158d0838a141e2e4b59cdf83880
|
refs/heads/master
| 2021-09-10T15:42:34.264552
| 2018-03-28T14:16:50
| 2018-03-28T14:16:50
| 106,894,965
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
"""
Django settings for loginRegistration project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b(%)@%zoz+%ubb=0o8$)o&jpl#ei(%^myu$fbr*11#y41*2$j&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.login_registration'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'loginRegistration.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'loginRegistration.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"nortmahoney@gmail.com"
] |
nortmahoney@gmail.com
|
88b87e552ef1adfbb60c80914b314490c65cb183
|
2b0c08a1b217bedc53b8b67c15a94205f261b3fa
|
/03_ML/m10_pipeline_gridSearch1_iris.py
|
1742d10c9e8ea8afb946ad998d488bcc54f29e1f
|
[] |
no_license
|
kimsoosoo0928/BITCAPM_AI_CLASS
|
385d1db23a9efebe90b68c9b6595067ff1869b76
|
e9d093fbb00fdc5b1f0e85f5d6c31fd23d94e366
|
refs/heads/main
| 2023-07-18T23:49:02.197472
| 2021-08-30T06:51:43
| 2021-08-30T06:51:43
| 399,345,983
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,944
|
py
|
from sklearn.datasets import load_diabetes, load_iris
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV, train_test_split, RandomizedSearchCV
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import accuracy_score
import time
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
#1. Data
datasets = load_iris()
x = datasets.data
y = datasets.target
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, shuffle=True, random_state=66)
n_splits=5
kfold = KFold(n_splits=n_splits, shuffle=True, random_state=66)
# parmeters = [
# {'n_jobs' : [-1], 'n_estimators' : [100, 200], 'max_depth' : [6, 8, 10], 'min_samples_leaf' : [5, 7, 10]},
# {'n_jobs' : [-1], 'max_depth' : [6, 8, 10], 'min_samples_leaf' : [3, 6, 9, 11], 'min_samples_split' : [3, 4, 5]},
# {'n_jobs' : [-1], 'min_samples_leaf' : [3, 5, 7], 'min_samples_split' : [3, 4, 5]},
# {'n_jobs' : [-1], 'min_samples_split' : [2, 3, 5, 10]}
# ]
# parmeters = [
# {'randomforestclassifier__n_jobs' : [-1], 'randomforestclassifier__n_estimators' : [100, 200], 'randomforestclassifier__max_depth' : [6, 8, 10], 'randomforestclassifier__min_samples_leaf' : [5, 7, 10]},
# {'randomforestclassifier__n_jobs' : [-1], 'randomforestclassifier__max_depth' : [6, 8, 10], 'randomforestclassifier__min_samples_leaf' : [3, 6, 9, 11], 'randomforestclassifier__min_samples_split' : [3, 4, 5]},
# {'randomforestclassifier__n_jobs' : [-1], 'randomforestclassifier__min_samples_leaf' : [3, 5, 7], 'randomforestclassifier__min_samples_split' : [3, 4, 5]},
# {'randomforestclassifier__n_jobs' : [-1], 'randomforestclassifier__min_samples_split' : [2, 3, 5, 10]}
# ]
parmeters = [
{'rf__n_jobs' : [-1], 'rf__n_estimators' : [100, 200], 'rf__max_depth' : [6, 8, 10], 'rf__min_samples_leaf' : [5, 7, 10]},
{'rf__n_jobs' : [-1], 'rf__max_depth' : [6, 8, 10], 'rf__min_samples_leaf' : [3, 6, 9, 11], 'rf__min_samples_split' : [3, 4, 5]},
{'rf__n_jobs' : [-1], 'rf__min_samples_leaf' : [3, 5, 7], 'rf__min_samples_split' : [3, 4, 5]},
{'rf__n_jobs' : [-1], 'rf__min_samples_split' : [2, 3, 5, 10]}
]
# 2. Build the model
# pipe = make_pipeline(MinMaxScaler(), RandomForestClassifier())
pipe = Pipeline([('scaler', MinMaxScaler()), ('rf', RandomForestClassifier())])
#! Pipeline takes all of its steps wrapped in a single list.
#! An alias can be assigned: ('rf', RandomForestClassifier()) names the RandomForestClassifier step 'rf'.
model = RandomizedSearchCV(pipe, parmeters, cv=kfold, verbose=1)
#! The pipe model does not own these parameters itself (they belong to RandomForestClassifier), so they cannot be passed bare.
#^ Prefix each parameter with the step it belongs to, e.g. {stepname__parameter: value} -> {'randomforestclassifier__n_jobs': [-1]}
#! pipe is the wrapped model
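# Minimal naming sketch: make_pipeline derives step names from the lowercased
# class name, while Pipeline uses the aliases chosen above, so the same search
# parameter is spelled either way:
#   make_pipeline -> {'randomforestclassifier__n_jobs': [-1]}
#   Pipeline      -> {'rf__n_jobs': [-1]}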
# 3. Compile and train
start_time = time.time()
model.fit(x_train, y_train)
end_time = time.time() - start_time
# 4. Evaluate and predict
print('Best estimator : ', model.best_estimator_)
print('best_params_ : ', model.best_params_)
print('best_score_ : ', model.best_score_)
print('model.score : ', model.score(x_test, y_test))
y_predict = model.predict(x_test)
print('Accuracy : ', accuracy_score(y_test, y_predict))
print('Elapsed time : ', end_time)
'''
Best estimator :  Pipeline(steps=[('scaler', MinMaxScaler()),
('rf',
RandomForestClassifier(max_depth=8, min_samples_leaf=3,
min_samples_split=5, n_jobs=-1))])
best_params_ : {'rf__n_jobs': -1, 'rf__min_samples_split': 5, 'rf__min_samples_leaf': 3, 'rf__max_depth': 8}
best_score_ : 0.95
model.score : 0.9333333333333333
Accuracy :  0.9333333333333333
Elapsed time :  11.75481629371643
'''
|
[
"kimsoosoo0928@gmail.com"
] |
kimsoosoo0928@gmail.com
|
93b651e8c698f4f1d5aa832c177c67a6a41cf433
|
e02ed92f564da0421a124359fad87c467c25d07e
|
/Part 1 Exercise 9.1.py
|
6aab72f150db6489e21c47b4f0504598f3d066c3
|
[] |
no_license
|
ZandbergenM/Homework-week-5_Zandbergen
|
a5565e23bb44da71d62501166c855be714bf5b9d
|
5bb6906cd4be1ac121c788ca39b8e833ef6cb7b9
|
refs/heads/main
| 2022-12-29T20:02:59.201154
| 2020-10-18T08:32:53
| 2020-10-18T08:32:53
| 305,056,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 362
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[7]:
#9.1Write a program that reads words.txt and prints only the words with more than 20 characters
fin = open('words.txt')
def read(x):
    for line in x:
        word = line.strip()
        if len(word) > 20:
            print(word)
read(fin)
# In[6]:
# pwd  # IPython shell shortcut left over from the notebook export; not valid in plain Python
|
[
"noreply@github.com"
] |
ZandbergenM.noreply@github.com
|
8959da063f764c002ae65ed049748b6291ec43db
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_gooks.py
|
8a2800a5ed0d45d1b6901685ac2ed2ea326ccdf2
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 214
|
py
|
# class header
class _GOOKS():
def __init__(self,):
self.name = "GOOKS"
	self.definitions = 'gook'
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['gook']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
85939232ef344819edd0a6ebae7850091ec28d47
|
aaae9c5b3be233b8953b457977cd5d82e4d9ef12
|
/contrail-analyticsdb/hooks/contrail_analyticsdb_hooks.py
|
380f871ea4f8d5b309c44ac83d382bea0e2e032d
|
[
"Apache-2.0"
] |
permissive
|
mfmsilva/tf-charms
|
2b615db17c080165f343b5f2f0000c8c28f38794
|
b0d0283c07740338919d845d47911ecf1496039d
|
refs/heads/master
| 2022-07-03T19:50:24.915057
| 2020-05-11T17:27:47
| 2020-05-11T18:55:00
| 263,873,681
| 0
| 0
| null | 2020-05-14T09:38:18
| 2020-05-14T09:38:18
| null |
UTF-8
|
Python
| false
| false
| 4,571
|
py
|
#!/usr/bin/env python3
import sys
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
config,
log,
relation_get,
related_units,
relation_ids,
status_set,
relation_set,
)
import contrail_analyticsdb_utils as utils
import common_utils
import docker_utils
hooks = Hooks()
config = config()
@hooks.hook("install.real")
def install():
status_set('maintenance', 'Installing...')
# TODO: try to remove this call
common_utils.fix_hostname()
docker_utils.install()
utils.update_charm_status()
@hooks.hook("config-changed")
def config_changed():
utils.update_nrpe_config()
if config.changed("control-network"):
settings = {'private-address': common_utils.get_ip()}
rnames = ("contrail-analyticsdb", "analyticsdb-cluster")
for rname in rnames:
for rid in relation_ids(rname):
relation_set(relation_id=rid, relation_settings=settings)
config["config_analytics_ssl_available"] = common_utils.is_config_analytics_ssl_available()
config.save()
docker_utils.config_changed()
utils.update_charm_status()
# leave it as latest - in case of exception in previous steps
# config.changed doesn't work sometimes...
if config.get("saved-image-tag") != config["image-tag"]:
utils.update_ziu("image-tag")
config["saved-image-tag"] = config["image-tag"]
config.save()
@hooks.hook("contrail-analyticsdb-relation-joined")
def analyticsdb_joined():
settings = {'private-address': common_utils.get_ip()}
relation_set(relation_settings=settings)
def _value_changed(rel_data, rel_key, cfg_key):
if rel_key not in rel_data:
# data is absent in relation. it means that remote charm doesn't
# send it due to lack of information
return False
value = rel_data[rel_key]
if value is not None and value != config.get(cfg_key):
config[cfg_key] = value
return True
elif value is None and config.get(cfg_key) is not None:
config.pop(cfg_key, None)
return True
return False
@hooks.hook("contrail-analyticsdb-relation-changed")
def analyticsdb_changed():
data = relation_get()
_value_changed(data, "auth-info", "auth_info")
_value_changed(data, "orchestrator-info", "orchestrator_info")
_value_changed(data, "maintenance", "maintenance")
_value_changed(data, "controller_ips", "controller_ips")
_value_changed(data, "controller_data_ips", "controller_data_ips")
# TODO: handle changing of all values
# TODO: set error if orchestrator is changing and container was started
utils.update_ziu("analyticsdb-changed")
utils.update_charm_status()
@hooks.hook("contrail-analyticsdb-relation-departed")
def analyticsdb_departed():
count = 0
for rid in relation_ids("contrail-analyticsdb"):
for unit in related_units(rid):
if relation_get("unit-type", unit, rid) == "controller":
count += 1
if count == 0:
for key in ["auth_info", "orchestrator_info"]:
config.pop(key, None)
utils.update_charm_status()
@hooks.hook("analyticsdb-cluster-relation-joined")
def analyticsdb_cluster_joined():
settings = {'private-address': common_utils.get_ip()}
relation_set(relation_settings=settings)
@hooks.hook("analyticsdb-cluster-relation-changed")
def analyticsdb_cluster_changed():
utils.update_ziu("cluster-changed")
@hooks.hook('tls-certificates-relation-joined')
def tls_certificates_relation_joined():
settings = common_utils.get_tls_settings(common_utils.get_ip())
relation_set(relation_settings=settings)
@hooks.hook('tls-certificates-relation-changed')
def tls_certificates_relation_changed():
if common_utils.tls_changed(utils.MODULE, relation_get()):
utils.update_charm_status()
@hooks.hook('tls-certificates-relation-departed')
def tls_certificates_relation_departed():
if common_utils.tls_changed(utils.MODULE, None):
utils.update_charm_status()
@hooks.hook("update-status")
def update_status():
utils.update_ziu("update-status")
utils.update_charm_status()
@hooks.hook("upgrade-charm")
def upgrade_charm():
utils.update_charm_status()
@hooks.hook('nrpe-external-master-relation-changed')
def nrpe_external_master_relation_changed():
utils.update_nrpe_config()
def main():
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log("Unknown hook {} - skipping.".format(e))
if __name__ == "__main__":
main()
|
[
"andrey-mp@yandex.ru"
] |
andrey-mp@yandex.ru
|
217bc5d3a299667d66483f2cb5c9a3aff2b7f119
|
ae4969954630c3684f795cfa316537fee8b92386
|
/splicing_outlier_calling/pre_process.py
|
de9a5a6eb1fffaf81e6880c1cd8c90ab7ee96387
|
[] |
no_license
|
BennyStrobes/process_rv_data
|
d84f024e695c52b1652daf38ee7704c98d5a13b7
|
32c49470e45f80701ddaea3dba94e9c34cb52005
|
refs/heads/master
| 2021-05-02T11:23:25.031244
| 2018-03-13T13:44:39
| 2018-03-13T13:44:39
| 120,776,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,425
|
py
|
import numpy as np
import os
import sys
import pdb
##Get a list of tuples, where tuple[0] is the conventional GTEx tissue name string and tuple[1] is the
##tissue name string as it appears in sample_attribute_file
def get_tissue_pairs(tissue_list_input_file):
f = open(tissue_list_input_file)
tissue_pairs = []
for line in f:
line = line.rstrip()
data = line.split('\t')
tissue_pairs.append((data[0], data[1]))
return tissue_pairs
#Extract individuals (first line) of the covariate file in the form of a dictionary
def get_eqtl_indi_ids_from_covariate_file(file_name):
ids = {}
f = open(file_name)
for line in f:
line = line.rstrip()
data = line.split()
for ele in data[1:]:
ids[ele] = 1
break
return ids
##Extract all GTEx RNAseq samples that belong to this tissue and write them to output_file
def extract_tissue_specific_samples(sample_attribute_file, conventional_tissue_name, attribute_file_tissue_name, indis, output_file, covariate_directory_v6):
f = open(sample_attribute_file)
t = open(output_file, 'w')
# Get list of gtex individual ids used in eqtl analysis. Extract this from the covariate files used in the eqtl analysis
eqtl_indi_ids = get_eqtl_indi_ids_from_covariate_file(covariate_directory_v6 + conventional_tissue_name + '.covariates.txt')
header_count = 0
used_tissue_indis = {} # keep track of which (tissue,indi_ids) have been used to ensure we do not have more than one individual per tissue
count = 0 # Keep track of how many samples are in each tissue
sample_ids = {}
for line in f:
line = line.rstrip()
data = line.split('\t')
if header_count == 0: # skip the header
header_count = header_count + 1
continue
if len(data) < 31: # Skip Short lines (they are not RNA seq)
continue
tiss = data[13]
sample_id = data[0] # ID corresponding to RNA seq sample
info = data[0].split('-') # Extracting individual id from sample id
indi_id = info[0] + '-' + info[1] # individual id
sample_annotation = data[28] # 'USE ME' or 'FLAGGED'
tissue_indi = tiss + '_' + indi_id
if indi_id not in eqtl_indi_ids: # Ignore samples not used in eqtl analysis
continue
if tiss != attribute_file_tissue_name: # We only care about samples with the correct tissue type
continue
if sample_annotation != 'USE ME': # Ignore flagged samples
continue
if tissue_indi in used_tissue_indis: # keep track of which (tissue,indi_ids) have been used to ensure we do not have more than one individual per tissue
print('ERRRROOROROROR')
pdb.set_trace()
indis[indi_id] = 1 # keep track of which individuals are used
used_tissue_indis[tissue_indi] = 1
count = count + 1
if sample_id in sample_ids: # make sure there are no repeats in sample_ids per tissue (or ever!)
print('erororoor')
sample_ids[sample_id] = 1
#Write to output in alphabetical order
for sample_id in sorted(sample_ids.keys()):
t.write(sample_id + '\n')
t.close()
if len(sample_ids) != len(eqtl_indi_ids): # Double check to ensure that the sample ids we have extracted are the same length as individual ids used in eqtl analysis
print('EROROROROROR')
pdb.set_trace()
return indis, count
#Create a file that contains all individuals across all of the tissues.
def write_all_individuals(output_file, indis):
t = open(output_file, 'w')
for indi in sorted(indis):
t.write(indi + '\n')
t.close()
sample_attribute_file = sys.argv[1] # File containing which GTEx samples are to be used for each tissue. Filter list on val(column[28]) == 'USE ME'. Also filter if length(line) < 30 (not full/complete)
tissue_list_input_file = sys.argv[2] # List of gtex tissues. First column is the traditional GTEx tissue name. Second column is the GTEx tissue name as listed in $sample_attribute_file
pre_process_output_dir = sys.argv[3] # output_dir location
covariate_directory_v6 = sys.argv[4] # Used to filter tissue specific samples.
##################################################################################################################
#This script produces lists of what gtex samples will be used in which tissue
#Output files made (all will be written in pre_process_output_dir)
##1. *_rnaseq_sample_ids.txt where * is the conventional_tissue_name. These files are simply a list of all GTEx RNASEQ samples used for this tissue.
##2. all_individuals.txt. A list of all of the individuals across all tissues.
##3. tissue_sample_counts.txt. A table that contains information on how many gtex samples are in each tissue. Column 0 is the gtex tissue name and column 1 is the number of samples in that tissue.
#NOTE: By samples, I mean RNA-seq samples. By individuals, I mean actual people. So for gtex 1 individual may have multiple samples. But 1 sample only has 1 individual.
###################################################################################################################
tissue_pairs = get_tissue_pairs(tissue_list_input_file) # Extract array of tuples. Where each tuple is the two ways to spell the tissue type
indis = {} # Keep track of all individuals we are going to test
t1 = open(pre_process_output_dir + 'tissue_sample_counts.txt', 'w')
#Loop through tissues
for tissue_pair in tissue_pairs:
conventional_tissue_name = tissue_pair[0]
attribute_file_tissue_name = tissue_pair[1] # gtex id format used in sample_attribute_file
tissue_specific_sample_file = pre_process_output_dir + conventional_tissue_name + '_rnaseq_sample_ids.txt' # tissue_specific file that contains the sample ids in that tissue
#main script to extract the samples in this tissue
indis, tissue_specific_sample_count = extract_tissue_specific_samples(sample_attribute_file, conventional_tissue_name, attribute_file_tissue_name, indis, tissue_specific_sample_file, covariate_directory_v6)
t1.write(conventional_tissue_name + '\t' + str(tissue_specific_sample_count) + '\n')
t1.close()
#Create a file that contains all individuals across all of the tissues.
all_indi_output_file = pre_process_output_dir + 'all_individuals.txt'
write_all_individuals(all_indi_output_file, indis)
|
[
"bstrober3@gmail.com"
] |
bstrober3@gmail.com
|
76a39b53458953ae5c942270504cbf79b9007c50
|
f70b2ca7e639423ac47097b5a304b31bc8e75ddd
|
/hbmqtt/__init__.py
|
6e4ee646c237735f3848b1d90e87274351a9f105
|
[
"MIT"
] |
permissive
|
gitter-badger/hbmqtt
|
1bfc0d6b6501acad9595084b916ef223e798dd99
|
79d07685663a9342f2096487ef682af700b05047
|
refs/heads/develop
| 2021-01-24T16:02:50.494238
| 2015-06-30T12:31:19
| 2015-06-30T12:31:19
| 38,318,868
| 0
| 0
| null | 2015-06-30T15:58:00
| 2015-06-30T15:57:59
| null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
VERSION = (0, 2, 0, 'alpha', 0)
|
[
"nico@beerfactory.org"
] |
nico@beerfactory.org
|
84b24c633b70c909cbcb33987e50db04839f9c23
|
5a9e78250f7372ef2e19958c06978985a4d21359
|
/BasicApp/views.py
|
000632b9ee2eded3f338221c174d7d12eb60f625
|
[] |
no_license
|
KaptejnSzyma/ClassBasedViews
|
500c8f105965595404de30649cdc20e2f382b697
|
727f6d24af44795fbb70aed0e82232ff41f3ab6d
|
refs/heads/master
| 2020-03-29T13:30:41.687487
| 2018-09-27T16:10:44
| 2018-09-27T16:10:44
| 149,966,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
from django.shortcuts import render
from django.views.generic import (View, TemplateView,
ListView, DetailView,
CreateView, UpdateView,
DeleteView)
from . import models
from django.core.urlresolvers import reverse_lazy
class IndexView(TemplateView):
template_name = 'index.html'
class SchoolListView(ListView):
context_object_name = 'schools'
model = models.School
class SchoolDetailView(DetailView):
context_object_name = 'school_detail'
model = models.School
template_name = 'BasicApp/school_detail.html'
class SchoolCreateView(CreateView):
fields = ('name', 'principal', 'location')
model = models.School
class SchoolUpdateView(UpdateView):
fields = ('name', 'principal')
model = models.School
class SchoolDeleteView(DeleteView):
model = models.School
success_url = reverse_lazy("basic_app:list")
|
[
"szymon.rzadca@gmail.com"
] |
szymon.rzadca@gmail.com
|
e1a376b6b17541bc109543e9c46855bb10132c87
|
b2f3a076bb2640f227340cabc5d641ce39632c73
|
/Week 1/arg.py
|
46532d860469b5c283e7d6cac150626f25c4540e
|
[] |
no_license
|
rosmoke/DCU-YEAR2
|
6694568a48265a7e1d46f20d315753705827d233
|
a99ee6c3f1090838ac60c6bb9252bbc194181535
|
refs/heads/master
| 2021-05-04T06:34:12.791397
| 2016-10-10T12:43:51
| 2016-10-10T12:43:51
| 70,486,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
import sys
d = sys.argv[1]
print(d)
|
[
"danielasofiei@yahoo.ie"
] |
danielasofiei@yahoo.ie
|
51f4e9020500cd42b9cbb0a6524ddeadd2fbbc9e
|
0f8b56ae8147577afe811f4e6f1c0c3f263c7eb2
|
/src/fc_convert.py
|
689e6af4a64c9cea5389efebc5215b805c6b4118
|
[] |
no_license
|
JiangQH/Hypercolumns-Based-Surface-Normal-Recovery
|
ee6ba295e292d8f9ee6660874e56a7c85fa4e987
|
db8a116b64982e902eda292aa1a6a6016fea7f17
|
refs/heads/master
| 2021-06-14T17:45:55.245742
| 2017-01-15T03:18:34
| 2017-01-15T03:18:34
| 55,966,961
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,512
|
py
|
import caffe
# load the vgg net
vgg_16 = caffe.Net('../model/vgg_16/VGG_16_deploy.prototxt',
'../model/vgg_16/VGG_ILSVRC_16_layers.caffemodel',
caffe.TEST)
params = ['fc6', 'fc7', 'fc8']
fc_params = {pr: (vgg_16.params[pr][0].data, vgg_16.params[pr][1].data) for pr in params}
for fc in fc_params:
    print '{} weights are {} dimensional and bias are {} dimensional'.format(fc, fc_params[fc][0].shape,
                                                           fc_params[fc][1].shape)
#print [(k, v.data.shape) for k, v in vgg_16.blobs.items()]
print [(k, v.data.shape) for k, v in vgg_16.blobs.items()]
# the fully-conv net
vgg_16_full_conv = caffe.Net('../model/vgg_16_full_conv.prototxt',
'../model/vgg_16/VGG_ILSVRC_16_layers.caffemodel',
caffe.TEST)
fc_conv_params = ['fc6-conv', 'fc7-conv', 'fc8-conv']
conv_params = {pr: (vgg_16_full_conv.params[pr][0].data,
vgg_16_full_conv.params[pr][1].data) for pr in fc_conv_params}
for fc in conv_params:
    print '{} weights are {} dimensional and bias are {} dimensional'.format(fc, conv_params[fc][0].shape,
                                                           conv_params[fc][1].shape)
for pr, pr_conv in zip(params, fc_conv_params):
conv_params[pr_conv][0].flat = fc_params[pr][0].flat
conv_params[pr_conv][1][...] = fc_params[pr][1]
vgg_16_full_conv.save('../model/VGG_16_full_conv.caffemodel')
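# Shape sketch of the surgery above (standard VGG-16 figures, for orientation):
#   fc6 weights (4096, 25088)  ->  fc6-conv weights (4096, 512, 7, 7)
# The .flat copy works because both views store their elements in the same
# row-major order, so no reshuffling is needed.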
|
[
"qinhongjiang@zju.edu.cn"
] |
qinhongjiang@zju.edu.cn
|
ab42f22a4e3e9f7ce0b09850827ba7ff92854944
|
b8a77ee310e5a8710706ec940ad50d158c053907
|
/1/1.18.2xinghao.py
|
9d1e51895beb2eb0625e99a374ba01ff6a92d5df
|
[] |
no_license
|
black-star32/cookbook
|
526308f28aa960a5a2b99ea9a32aff1f1b151430
|
5e9f69f35258084224621d23d5826ab04268b547
|
refs/heads/master
| 2020-04-29T20:55:45.593173
| 2019-05-05T03:05:29
| 2019-05-05T03:05:29
| 176,397,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 895
|
py
|
# Uses of * in Python
# 1. As the multiplication operator
# 2. As repetition, for example:
def T(msg, time=1):
print((msg + ' ') * time)
T('hi', 3)
# A single *
# (1) As in *parameter: accepts any number of positional arguments and packs them into a tuple.
def demo(*p):
    print(p)
demo(1, 2, 3)
# (2) When calling a function, a list, tuple, set, dict or other iterable argument can be prefixed with *,
# e.g. *(1, 2, 3): the interpreter unpacks it and passes the items to separate single-value parameters (the counts must match).
def d(a, b, c):
print(a, b, c)
d(1, 2, 3)
a = [1, 2, 3]
b = [1, 2, 3]
c = [1, 2, 3]
d(a, b, c)
d(*a)
# Double **
# As in **parameter: collects keyword arguments given in key=value form into a dict (i.e. turns those arguments into a dictionary).
def demo(**p):
for i in p.items():
print(i)
demo(x=1, y=2)
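# A combined sketch tying both forms together (names are illustrative):
def mixed(a, *args, **kwargs):
    print(a, args, kwargs)
mixed(1, 2, 3, x=4)  # -> 1 (2, 3) {'x': 4}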
|
[
"453431821@qq.com"
] |
453431821@qq.com
|
46ae371434b34ec1f03902283881e48c229e0ae6
|
7be056eb23515c8bd42769fe5d5c365dc16d9a76
|
/Crear_archivo.py
|
8facf7d97cc31cd5ca9e4d4fc0ccf96324482f6e
|
[] |
no_license
|
ZayBit/Python-Crear-archivos-simple-proyecto
|
c31feda203c5dbe4d4dc796157362b077a5c8c6f
|
e571feebb2a80a4f15694b2ed18cf8901f74d298
|
refs/heads/master
| 2020-07-10T10:48:20.026118
| 2019-08-25T04:18:21
| 2019-08-25T04:18:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
# Import os to clear the console with os.system("cls")
import os
# Function to create a new file
def createFile():
    # File name
    print ("File name")
    nameFile = input()
    # Extension to append to the file that will be created
    print ("Extension name (e.g. txt, py, etc.)")
    extensionType = input()
    # If neither nameFile nor extensionType is empty
    if nameFile != "" and extensionType != "":
        # File name concatenated with its extension (e.g. hello.txt)
        fileName = nameFile + '.' + extensionType
        # Open a file for writing; create it if it does not exist
        fileCreate = open(fileName,"w+")
        print("File created successfully")
        # To add content to the newly created file
        print("Do you want to add content to the file? (Y / N)")
        # Lowercase the input before testing the next condition
        condition = input().lower()
        # If the key is y == yes (continue)
        if condition == "y":
            # Store the input received so the next condition can test it
            print("Write the content (finish the program with ENTER)")
            content = input()
            # If the received content has at least one character
            if content != "":
                # Open a file for appending; create it if it does not exist
                f = open(fileName,"a+")
                # Append the content written above to the created file
                f.write(content)
                # Close the open file
                f.close()
                # Clear the console
                os.system("cls")
                print("Content added. Press any key to close the program")
                input()
        else:
            print("Press any key to close the program")
            input()
    else:
        print("Error: the file name or the extension is missing")
# Run the function
createFile()
|
[
"frank_dcoder@hotmail.com"
] |
frank_dcoder@hotmail.com
|
55f5b5e050101220dc734e5ca00dfb61aff7bfc2
|
615e3cdc2c136b2f66b5c553d375823d3580fd08
|
/mundo3/venv/Scripts/pip3.7-script.py
|
5fbeae62b9543b82408d1ca1050366d630a6c596
|
[] |
no_license
|
Android-Ale/PracticePython
|
859a084e224cfb52eed573e38d7d9dc91f405885
|
cab2ac7593deb22e6bb05a95ecd19a8ea2c96b0a
|
refs/heads/master
| 2023-05-06T06:33:36.724569
| 2021-05-15T00:12:06
| 2021-05-15T00:12:06
| 369,307,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
#!C:\Users\Alpha\PycharmProjects\mundo3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
|
[
"alesson9silva@gmail.com"
] |
alesson9silva@gmail.com
|
fd7ea4054b2620b7833e02dfd04cbf0b396aa44b
|
05a10473091d29db159b3345cfc5bbba52bc7c51
|
/ex9.py
|
79e599473fdd5b24b5ea88052bde104547e3661e
|
[] |
no_license
|
willfu3/Han-Academy-Coding-Class
|
5b523415ae2a8be62075e1f3ea7b6cb2f588b33b
|
c35f271093f1596e8c536587780cf4fee0066a8e
|
refs/heads/master
| 2021-07-09T18:42:19.325588
| 2017-10-08T20:13:19
| 2017-10-08T20:13:19
| 106,207,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 358
|
py
|
import random
number = random.randint(1, 100)
tries = 0
prevguess = -1
while True:
    stringguess = input("guess: ")  # raw_input() in the original is Python 2 only
    guess = int(stringguess)
    if guess != prevguess:
        tries += 1
        prevguess = guess
    if guess < number:
        print("too low")
    elif guess > number:
        print("too high")
    else:
        print("correct")
        print("tries: " + str(tries))
        break
|
[
"ziranfu@Zirans-MacBook-Pro.local"
] |
ziranfu@Zirans-MacBook-Pro.local
|
0801b46b6440f1ad1ff3e824b86706f1c12303a5
|
1d7a2ac35ace0ace728e70a849da7abb4a76f18d
|
/Escalones/prueba_csv.py
|
8e1a1d99c1ac3875158c31e892c56b33db15ff08
|
[] |
no_license
|
Jeisongarcia9713/Control_Robot
|
df7d740e266e86093b03b40c24fc938346c34e7c
|
920a846f3b9d41e388f003b0e4139e2c550d0231
|
refs/heads/master
| 2021-04-15T20:00:31.877773
| 2020-04-23T19:52:11
| 2020-04-23T19:52:11
| 249,295,556
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
import pandas as pd
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
datos=pd.read_csv('VelocidadAngular4.txt',header=0)
tiempo=datos['t']
VL=datos['i1']/60
VR=datos['i2']/60
motor1=datos['i3']/60
motor2=datos['i4']/60
motor3=datos['i5']/60
motor4=datos['i6']/60
W=datos['i7']
Angulo=datos['i8']
fig, ax = plt.subplots()
ax.plot(tiempo, VR)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
# (plt.hold was removed in Matplotlib 3.x; successive ax.plot calls already overlay on the same axes)
ax.plot(tiempo, VL)
ax.plot(tiempo, motor1)
ax.plot(tiempo, motor2)
ax.plot(tiempo, motor3)
ax.plot(tiempo, motor4)
ax.plot(tiempo, W)
ax.legend(['VR','VL','motor1','motor2','motor3','motor4','W'])
plt.show()
print(np.mean(motor1))
print(np.mean(motor2))
print(np.mean(motor3))
print(np.mean(motor4))
print(np.mean(W))
|
[
"jegarciat@correo.udistrital.edu.co"
] |
jegarciat@correo.udistrital.edu.co
|
74c693c36ec36c9029d22123a40817590f16a51c
|
80e5090a2dda10d982c58224e43575942bfe82d8
|
/python/py_exception_assertion/challenge/1/exception_assertion_challenge.py
|
07ed844dd7cb861e13c14b2934cbe522017e0c89
|
[] |
no_license
|
cskamil/PcExParser
|
3c2035f064a0f4e117695c2b848c87a9b4e4421a
|
688638f6c3fa3d31c4f8be6391147d773e1aa9dd
|
refs/heads/master
| 2023-05-01T10:45:33.725819
| 2023-02-16T03:19:09
| 2023-02-16T03:19:09
| 94,392,330
| 1
| 0
| null | 2022-11-16T00:39:30
| 2017-06-15T02:42:19
|
Java
|
UTF-8
|
Python
| false
| false
| 2,924
|
py
|
'''
@goalDescription(Construct a program that takes in a decimal larger than 1.0, and prints whether it is divisible by 0.5. There should be try-except blocks and assertion statements to handle the situation where the entry is not a valid decimal, or it is not greater than 1.0.)
@name(Raising Exceptions With Assertions)
@distractor{code(assert int(entry) > 1, 'Incorrect entry. Entry is not greater than 1.0.'), helpDescription()}
@distractor{code(assert int(entry) >= 1, 'Incorrect entry. Entry is not greater than 1.0.'), helpDescription()}
'''
# Step 1: Define the entry used by the program
entry = input('Please enter a decimal larger than 1.0: ')
# Step 2: Define the program
'''@helpDescription(The try block tries to execute the code within it. The statements in the try block may throw several types of exceptions.)'''
try:
    '''@helpDescription("assert" statement assumes that the condition is True. If it is not, an AssertionError is thrown with the customizable error message that follows the comma. If the AssertionError is not handled, it is automatically raised to the outer scope to be handled.)'''
    '''@helpDescription(In this statement, we assume that a decimal point "." is in the entry. If it is not, an AssertionError is thrown that displays the error "Incorrect entry. Please enter a decimal.")'''
    assert '.' in entry, 'Incorrect entry. Please enter a decimal.'
    '''@blank(We assume the entry is a float that is greater than 1.0. If it is not, an AssertionError with the message "Incorrect entry. Entry is not greater than 1.0." is thrown.)'''
    assert float(entry) > 1, 'Incorrect entry. Entry is not greater than 1.0.'
'''@helpDescription(If the entry is not a valid number resulting in a ValueError being thrown, this "except" block will catch it.)'''
except ValueError:
'''@helpDescription(If a ValueError is thrown, this line is printed.)'''
print('Incorrect entry. Please enter a valid decimal number.')
    '''@helpDescription(If an AssertionError is thrown by an assertion statement in the "try" block, it will be intercepted here. The custom AssertionError will be saved as the user-defined variable "error".)'''
except AssertionError as error:
'''@helpDescription(The AssertionError saved as variable "error" will be printed here.)'''
print(error)
'''@helpDescription("Else" block executes if there are no errors. In this case, the entry must be a decimal greater than 1.0.)'''
else:
    '''@helpDescription(If the entry is valid, the if-else block checks if the entry is divisible by 0.5. If entry modulo 0.5 equals 0, the number is divisible by 0.5.)'''
if float(entry) % 0.5 == 0:
print('Correct entry. The number is divisible by 0.5.')
'''@helpDescription(If the entry is valid and the entry modulo 0.5 is not 0, then the number is not divisible by 0.5.)'''
else:
print('Correct entry. The number is not divisible by 0.5.')
|
[
"cskamil@gmail.com"
] |
cskamil@gmail.com
|
b7d77c8289d285f822abfe65c4847f4f4158ea64
|
670c844e5cfa1cdf11212cc53972ecd8f7a25949
|
/python/test/test_OneThreeTwoPattern.py
|
cda75bb9a052a48f244aadd31e9bb0165a6d143e
|
[] |
no_license
|
IamConstantine/LeetCodeFiddle
|
74d580a0741e40397f1283beadd023f9b9323abd
|
5ec509505a394d53517fb005bbeb36745f06596a
|
refs/heads/master
| 2022-05-31T05:25:14.273605
| 2022-05-23T02:46:47
| 2022-05-23T02:46:47
| 100,910,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from unittest import TestCase
from OneThreeTwoPattern import find132pattern
class Test(TestCase):
def test_find132pattern(self):
self.assertEqual(False, find132pattern([1, 2, 3, 4]))
self.assertEqual(True, find132pattern([3, 1, 4, 2]))
|
[
"vishalskumar12@gmail.com"
] |
vishalskumar12@gmail.com
|
6b7abd96ecbb7cfa29b2fcac04282fd635e11d40
|
09d79c3509252cfccac35bb28de9a0379094823a
|
/alx/manage.py
|
e8d4eb94d3bf60dfc0acba8fd7b49a23dc8ee099
|
[] |
no_license
|
marianwitkowski/python2311
|
73ad491016cd6d0010d0203db43aca2c6debe0ad
|
9bbeca3fb6d8658a1321ab099ff2102cd7de76e0
|
refs/heads/master
| 2023-01-22T13:13:56.695680
| 2020-12-02T14:58:15
| 2020-12-02T14:58:15
| 315,350,865
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 659
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'alx.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"marian.witkowski@gmail.com"
] |
marian.witkowski@gmail.com
|
f4d5d09bdb229613906bbbbc629c5eada724940f
|
ee824fa57eafa8cf54320203da22e70952acf395
|
/estoque/urls.py
|
a8fa56f51d4d5083214e158823c70b43c089ea5c
|
[] |
no_license
|
luizrenatolr/CadastroCompraProdutoDjango
|
a9e7cbb0b316bad6e50008edbaf2302f5706d2ad
|
792dc05836eeb732502a18a7441e3a6c66899772
|
refs/heads/master
| 2020-03-22T13:32:08.218353
| 2018-07-07T20:41:03
| 2018-07-07T20:41:03
| 140,114,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('login/', views.login_view, name='login'),
path('lista-compra/', views.listar_compras, name='lista_compra'),
path('realizar-compra/', views.realizar_compra, name='realizar_compra'),
path('logout/', views.logout_view, name='logout'),
path('cadastrar-produto/', views.cadastrar_produto, name='cadastrar_produto'),
path('', views.inicio, name='inicio')
]
|
[
""
] | |
101e407c7acc1dbde95edb4fb5860bcf6001cbf0
|
a508ffe0942f75721d4623fcda9e57808f93f07d
|
/7a/main.py
|
580734f6da382e661e5d68fb833a9142dc259c32
|
[] |
no_license
|
ag8/magic
|
3a14a81f3c06fa67cd77de07045ee3dc3899ca7f
|
2768fc7490e6cc55b522be68926ad24d3caa939c
|
refs/heads/master
| 2021-01-22T06:49:29.561849
| 2017-10-30T23:34:57
| 2017-10-30T23:34:57
| 102,300,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,170
|
py
|
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import overlap_input
from constants import FLAGS
from vae import VariationalAutoencoder
# Load MNIST data in a format suited for tensorflow.
# The script input_data is available under this URL:
# https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/examples/tutorials/mnist/input_data.py
# import input_data
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# n_samples = mnist.train.num_examples
# Get input data
images_batch, labels_batch = overlap_input.inputs(normalize=True, reshape=True)
n_samples = FLAGS.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
class TrainingException(Exception):
pass
def train(network_architecture, sess, learning_rate=0.001,
batch_size=FLAGS.BATCH_SIZE, training_epochs=10, display_step=5):
vae = VariationalAutoencoder(network_architecture, sess=sess,
transfer_fct=tf.nn.softplus, # FIXME: Fix numerical issues instead of just using tanh
learning_rate=learning_rate,
batch_size=batch_size)
try:
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
# batch_xs, _ = mnist.train.next_batch(batch_size)
batch_xs = images_batch.eval()
# Fit training using batch data
cost = vae.partial_fit(batch_xs)
if np.isnan(cost):
raise TrainingException("Got cost=nan")
print("Epoch: (" + str(epoch) + "/" + str(training_epochs) + "); i: (" + str(i) + "/" + str(
total_batch) + "). Current cost: " + str(cost) + "")
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch + 1),
"cost=", "{:.9f}".format(avg_cost))
except KeyboardInterrupt:
pass
return vae
with tf.Session() as sess:
# Define the network architecture
network_architecture = \
dict(num_neurons_recognition_layer_1=500, # 1st layer encoder neurons
num_neurons_recognition_layer_2=500, # 2nd layer encoder neurons
num_neurons_generator_layer_1=500, # 1st layer decoder neurons
num_neurons_generator_layer_2=500, # 2nd layer decoder neurons
             num_input_neurons=FLAGS.IMAGE_SIZE * FLAGS.IMAGE_SIZE * 2,  # input: two stacked IMAGE_SIZE x IMAGE_SIZE channels
n_z=2) # dimensionality of latent space
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
try:
# Train the autoencoder for 75 epochs
vae = train(network_architecture, sess=sess, training_epochs=75)
# Initialize all variables
# sess.run(tf.global_variables_initializer())
print("Reconstructing test input..."),
# Display the input reconstruction
x_sample = images_batch.eval()
x_sample = np.reshape(x_sample, newshape=[-1, FLAGS.IMAGE_SIZE * FLAGS.IMAGE_SIZE * 2])
x_reconstruct = vae.reconstruct(x_sample)
plt.figure(figsize=(8, 12))
for i in range(5):
# print("x_sample[i] shape: "),
# print(np.shape(x_sample[i]))
# print("")
# print("x_reconstruct[i] shape: ")
# print(np.shape(x_reconstruct[i]))
# print("")
plt.subplot(5, 2, 2 * i + 1)
plt.imshow(x_sample[i].reshape(200, 200, 2)[:, :, 0], vmin=0, vmax=1, cmap="gray")
plt.title("Test input")
plt.colorbar()
plt.subplot(5, 2, 2 * i + 2)
plt.imshow(x_reconstruct[i].reshape(200, 200, 2)[:, :, 0], vmin=0, vmax=1, cmap="gray")
plt.title("Reconstruction")
plt.colorbar()
plt.tight_layout()
plt.savefig('foo.png')
print("Done!")
print("Sampling 2d latent space..."),
nx = ny = 20
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
canvas = np.empty((200 * ny, 200 * nx))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
z_mu = np.array([[xi, yi]] * vae.batch_size)
x_mean = vae.generate(z_mu)
canvas[(nx - i - 1) * 200:(nx - i) * 200, j * 200:(j + 1) * 200] = x_mean[0].reshape(200, 200, 2)[:, :,
0]
plt.figure(figsize=(8, 10))
Xi, Yi = np.meshgrid(x_values, y_values)
plt.imshow(canvas, origin="upper", cmap="gray")
plt.tight_layout()
plt.savefig('latent_space_2d_sampling.png')
print("Done!")
except KeyboardInterrupt:
print("Good-by!")
sys.exit(0)
|
[
"andrew2000g@gmail.com"
] |
andrew2000g@gmail.com
|
de708aaa979599b2d6db0a5f4f2aae32ec9ee164
|
f21ce1669b00d80e8d064363342bafe6cc2bca71
|
/personal_website/authuser/models.py
|
3351f461005ff3e61aa8a39ff00b51dc4986ff0a
|
[] |
no_license
|
sandipan898/personal-website
|
760a87b42373c0098d67dd3bedb96bac16147e38
|
62ae9dc2be63f9b7d4297596dcffa329e2d9b961
|
refs/heads/main
| 2023-06-30T03:03:42.374597
| 2021-07-31T21:31:41
| 2021-07-31T21:31:41
| 328,332,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, blank=True)
bio = models.CharField(max_length=2000, null=True, blank=True)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return self.user.username
@property
def imageURL(self):
try:
url = self.image.url
        except ValueError:  # ImageField raises ValueError when no file is set
url = ''
return url
@receiver(post_save, sender=User)
def update_userprofile_signal(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
instance.userprofile.save()
|
[
"sandipan.das898@gmail.com"
] |
sandipan.das898@gmail.com
|
5c31db258bef00591c87b00f77fd2d40fa916811
|
54c8a1c0cb93794cfc1afe1937f50b81345bec0f
|
/home_page.py
|
a3ac5319ac75c66cc7ded60d225d78d55bf8f396
|
[] |
no_license
|
lalitv19/Stock-Prediction-Analysis
|
5ed2e9200a331b9b5d66b85532b5970fea3b5e63
|
a79bc9526468f7c2fdb8b2ac06874da19af06338
|
refs/heads/master
| 2022-04-25T09:24:16.493313
| 2020-04-26T22:03:11
| 2020-04-26T22:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,282
|
py
|
#Please import all the packages by pip install
import pandas_datareader as web
import pandas as pd
import numpy as np
import datetime as dt
from datetime import timedelta
import calendar
import pandas_market_calendars as mcal
import descriptive_analytics as da
import predictive_analytics as pa
import candlestick_timeseries as ct
import warnings
from PIL import Image
#For ignoring the warnings
warnings.filterwarnings("ignore")
def getCompanyDetails(companyTicker):
#Checking if entered ticker is registered on NASDAQ
all_symbols= web.get_nasdaq_symbols()
company_details = pd.read_csv("http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchan0ge=nasdaq&render=download", usecols = [*range(0,7)])
symbols = list(all_symbols.loc[:, "NASDAQ Symbol"])
# Input from the user for Ticker symbol
if companyTicker in symbols:
company_details = company_details.loc[company_details['Symbol'] == companyTicker]
company_details.index = range(len(company_details))
symbol_details = all_symbols.loc[all_symbols['NASDAQ Symbol'] == companyTicker]
symbol_details.index = range(len(symbol_details))
company_name = company_details["Name"][0]
print("*" *100)
print("\nGeneral Information about the Company\n ")
print("Company Name : ",company_details["Name"][0]) # Access elements such as Name, Market cap, etc from the csv for that company
print("Last Sale Information : ",company_details["LastSale"][0])
print("Market Cap : ",company_details["MarketCap"][0])
print("IPO years : ",company_details["IPOyear"][0])
print("Sector of the company : ",company_details["Sector"][0])
print("Industry the company belong to : ",company_details["industry"][0])
print("NextShares Information : ",symbol_details["NextShares"][0])
print("*" *100)
return companyTicker,company_name
else:
print("Invalid Ticker Symbol.Please Re-enter Valid ticker symbol")
main()
#Default mode that is 52 weeks(1 Year)
def default_mode(today_date,companyTicker,company_name):
startDate = today_date- timedelta(days=364)
endDate = today_date
getdata(startDate,endDate,companyTicker,company_name)
#Custom mode that is user can enter any range of dates
def custom_mode(companyTicker,company_name):
dateValidation(companyTicker,company_name)
#Checks the date validation and convert from string to datetime format and pass to date range function
def dateValidation(companyTicker,company_name):
date_entry = input('Enter a start date in YYYY-MM-DD format:')
isValidStartDate = validate_date_format(date_entry)
if(isValidStartDate):
year, month, day = map(int, date_entry.split('-'))
date1 = dt.datetime(year, month, day)
isValidStartDateRange = validate_date_range(date1)
if(isValidStartDateRange):
endDateValidation(date1,companyTicker,company_name)
else:
dateValidation(companyTicker,company_name)
else:
dateValidation(companyTicker,company_name)
#It only checks user inputed end dates and validates them
def endDateValidation(startDate,companyTicker,company_name):
date_entry = input('Enter a end date in YYYY-MM-DD format:')
isValidEndDate = validate_date_format(date_entry)
    # (parse the date only after its format has been validated below)
if(isValidEndDate):
year, month, day = map(int, date_entry.split('-'))
endDate = dt.datetime(year, month, day)
isValidEndDateRange = validate_date_range(endDate)
if(isValidEndDateRange):
if(startDate <= endDate):
validateWorkingDays(startDate,endDate,companyTicker,company_name)
else:
print("Seems the start Date is greater than end Date ..!! Please enter end date past start Date")
endDateValidation(startDate,companyTicker,company_name)
else:
endDateValidation(startDate,companyTicker,company_name)
else:
endDateValidation(startDate,companyTicker,company_name)
#It checks the format of the function which should be year-month-date
def validate_date_format(date_string):
try:
if(dt.datetime.strptime(date_string, '%Y-%m-%d')):
return True
else:
return False
except ValueError:
print("***Incorrect date format***\n") # Catch int() exception
#It checks if the date range is a valid range
def validate_date_range(date_string):
today_date = dt.datetime.now()
#formatted_today_date = ('%s-%s-%s' % (today_date.year, today_date.month, today_date.day))
try:
if date_string <= today_date:
return True
else:
print("Date entered seems to be future Date which is not a valid use case\n")
return False
except TypeError:
print("***Date entered seems to be future Date which is not a valid use case***\n")
#It checks for valid working days and skips saturdays, sundays and holiday dates
def validateWorkingDays(startDate, endDate,companyTicker,company_name):
nyse = mcal.get_calendar('NYSE')
isValidWorkingDays = False
isvaliddate = nyse.valid_days(startDate, endDate)
if ((abs(endDate-startDate).days)) <= 1:
start_Day = calendar.day_name[startDate.weekday()]
end_Day = calendar.day_name[endDate.weekday()]
if( (start_Day == "Saturday" and end_Day=="Sunday") or (start_Day == "Saturday" and end_Day=="Saturday") or (start_Day == "Sunday" and end_Day=="Sunday")):
print("The Day of end Date:", end_Day)
print("The Day of start Date:", start_Day)
isValidWorkingDays = False
elif((pd.Timestamp(startDate) not in isvaliddate) and (pd.Timestamp(endDate) not in isvaliddate)):
isValidWorkingDays = False
else:
isValidWorkingDays = True
else:
isValidWorkingDays = True
if(isValidWorkingDays):
getdata(startDate,endDate,companyTicker,company_name)
#This function will fetch data from the website and prints it
def getdata(start_date,end_date,company,company_name):
dataset= web.DataReader(company,'yahoo',start_date,end_date)
print(f"The stock value for first 5 days {company_name} is : \n",dataset.head())
print(f"The stock value for last 5 days {company_name} is : \n",dataset.tail())
close = dataset['Close']
print("*" *100)
print(f"\nMaximum Close Price for {company_name} : ",np.max(close)) # Access elements such as Name, Market cap, etc from the csv for that company
print(f"\nMinimum Close Price for {company_name} : ",np.min(close))
print("*" *100)
mainmenu(dataset,start_date,end_date,company_name,company)
# Menu for descriptive analytics, prediction, and visualisation; it also offers an option
# to return to the main menu, so the user can move between menus to run any operation.
def mainmenu(dataset,start_date,end_date,company_name,company):
print("*" *100)
print("\nWelcome to Stock Market Analysis")
print(f"1) Stock Analysis for {company_name}")
print(f"2) Stock Prediction for {company_name}")
print(f"3) Visualisation for {company_name}")
print("4) Home Menu to Change the Mode")
print("*" *100)
question = input("\nFrom the given options select any?\n")
try:
if question == "1":
da.descriptive_mode(dataset,start_date,end_date,company_name,company)
elif question == "2":
pa.predictive_mode(dataset,start_date,end_date,company_name,company)
elif question == "3":
ct.plotData(dataset,start_date,end_date,company_name,company)
mainmenu(dataset,start_date,end_date,company_name,company)
elif question == "4":
menu(company,company_name)
else:
print("Invalid Option. Please Re-enter the option you would like to choose. \n")
print("\n")
mainmenu(dataset,start_date,end_date,company_name,company)
except ValueError:
print("***Invalid Option. Please Re-enter the option you would like to choose.***\n")
mainmenu(dataset,start_date,end_date,company_name,company)
#Enter ticker symbol for which you want the data to be printed or analyzed
def main():
tickerSymbol = input("Please enter Company Ticker: ").upper()
companyTicker, company_name = getCompanyDetails(tickerSymbol)
menu(companyTicker, company_name)
#Menu for default mode or custom mode and user can only exit the code from this menu if code does not break anywhere
def menu(companyTicker,company_name):
print("*" *100)
print("\t\tWe do have more functionalities to explore.")
print("\nChoose from the below option")
print("1. Analysis for 1 year(Default mode)\n2. Variable Date Range(Custom mode)\n3. Check for another Company\n4. Exit")
print("*" *100)
question = input("\nFrom the given options select any?\n")
try:
if question == "1":
print("Welcome to 1 year Analysis(Default mode)\n")
today_date = dt.datetime.now()
default_mode(today_date,companyTicker,company_name)
elif question == "2":
print("Welcome to Custom Mode(Variable Date Range)\n")
custom_mode(companyTicker,company_name)
elif question == "3":
main()
elif question == "4":
exit()
else:
print("Invalid Option. Please Re-enter the option you would like to choose. \n")
menu(companyTicker,company_name)
except ValueError:
print("***Invalid Option. Please Re-enter the option you would like to choose.***\n")
menu(companyTicker,company_name)
#Beginning of the code
if __name__ == "__main__":
print("\n"+"*" *100)
print("\t\tWelcome to The Data Whisperers Stock Market Analysis Project")
print("*" *100)
option = input("Do you want the flow chart for the entire project please Press 1 or press anything to Continue with the normal flow\n")
try:
if option == "1":
img = Image.open('Flow_Chart.jpg')
img.show()
except ValueError:
print("Please enter correct choice Y/N")
main()
|
[
"noreply@github.com"
] |
lalitv19.noreply@github.com
|
9e21a9283b6e462755a39d4174705ef6d4380d1b
|
2827d7a837eb29c3cb07793ab6d3d5a753e18669
|
/alipay/aop/api/request/AlipayCommerceLotteryPresentlistQueryRequest.py
|
3ed44f8e695e01f038dcf08768a6e8f476ae06b2
|
[
"Apache-2.0"
] |
permissive
|
shaobenbin/alipay-sdk-python
|
22e809b8f5096bec57d2bb25414f64bdc87fa8b3
|
5232ad74dff2e8a6e0e7646ab3318feefa07a37d
|
refs/heads/master
| 2020-03-21T04:51:39.935692
| 2018-06-21T07:03:31
| 2018-06-21T07:03:31
| 138,131,022
| 0
| 0
| null | 2018-06-21T06:50:24
| 2018-06-21T06:50:24
| null |
UTF-8
|
Python
| false
| false
| 4,021
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceLotteryPresentlistQueryModel import AlipayCommerceLotteryPresentlistQueryModel
class AlipayCommerceLotteryPresentlistQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceLotteryPresentlistQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceLotteryPresentlistQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
        return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.lottery.presentlist.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
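
# --- Hedged usage sketch (not part of the SDK file) ---
# Roughly how such a request object is filled and serialized; the URL and
# field values below are illustrative assumptions.
#
#   request = AlipayCommerceLotteryPresentlistQueryRequest()
#   request.notify_url = 'https://example.com/notify'
#   params = request.get_params()
#   # params[P_METHOD] == 'alipay.commerce.lottery.presentlist.query'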
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
31f76246510b89268fc4b16de3d456aac7978a02
|
3fd82399f9498d9dc03aa3bb76c4526754da5c54
|
/ijik/monitor.py
|
df0ba8a7b9e9ad3e5155bd7fe1f6bdfa176f773b
|
[] |
no_license
|
teknologiakerho/ijik2
|
ac722190d569700776913920a6fd051836ac6cb9
|
8f22b544fb86c5661c31c7c93b6dd6c73e0f8b32
|
refs/heads/master
| 2023-03-04T00:43:13.809848
| 2021-02-01T03:59:07
| 2021-02-01T03:59:07
| 330,416,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,527
|
py
|
import collections
import csv
import functools
import html
import io
import re
import fastapi
import ijik
class Hooks:
@ijik.hookspec
def ijik_monitor_setup(monitor, router):
pass
class Monitor:
table_template = "monitor/table.html"
main_template = "monitor/index.html"
field_template = "monitor/field.html"
def __init__(self, *, pluginmanager, templates, get_view):
self.pluginmanager = pluginmanager
self.table_template = templates.get_template(self.table_template)
self.main_template = templates.get_template(self.main_template)
self.field_template = templates.get_template(self.field_template)
self.get_view = get_view
self.fields = {}
def view(self, db, key):
widgets = self.get_view(key, db)
if widgets is None:
return None
view = View(self)
for widget in widgets:
widget(view, monitor=self)
return view
def setup(self, router):
self.pluginmanager.hook.ijik_monitor_setup(monitor=self, router=router)
def add_field(self, cls, name=None, prio=0, **opt):
if name is None:
            name = next(opt[x] for x in ("plaintext", "html", "json") if x in opt).__name__
field = Field(name=name, prio=prio, **opt)
if cls in self.fields:
self.fields[cls].append(field)
self.fields[cls].sort(key=lambda c: (c.prio, c.name))
else:
self.fields[cls] = [field]
return field
def field(self, *args, loc="plaintext", **kwargs):
def deco(f):
if kwargs.get("html") == "auto":
kwargs["html"] = self._auto_html(f)
return self.add_field(*args, **kwargs, **{loc: f})
return deco
def get_fields(self, cls):
try:
return self.fields[cls]
except KeyError:
return ()
def render_view(self, **context):
return self.main_template.render(**context)
def render_table(self, **context):
return self.table_template.render(**context)
def _auto_html(self, f):
def render_html(entity):
return self.field_template.render({"value": f(entity)})
return render_html
class View:
def __init__(self, monitor):
self.monitor = monitor
self.widgets = []
self.downloads = {}
def add_download(self, id, dl):
self.downloads[id] = dl
def get_download(self, id):
try:
dl = self.downloads[id]
except KeyError:
return None
return dl()
def add_widget(self, render, navi=None):
self.widgets.append((render, navi))
def render(self, **context):
return self.monitor.render_view(
**context,
widgets = (w(**context) for w,_ in self.widgets),
navis = (n for _,n in self.widgets if n is not None)
)
class Table:
def __init__(self, title, get_entities, id=None, columns=None):
self.title = title
self.get_entities = get_entities
self.columns = columns
if id:
self.id = id
def __call__(self, view, monitor):
self.monitor = monitor
view.add_widget(self.render, navi=(self.id, self.title))
view.add_download(f"{self.id}.csv", self.download)
def render(self, **context):
return self.monitor.render_table(**context, table=self)
def download(self):
return "text/csv", self.data.plaintext.csv
@functools.cached_property
def id(self):
return re.sub(r'[^\w-]+', "-", self.title).strip("-").lower()
@functools.cached_property
def entities(self):
return self.get_entities()
@functools.cached_property
def data(self):
colprio = collections.defaultdict(lambda: float("inf"))
values = []
for e in self.entities:
vals = {}
for field in self.monitor.get_fields(e.__class__.__name__):
colprio[field.name] = min(field.prio, colprio[field.name])
vals[field.name] = field(e)
values.append(vals)
if self.columns:
columns = [c for c in self.columns if c in colprio]
else:
columns = sorted(colprio, key=lambda name: colprio[name])
return TableData(columns, values)
class TableData:
def __init__(self, columns, values):
self.columns = columns
self.values = values
@property
def width(self):
return len(self.columns)
@property
def height(self):
return len(self.values)
@property
def rows(self):
for v in self.values:
yield (v.get(c, Value.empty) for c in self.columns)
@functools.cached_property
def csv(self):
out = io.StringIO()
writer = csv.writer(out, delimiter=';')
writer.writerow(self.columns)
writer.writerows((c.plaintext for c in r) for r in self.rows)
return out.getvalue()
@functools.cached_property
def plaintext(self):
return self._filter_attr("plaintext")
@functools.cached_property
def html(self):
return self._filter_attr("html")
@functools.cached_property
def json(self):
return self._filter_attr("json")
def _filter_attr(self, attr):
# select only columns which have at least one row with `attr`
columns = set()
for v in self.values:
columns.update(c for c,x in v.items() if getattr(x, attr) is not None)
        # if every column survived, the table is unchanged
        if len(columns) == len(self.columns):
return self
return TableData([c for c in self.columns if c in columns], self.values)
class Field:
def __init__(self, name, prio, *, plaintext=None, html=None, json=None):
self.name = name
self.prio = prio
self._plaintext = plaintext
self._html = html
self._json = json
def plaintext(self, f):
self._plaintext = f
return self
def html(self, f):
self._html = f
return self
def json(self, f):
self._json = f
return self
def __call__(self, entity):
return Value(self, entity)
class Value:
def __init__(self, field, entity):
self.field = field
self.entity = entity
@functools.cached_property
def plaintext(self):
if self.field._plaintext:
return self.field._plaintext(self.entity)
@functools.cached_property
def html(self):
if self.field._html:
return self.field._html(self.entity)
if self.field._html is False:
return
if self.plaintext is not None:
return html.escape(str(self.plaintext))
@functools.cached_property
def json(self):
if self.field._json:
return self.field._json(self.entity)
if self.field._json is False:
return
return self.plaintext
class empty:
plaintext = ""
html = ""
json = None
class KeyRegistry:
def __init__(self):
self.views = {}
def __call__(self, key, *args, **kwargs):
try:
view = self.views[key]
except KeyError:
return None
return view(*args, **kwargs)
def add_view(self, key, view):
self.views[key] = view
return view
def view(self, key):
def deco(view):
return self.add_view(key, view)
return deco
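
# --- Hedged usage sketch (not part of the original module) ---
# Shows how Field and Value compose; the "Member" entity is an assumption
# for illustration only.
if __name__ == "__main__":
    class Member:
        email = "user@example.com"

    email_field = Field(name="Email", prio=0, plaintext=lambda m: m.email)
    value = email_field(Member())
    print(value.plaintext)  # -> user@example.com
    print(value.html)       # falls back to html.escape(plaintext)
    print(value.json)       # falls back to plaintext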
|
[
"tl@cat.pm"
] |
tl@cat.pm
|
73cee0f13c58dcf564d63b0a94b91ed9059b482b
|
75262e26d53201a2f142fa333e96f6325289843c
|
/mattstuff/modomics_csv_tool.py
|
f651e404e8ea0dafb81c1cfa7293bdf814807a22
|
[] |
no_license
|
ModSquad2020/SD_Matt
|
4ec705aa41c2250d021fc624a66ac3cd6d1e25c0
|
56da68fcbd095c3dbf5db3d94d22abf94cfe6e0b
|
refs/heads/main
| 2023-03-14T01:06:46.617478
| 2021-03-03T04:16:57
| 2021-03-03T04:16:57
| 305,460,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
'''
It looks like we want the 5th column, and the format is wack as heck.
I downloaded E. coli tRNAs, visualized as Modomics symbols, and
it looks like they have a pattern of
    name-of-mod[show modification pathway]\');return false">
in front of every modification. Potential use for regular expressions:
    lowercase|number (for an unknown amount of space)[show modification pathway]\');return true|false">
'''
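
# --- Hedged sketch (not part of the original file) ---
# One possible regex for stripping the residue described above; the exact
# HTML leftovers matched here are an assumption based on the docstring.
import re

# Matches e.g.: m22g[show modification pathway]');return false">
MOD_RESIDUE = re.compile(
    r"[a-z0-9]*\s*\[show modification pathway\]\\?'\);return (?:true|false)\">"
)

def clean_cell(cell):
    """Drop the Modomics viewer residue, keeping only the symbols."""
    return MOD_RESIDUE.sub("", cell)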
|
[
"matt.kozu@gmail.com"
] |
matt.kozu@gmail.com
|
553186729974dfaff8398beaca45f2845bf8da60
|
c234b8c3bfe8cca26a61118a46acbd6dffdef837
|
/python assignment11/Module4/Question-06/flatter_list.py
|
72c8294ddad39bb5f88426e1b3798a64567f47fb
|
[] |
no_license
|
SejalChourasia/python-assignment
|
6dbd702e340518b0aac094fdee9c3fecfdcb1e48
|
702fe6b3cba740cd00dbe7b1c78bb9992e77a0d7
|
refs/heads/master
| 2020-12-15T11:04:37.367463
| 2020-01-25T15:49:48
| 2020-01-25T15:49:48
| 235,083,900
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
l=[[int(i) for i in range(10)]for j in range(10)]
print('Unflattened list',l)
flatten=[i for sublist in l for i in sublist if i<5]
'''
for sublist in l:
for i in sublist:
flatten.append(i)
'''
print('flattened list with elements less than 5', flatten)
|
[
"noreply@github.com"
] |
SejalChourasia.noreply@github.com
|
6d8afbb844d2e9fdb38b4fce51cb1183de14c6ab
|
3f4f2bb867bf46818802c87f2f321a593f68aa90
|
/smile/bin/activate-global-python-argcomplete
|
21a6f61e1ff6b53be2b7b307e4aa76a46c62d189
|
[] |
no_license
|
bopopescu/Dentist
|
56f5d3af4dc7464544fbfc73773c7f21a825212d
|
0122a91c1f0d3d9da125234a8758dea802cd38f0
|
refs/heads/master
| 2022-11-23T12:42:23.434740
| 2016-09-19T15:42:36
| 2016-09-19T15:42:36
| 282,608,405
| 0
| 0
| null | 2020-07-26T08:30:16
| 2020-07-26T08:30:16
| null |
UTF-8
|
Python
| false
| false
| 3,256
|
#!/SHARED-THINGS/ONGOING/We.smile/smile/bin/python
# PYTHON_ARGCOMPLETE_OK
# Copyright 2012-2013, Andrey Kislyuk and argcomplete contributors.
# Licensed under the Apache License. See https://github.com/kislyuk/argcomplete for more info.
'''
Activate the generic bash-completion script for the argcomplete module.
'''
import os, sys, argparse, argcomplete, shutil, fileinput
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
dest_opt = parser.add_argument("--dest", help="Specify the bash completion modules directory to install into", default="/etc/bash_completion.d")
parser.add_argument("--user", help="Install into user directory (~/.bash_completion.d/)", action='store_true')
parser.add_argument("--no-defaults", dest="use_defaults", action="store_false", default=True,
help="When no matches are generated, do not fallback to readline\'s default completion")
parser.add_argument("--complete-arguments", nargs=argparse.REMAINDER,
help="arguments to call complete with; use of this option discards default options")
argcomplete.autocomplete(parser)
args = parser.parse_args()
if args.user:
args.dest = os.path.expanduser("~/.bash_completion.d/")
if not os.path.exists(args.dest):
try:
os.mkdir(args.dest)
except Exception as e:
parser.error("Path {d} does not exist and could not be created: {e}".format(d=args.dest, e=e))
elif not os.path.exists(args.dest) and args.dest != '-':
parser.error("Path {d} does not exist".format(d=args.dest))
activator = os.path.join(os.path.dirname(argcomplete.__file__), 'bash_completion.d', 'python-argcomplete.sh')
if args.complete_arguments is None:
complete_options = '-o nospace -o default -o bashdefault' if args.use_defaults else '-o nospace -o bashdefault'
else:
complete_options = " ".join(args.complete_arguments)
complete_call = "complete{} -D -F _python_argcomplete_global".format(" " + complete_options if complete_options else "")
def replaceCompleteCall(line):
if line.startswith("complete") and "_python_argcomplete_global" in line:
return complete_call+('\n' if line.endswith('\n') else '')
else:
return line
if args.dest == '-':
for l in open(activator):
sys.stdout.write(replaceCompleteCall(l))
else:
dest = os.path.join(args.dest, "python-argcomplete.sh")
sys.stdout.write("Installing bash completion script " + dest)
if not args.use_defaults:
sys.stdout.write(" without -o default")
elif args.complete_arguments:
sys.stdout.write(" with options: " + complete_options)
sys.stdout.write("\n")
try:
shutil.copy(activator, dest)
        if args.complete_arguments is not None or not args.use_defaults:
for l in fileinput.input(dest, inplace=True):
# fileinput with inplace=True redirects stdout to the edited file
sys.stdout.write(replaceCompleteCall(l))
except Exception as e:
err = str(e)
if args.dest == dest_opt.default:
err += "\nPlease try --user to install into a user directory, or --dest to specify the bash completion modules directory"
parser.error(err)
|
[
"jamaalaraheem@gmail.com"
] |
jamaalaraheem@gmail.com
|
|
e39b536d7f60bf324bc7edab967e0ce81795eeaa
|
aee61ceed0cffd9aa8048ae1ae65277c91c078a8
|
/create_ec2.py
|
f7b5ec159c8a2d44edb0a56037a2bb7b7d206c3d
|
[] |
no_license
|
narayanareddy641/aws_programs
|
fd87565e9a78223d9d33e5b6a742cf6309ce4170
|
216caf736574074316bca9e17ce82ac615b6b3e9
|
refs/heads/master
| 2021-01-05T19:15:57.921442
| 2020-02-17T14:19:22
| 2020-02-17T14:19:22
| 241,112,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
import boto3
ec2 = boto3.resource('ec2')
# create a new EC2 instance
instances = ec2.create_instances(
ImageId='ami-09a4a9ce71ff3f20b',
MinCount=1,
MaxCount=1,
InstanceType='t2.micro',
KeyName='reddy-key'
)
print(instances)
|
[
"noreply@github.com"
] |
narayanareddy641.noreply@github.com
|
dcad98b8060b86ce63c094f703ef2726a6debf3a
|
c0b310a0b8037f92ac431cd694168ea847f8fe09
|
/fetchingNewStores.py
|
f6ce685f21be002659817cc2438c6fd8be1ccead
|
[] |
no_license
|
lndhub/lightningnetworkstores.github.io
|
192321b81b32374c170f102882e5afd396450c61
|
b7e2af9453e6d3511a464eadceace6c096ba47d8
|
refs/heads/master
| 2020-03-23T03:33:58.541427
| 2018-07-15T08:21:01
| 2018-07-15T08:21:01
| 141,037,097
| 1
| 1
| null | 2018-07-15T15:10:35
| 2018-07-15T15:10:35
| null |
UTF-8
|
Python
| false
| false
| 3,877
|
py
|
# this script looks at the names of the nodes, detects those that have a name for a website and tries to open the website.
# if a positive response is obtained, this might be a new store :)
import requests
import re
# needs to be improved with https://www.robtex.com/lightning/node/?sort=-discovered
nodes = requests.get('https://shabang.io/nodes.json')
if nodes.status_code != 200:
exit()
nodes = nodes.json()
nodes = nodes['nodes']
nodenames = [x['alias'] for x in nodes if 'alias' in x.keys()]
pattern = r'[a-zA-Z]{2,}\.[a-zA-Z]{2,3}'
selection = [x for x in nodenames if re.match(pattern, x) is not None]
selection = [x.replace('?',"") for x in selection]
selection = [x.replace(' ',"") for x in selection]
old = ['fdisk.ln.node', 'btc.lnetwork.tokyo', 'CryptoAdvisoryGroup.io', 'opennode.co', 'Ionic.Release', 'ozupek.com.tr', 'ZAP.COOL', 'forkliner.com', 'mn.lnd.lightning.rip', 'store.edberg.eu', 'LND.rompert.com', 'QR.CR', 'martijnbolt.com', 'BX.in.th', 'resharesat.com', 'mainnet.yalls.org', 'elec.luggs.co', 'BitStarz.com', 'ln.mempool.co', 'Billfodl.com', 'cryptonoobs.club', 'backup.bbt.tc', 'lncast.com', 'CoinMall.com', 'lnhub.us', 'CL.rompert.com', 'cybergeld.info', 'embedded.cash', 'LNTURKEY.NET', 'BitBargain.co.uk', 'Ionic.Release', 'lightning.exposed', 'lightningpay.me', 'ln.inazuma.cc', 'Stadicus.com', 'ln.bbt.tc', 'BitBargain.co.uk', 'distributed.love', 'the.lightning.land', 'elec.luggs.co', 'FOURLEAF.life', 'livingroomofsatoshi.com', 'mainnet.lnd.resdat', 'quantumgear.io', 'ln.heimburg.se', 'inabottle.io', 'BHB.network', 'ln.keff.org', 'BitBargain.co.uk', 'john.zweng.at', 'lightstorm.cc', 'mainnet.yalls.org', 'lightning.nicolas-dorier.com', 'SQUADSYSTEM.COM', 'ln.hkjn.me', 'POOLIN.COM', 'TokenSoft.io', 'hodl.me.2nyt', 'zbiornik.com', 'coinfinity.co', 'COINMINER.SPACE', 'PARK.IO', 'lnd.rows.io', 'ln.google.com', 'aspinall.io', 'ASTERIOS.TM', 'bitcoin.co', 'gnet.me', 'lightningramp.com', 'thunder.node', 'BIGHT.nl', 'refractionx.com']
old2 = ['inabottle.io', 'ZAP.COOL', 'BitBargain.co.uk', 'ln.mallorn.de', 'ln.taborsky.cz', 'Bight.nl', 'ln.vanovcan.net', 'resharesat.com', 'rompert.com', 'Billfodl.com', 'freedomnode.com', 'DatPay.Me', 'COINMINER.SPACE', 'bitmynt.no', 'POOLIN.COM', 'LightningPay.me', 'BitStarz.com', 'BitBargain.co.uk', 'skyrus.net', 'LEVENTGUNAY.COM', 'inazuma.cc', 'distributed.love', 'lightningbtc.shop', 'skyrus.net', 'lightstorm.cc', 'LivingRoomOfSatoshi.com', 'ln.mempool.co', 'matt.drollette.com', 'BTC.NETWORK', 'bitcoinsupermarkt.de', 'quantumgear.io', 'arihanc.com', 'TheCrypto.City', 'mainnet.yalls.org', 'cryptohead.de', 'BX.in.th', 'embedded.cash', 'lnstat.ideoflux.com', 'btcpay.cash', 'tondro.club', 'cryptopolitics.global', 'PARK.IO', 'mainnet.yalls.org', 'BubbleCoin.lol', 'lngate.tokyo', 'john.zweng.at', 'zbiornik.com', 'martijnbolt.com', 'cybergeld.info', 'TokenSoft.io', 'revealer.cc', 'DavinciCodes.net', 'www.bankofcrypto.info', 'tanjalo.com', 'mainnet.yalls.org', 'ASTERIOS.TM', 'BHB.network', 'FOURLEAF.life', 'zap.wizb.it', 'SQUADSYSTEM.COM', 'CoinMall.com', 'gnet.me', 'graph.lndexplorer.com', 'BitBargain.co.uk', 'QR.CR', 'bitfree.io', 'ln.hkjn.me', 'Waldo.fun', 'CryptoAdvisoryGroup.io', 'coinpanic.com', 'shop.sprovoost.nl']
old3 = old+old2+['ozupek.com.tr', 'startln.com', 'forkliner.com', 'bankless.io', 'BTC.COM', 'lncast.com', 'HodlMonkey.com']
old4 = old3 + ['masteringlightning.com', 'LivingRoomofSatoshi.com', 'mottods.com', 'lightningshop.eu', 'Grunch.fun', 'lightningnode.cz', 'Bitrefill.com', 'Byteball.be', 'moneyclub.network']
newSelection = []
for site in selection:
if site not in old4:
try:
response = requests.get('http://'+site,timeout=7)
if response.status_code == 200:
newSelection.append(site)
        except requests.exceptions.RequestException:
pass
for site in newSelection:
print(site)
|
[
"noreply@github.com"
] |
lndhub.noreply@github.com
|