Dataset schema (column: dtype and value range; ⌀ marks a nullable column):

| column | dtype (range) |
|---|---|
| hexsha | stringlengths 40-40 |
| size | int64 5-2.06M |
| ext | stringclasses 10 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 3-248 |
| max_stars_repo_name | stringlengths 5-125 |
| max_stars_repo_head_hexsha | stringlengths 40-78 |
| max_stars_repo_licenses | listlengths 1-10 |
| max_stars_count | int64 1-191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24-24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_path | stringlengths 3-248 |
| max_issues_repo_name | stringlengths 5-125 |
| max_issues_repo_head_hexsha | stringlengths 40-78 |
| max_issues_repo_licenses | listlengths 1-10 |
| max_issues_count | int64 1-67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24-24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_path | stringlengths 3-248 |
| max_forks_repo_name | stringlengths 5-125 |
| max_forks_repo_head_hexsha | stringlengths 40-78 |
| max_forks_repo_licenses | listlengths 1-10 |
| max_forks_count | int64 1-105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24-24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24-24 ⌀ |
| content | stringlengths 5-2.06M |
| avg_line_length | float64 1-1.02M |
| max_line_length | int64 3-1.03M |
| alphanum_fraction | float64 0-1 |
| count_classes | int64 0-1.6M |
| score_classes | float64 0-1 |
| count_generators | int64 0-651k |
| score_generators | float64 0-1 |
| count_decorators | int64 0-990k |
| score_decorators | float64 0-1 |
| count_async_functions | int64 0-235k |
| score_async_functions | float64 0-1 |
| count_documentation | int64 0-1.04M |
| score_documentation | float64 0-1 |
hexsha: 540d12bf750ecb2e910c29f067d4164a9bcaf207 | size: 30,598 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: Tawfiq-MoonHacker/metis_video @ 8d63ac458b8b6bfa48a1ec5476dc47be1987f42a | path: file.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
file_forgot_password = ["""<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body,
table,
td,
a {
-webkit-text-size-adjust: 100%%;
-ms-text-size-adjust: 100%%;
}
table,
td {
mso-table-lspace: 0pt;
mso-table-rspace: 0pt;
}
img {
-ms-interpolation-mode: bicubic;
}
/* RESET STYLES */
img {
border: 0;
height: auto;
line-height: 100%%;
outline: none;
text-decoration: none;
}
table {
border-collapse: collapse !important;
}
body {
height: 100%% !important;
margin: 0 !important;
padding: 0 !important;
width: 100%% !important;
}
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px) {
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
    <div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;"> We received a request to reset your password. Use the button below to choose a new one. </div>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;"> </td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
                        <h1 style="font-size: 48px; font-weight: 400; margin: 0;">Welcome!</h1> <img src="https://img.icons8.com/clouds/100/000000/handshake.png" width="125" height="120" style="display: block; border: 0px;" />
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
                        <p style="margin: 0;">We received a request to reset the password for your account. Just press the button below to choose a new one.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left">
<table width="100%%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td align="center" style="border-radius: 3px;" bgcolor="#FFA73B"><a href="secret_key" target="_blank" style="font-size: 20px; font-family: Helvetica, Arial, sans-serif; color: #ffffff; text-decoration: none; color: #ffffff; text-decoration: none; padding: 15px 25px; border-radius: 2px; border: 1px solid #FFA73B; display: inline-block;">Reset Password</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 0px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If that doesn't work, copy and paste the following link in your browser:</p>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">""","""</a></p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If you have any questions, just reply to this email—we're always happy to help out.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 40px 30px; border-radius: 0px 0px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
                        <p style="margin: 0;">Cheers,<br>BBB Team</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 30px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#FFECD1" align="center" style="padding: 30px 30px 30px 30px; border-radius: 4px 4px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<h2 style="font-size: 20px; font-weight: 400; color: #111111; margin: 0;">Need more help?</h2>
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">We’re here to help you out</a></p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;">
<p style="margin: 0;">If these emails get annoying, please feel free to <a href="#" target="_blank" style="color: #111111; font-weight: 700;">unsubscribe</a>.</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
"""]
file_verification_email = ["""<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body,
table,
td,
a {
-webkit-text-size-adjust: 100%%;
-ms-text-size-adjust: 100%%;
}
table,
td {
mso-table-lspace: 0pt;
mso-table-rspace: 0pt;
}
img {
-ms-interpolation-mode: bicubic;
}
/* RESET STYLES */
img {
border: 0;
height: auto;
line-height: 100%%;
outline: none;
text-decoration: none;
}
table {
border-collapse: collapse !important;
}
body {
height: 100%% !important;
margin: 0 !important;
padding: 0 !important;
width: 100%% !important;
}
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px) {
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;"> We're thrilled to have you here! Get ready to dive into your new account. </div>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;"> </td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
                        <h1 style="font-size: 48px; font-weight: 400; margin: 0;">Welcome!</h1> <img src="https://img.icons8.com/clouds/100/000000/handshake.png" width="125" height="120" style="display: block; border: 0px;" />
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">We're excited to have you get started. First, you need to confirm your account. Just press the button below.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left">
<table width="100%%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
<tr>
<td align="center" style="border-radius: 3px;" bgcolor="#FFA73B"><a href="secret_key" target="_blank" style="font-size: 20px; font-family: Helvetica, Arial, sans-serif; color: #ffffff; text-decoration: none; color: #ffffff; text-decoration: none; padding: 15px 25px; border-radius: 2px; border: 1px solid #FFA73B; display: inline-block;">Confirm Email</a></td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 0px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If that doesn't work, copy and paste the following link in your browser:</p>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">""", """</a></p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If you have any questions, just reply to this email—we're always happy to help out.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 40px 30px; border-radius: 0px 0px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
                        <p style="margin: 0;">Cheers,<br>BBB Team</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 30px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#FFECD1" align="center" style="padding: 30px 30px 30px 30px; border-radius: 4px 4px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<h2 style="font-size: 20px; font-weight: 400; color: #111111; margin: 0;">Need more help?</h2>
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">We’re here to help you out</a></p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;">
<p style="margin: 0;">If these emails get annoying, please feel free to <a href="#" target="_blank" style="color: #111111; font-weight: 700;">unsubscribe</a>.</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
"""]
file_username = ["""<!DOCTYPE html>
<html>
<head>
<title></title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<style type="text/css">
@media screen {
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 400;
src: local('Lato Regular'), local('Lato-Regular'), url(https://fonts.gstatic.com/s/lato/v11/qIIYRU-oROkIk8vfvxw6QvesZW2xOQ-xsNqO47m55DA.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: normal;
font-weight: 700;
src: local('Lato Bold'), local('Lato-Bold'), url(https://fonts.gstatic.com/s/lato/v11/qdgUG4U09HnJwhYI-uK18wLUuEpTyoUstqEm5AMlJo4.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 400;
src: local('Lato Italic'), local('Lato-Italic'), url(https://fonts.gstatic.com/s/lato/v11/RYyZNoeFgb0l7W3Vu1aSWOvvDin1pK8aKteLpeZ5c0A.woff) format('woff');
}
@font-face {
font-family: 'Lato';
font-style: italic;
font-weight: 700;
src: local('Lato Bold Italic'), local('Lato-BoldItalic'), url(https://fonts.gstatic.com/s/lato/v11/HkF_qI1x_noxlxhrhMQYELO3LdcAZYWl9Si6vvxL-qU.woff) format('woff');
}
}
/* CLIENT-SPECIFIC STYLES */
body,
table,
td,
a {
-webkit-text-size-adjust: 100%%;
-ms-text-size-adjust: 100%%;
}
table,
td {
mso-table-lspace: 0pt;
mso-table-rspace: 0pt;
}
img {
-ms-interpolation-mode: bicubic;
}
/* RESET STYLES */
img {
border: 0;
height: auto;
line-height: 100%%;
outline: none;
text-decoration: none;
}
table {
border-collapse: collapse !important;
}
body {
height: 100%% !important;
margin: 0 !important;
padding: 0 !important;
width: 100%% !important;
}
/* iOS BLUE LINKS */
a[x-apple-data-detectors] {
color: inherit !important;
text-decoration: none !important;
font-size: inherit !important;
font-family: inherit !important;
font-weight: inherit !important;
line-height: inherit !important;
}
/* MOBILE STYLES */
@media screen and (max-width:600px) {
h1 {
font-size: 32px !important;
line-height: 32px !important;
}
}
/* ANDROID CENTER FIX */
div[style*="margin: 16px 0;"] {
margin: 0 !important;
}
</style>
</head>
<body style="background-color: #f4f4f4; margin: 0 !important; padding: 0 !important;">
<!-- HIDDEN PREHEADER TEXT -->
<div style="display: none; font-size: 1px; color: #fefefe; line-height: 1px; font-family: 'Lato', Helvetica, Arial, sans-serif; max-height: 0px; max-width: 0px; opacity: 0; overflow: hidden;"> We're thrilled to have you here! Get ready to dive into your new account. </div>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<!-- LOGO -->
<tr>
<td bgcolor="#FFA73B" align="center">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td align="center" valign="top" style="padding: 40px 10px 40px 10px;"> </td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#FFA73B" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="center" valign="top" style="padding: 40px 20px 20px 20px; border-radius: 4px 4px 0px 0px; color: #111111; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 48px; font-weight: 400; letter-spacing: 4px; line-height: 48px;">
                        <h1 style="font-size: 48px; font-weight: 400; margin: 0;">Welcome!</h1> <img src="https://img.icons8.com/clouds/100/000000/handshake.png" width="125" height="120" style="display: block; border: 0px;" />
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 40px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
                        <p style="margin: 0;">We're excited to have you here. Here is your username: secret_key</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left">
<table width="100%%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td bgcolor="#ffffff" align="center" style="padding: 20px 30px 60px 30px;">
<table border="0" cellspacing="0" cellpadding="0">
</table>
</td>
</tr>
</table>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 0px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;"></p>
</td>
</tr> <!-- COPY -->
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 20px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">""", """</a></p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 20px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<p style="margin: 0;">If you have any questions, just reply to this email—we're always happy to help out.</p>
</td>
</tr>
<tr>
<td bgcolor="#ffffff" align="left" style="padding: 0px 30px 40px 30px; border-radius: 0px 0px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
                        <p style="margin: 0;">Cheers,<br>BBB Team</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 30px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#FFECD1" align="center" style="padding: 30px 30px 30px 30px; border-radius: 4px 4px 4px 4px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 18px; font-weight: 400; line-height: 25px;">
<h2 style="font-size: 20px; font-weight: 400; color: #111111; margin: 0;">Need more help?</h2>
<p style="margin: 0;"><a href="#" target="_blank" style="color: #FFA73B;">We’re here to help you out</a></p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td bgcolor="#f4f4f4" align="center" style="padding: 0px 10px 0px 10px;">
<table border="0" cellpadding="0" cellspacing="0" width="100%%" style="max-width: 600px;">
<tr>
<td bgcolor="#f4f4f4" align="left" style="padding: 0px 30px 30px 30px; color: #666666; font-family: 'Lato', Helvetica, Arial, sans-serif; font-size: 14px; font-weight: 400; line-height: 18px;">
<p style="margin: 0;">If these emails get annoying, please feel free to <a href="#" target="_blank" style="color: #111111; font-weight: 700;">unsubscribe</a>.</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>
"""]
def call(file, link):
    # Substitute every 'secret_key' placeholder in a template with the given value.
    return file.replace('secret_key', link)
def forgot_password1(link):
    return call(file_forgot_password[0], link)
def verification_email(link):
    return call(file_verification_email[0], link)
def return_username(string1):
    return call(file_username[0], string1)
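# Minimal usage sketch (editorial addition, not in the original file): the URL
# below is a placeholder, not a real endpoint.
if __name__ == '__main__':
    html = forgot_password1('https://example.com/reset?token=abc123')
    assert 'secret_key' not in html  # every placeholder was replaced
    print(len(html), 'bytes of reset-password HTML')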
avg_line_length: 48.878594 | max_line_length: 410 | alphanum_fraction: 0.483365 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 30,185/0.986309
hexsha: 540dd2561e2691981ca01c923a08b27ff3e24e2d | size: 2,106 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: MyConbook/datatool @ 1c12bb5124b48ae827c4832896fd81bf711ad44e | path: builder/schedule.py | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import icalendar
import urllib2
import hashlib
import string
import re
import datetime
from database import Database, TableHandler
class Schedule(TableHandler):
columns = [("Title", Database.C_TCN), "Description", ("Category", Database.C_TCN), ("Location", Database.C_TCN), ("StartDate", Database.C_D), ("EndDate", Database.C_D)]
def __init__(self, options):
TableHandler.__init__(self, "schedule", self.columns)
self.options = options
    def download(self):
        # Download schedule
        mysock = urllib2.urlopen(self.options.calendar_url)
        self.file_content = mysock.read()
        mysock.close()
    def calculate_hash(self, version):
        # Calculate MD5 sum over the feed, ignoring the volatile DTSTAMP lines
        m = hashlib.md5()
        hash_text = re.sub("DTSTAMP:.+\\r\\n", "", self.file_content)
        m.update(hash_text)
        md5 = m.hexdigest()
        return version.set_calendar_checksum(md5)
def parse(self, db, output_json):
if not self.file_content:
raise ValueError("file_content is empty")
self.create_table(db)
real_tz = self.options.timezone
json_out = []
# Parse calendar
cal = icalendar.Calendar.from_ical(self.file_content)
for component in cal.walk("VEVENT"):
title = component["summary"]
try:
desc = component["description"]
except KeyError:
desc = None
category = component.decoded("categories", None)
loc = component.decoded("location", "(None)")
origstart = component["dtstart"].dt
startdate = origstart
if not isinstance(startdate, datetime.datetime):
# Item is all-day
continue
if not startdate.tzinfo:
startdate = real_tz.localize(startdate)
startdate = real_tz.normalize(startdate.astimezone(real_tz)).isoformat()
            if "dtend" not in component:
# Item has start time but not end time
enddate = origstart
else:
enddate = component["dtend"].dt
if not enddate.tzinfo:
enddate = real_tz.localize(enddate)
enddate = real_tz.normalize(enddate.astimezone(real_tz)).isoformat()
values = [title, desc, category, loc, startdate, enddate]
args = self.insert_row(db, values)
json_out.append(args)
output_json.update({self.table_name: json_out})
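# Editorial sketch (not part of the original module) of the localize/normalize
# pattern parse() applies to DTSTART/DTEND; assumes pytz supplies
# options.timezone, as the localize()/normalize() calls above imply.
def _tz_example():
    import pytz
    real_tz = pytz.timezone("America/Los_Angeles")
    naive = datetime.datetime(2014, 1, 18, 10, 0)
    # Naive timestamps are localized; aware ones would be converted instead.
    aware = real_tz.localize(naive) if naive.tzinfo is None else naive
    return real_tz.normalize(aware.astimezone(real_tz)).isoformat()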
avg_line_length: 30.085714 | max_line_length: 169 | alphanum_fraction: 0.7132 | count/score: classes 1,973/0.936847, generators 0/0, decorators 0/0, async_functions 0/0, documentation 305/0.144824
hexsha: 540df69177e6fb46fc901283c03665c416fc9242 | size: 24,887 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: MatthewWilkes/Zope @ 740f934fc9409ae0062e8f0cd6dcfd8b2df00376 | path: src/OFS/CopySupport.py | licenses: ["ZPL-2.1"]
max_stars_count: 1 (2018-12-07T21:19:58.000Z) | max_issues_count: null | max_forks_count: null
content:
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Copy interface
"""
from cgi import escape
from marshal import dumps
from marshal import loads
import re
import sys
import tempfile
from urllib import quote
from urllib import unquote
import warnings
from zlib import compress
from zlib import decompress
import transaction
from AccessControl import ClassSecurityInfo
from AccessControl import getSecurityManager
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import view_management_screens
from AccessControl.Permissions import copy_or_move
from AccessControl.Permissions import delete_objects
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.Dialogs import MessageDialog
from App.special_dtml import HTML
from App.special_dtml import DTMLFile
from ExtensionClass import Base
from webdav.Lockable import ResourceLockedError
from zExceptions import Unauthorized, BadRequest
from ZODB.POSException import ConflictError
from zope.interface import implements
from zope.event import notify
from zope.lifecycleevent import ObjectCopiedEvent
from zope.lifecycleevent import ObjectMovedEvent
from zope.container.contained import notifyContainerModified
from OFS.event import ObjectWillBeMovedEvent
from OFS.event import ObjectClonedEvent
from OFS.interfaces import ICopyContainer
from OFS.interfaces import ICopySource
from OFS.Moniker import loadMoniker
from OFS.Moniker import Moniker
from OFS.subscribers import compatibilityCall
class CopyError(Exception):
pass
copy_re = re.compile('^copy([0-9]*)_of_(.*)')
_marker=[]
class CopyContainer(Base):
"""Interface for containerish objects which allow cut/copy/paste"""
implements(ICopyContainer)
security = ClassSecurityInfo()
# The following three methods should be overridden to store sub-objects
# as non-attributes.
def _setOb(self, id, object):
setattr(self, id, object)
def _delOb(self, id):
delattr(self, id)
def _getOb(self, id, default=_marker):
if hasattr(aq_base(self), id):
return getattr(self, id)
if default is _marker:
raise AttributeError(id)
return default
def manage_CopyContainerFirstItem(self, REQUEST):
return self._getOb(REQUEST['ids'][0])
def manage_CopyContainerAllItems(self, REQUEST):
return [self._getOb(i) for i in REQUEST['ids']]
security.declareProtected(delete_objects, 'manage_cutObjects')
def manage_cutObjects(self, ids=None, REQUEST=None):
"""Put a reference to the objects named in ids in the clip board"""
if ids is None and REQUEST is not None:
return eNoItemsSpecified
elif ids is None:
raise ValueError('ids must be specified')
if type(ids) is type(''):
ids=[ids]
oblist=[]
for id in ids:
ob=self._getOb(id)
if ob.wl_isLocked():
raise ResourceLockedError('Object "%s" is locked via WebDAV'
% ob.getId())
if not ob.cb_isMoveable():
raise CopyError(eNotSupported % escape(id))
m = Moniker(ob)
oblist.append(m.dump())
cp=(1, oblist)
cp=_cb_encode(cp)
if REQUEST is not None:
resp=REQUEST['RESPONSE']
resp.setCookie('__cp', cp, path='%s' % cookie_path(REQUEST))
REQUEST['__cp'] = cp
return self.manage_main(self, REQUEST)
return cp
security.declareProtected(view_management_screens, 'manage_copyObjects')
def manage_copyObjects(self, ids=None, REQUEST=None, RESPONSE=None):
"""Put a reference to the objects named in ids in the clip board"""
if ids is None and REQUEST is not None:
return eNoItemsSpecified
elif ids is None:
raise ValueError('ids must be specified')
if type(ids) is type(''):
ids=[ids]
oblist=[]
for id in ids:
ob=self._getOb(id)
if not ob.cb_isCopyable():
raise CopyError(eNotSupported % escape(id))
m = Moniker(ob)
oblist.append(m.dump())
cp=(0, oblist)
cp=_cb_encode(cp)
if REQUEST is not None:
resp=REQUEST['RESPONSE']
resp.setCookie('__cp', cp, path='%s' % cookie_path(REQUEST))
REQUEST['__cp'] = cp
return self.manage_main(self, REQUEST)
return cp
def _get_id(self, id):
# Allow containers to override the generation of
# object copy id by attempting to call its _get_id
# method, if it exists.
match = copy_re.match(id)
if match:
n = int(match.group(1) or '1')
orig_id = match.group(2)
else:
n = 0
orig_id = id
while 1:
if self._getOb(id, None) is None:
return id
id='copy%s_of_%s' % (n and n+1 or '', orig_id)
n=n+1
security.declareProtected(view_management_screens, 'manage_pasteObjects')
def manage_pasteObjects(self, cb_copy_data=None, REQUEST=None):
"""Paste previously copied objects into the current object.
If calling manage_pasteObjects from python code, pass the result of a
previous call to manage_cutObjects or manage_copyObjects as the first
argument.
Also sends IObjectCopiedEvent and IObjectClonedEvent
or IObjectWillBeMovedEvent and IObjectMovedEvent.
"""
if cb_copy_data is not None:
cp = cb_copy_data
elif REQUEST is not None and REQUEST.has_key('__cp'):
cp = REQUEST['__cp']
else:
cp = None
if cp is None:
raise CopyError(eNoData)
try:
op, mdatas = _cb_decode(cp)
except:
raise CopyError(eInvalid)
oblist = []
app = self.getPhysicalRoot()
for mdata in mdatas:
m = loadMoniker(mdata)
try:
ob = m.bind(app)
except ConflictError:
raise
except:
raise CopyError(eNotFound)
self._verifyObjectPaste(ob, validate_src=op+1)
oblist.append(ob)
result = []
if op == 0:
# Copy operation
for ob in oblist:
orig_id = ob.getId()
if not ob.cb_isCopyable():
raise CopyError(eNotSupported % escape(orig_id))
try:
ob._notifyOfCopyTo(self, op=0)
except ConflictError:
raise
except:
raise CopyError(MessageDialog(
title="Copy Error",
message=sys.exc_info()[1],
action='manage_main'))
id = self._get_id(orig_id)
result.append({'id': orig_id, 'new_id': id})
orig_ob = ob
ob = ob._getCopy(self)
ob._setId(id)
notify(ObjectCopiedEvent(ob, orig_ob))
self._setObject(id, ob)
ob = self._getOb(id)
ob.wl_clearLocks()
ob._postCopy(self, op=0)
compatibilityCall('manage_afterClone', ob, ob)
notify(ObjectClonedEvent(ob))
if REQUEST is not None:
return self.manage_main(self, REQUEST, update_menu=1,
cb_dataValid=1)
elif op == 1:
# Move operation
for ob in oblist:
orig_id = ob.getId()
if not ob.cb_isMoveable():
raise CopyError(eNotSupported % escape(orig_id))
try:
ob._notifyOfCopyTo(self, op=1)
except ConflictError:
raise
except:
raise CopyError(MessageDialog(
title="Move Error",
message=sys.exc_info()[1],
action='manage_main'))
if not sanity_check(self, ob):
raise CopyError(
"This object cannot be pasted into itself")
orig_container = aq_parent(aq_inner(ob))
if aq_base(orig_container) is aq_base(self):
id = orig_id
else:
id = self._get_id(orig_id)
result.append({'id': orig_id, 'new_id': id})
notify(ObjectWillBeMovedEvent(ob, orig_container, orig_id,
self, id))
# try to make ownership explicit so that it gets carried
# along to the new location if needed.
ob.manage_changeOwnershipType(explicit=1)
try:
orig_container._delObject(orig_id, suppress_events=True)
except TypeError:
orig_container._delObject(orig_id)
warnings.warn(
"%s._delObject without suppress_events is discouraged."
% orig_container.__class__.__name__,
DeprecationWarning)
ob = aq_base(ob)
ob._setId(id)
try:
self._setObject(id, ob, set_owner=0, suppress_events=True)
except TypeError:
self._setObject(id, ob, set_owner=0)
warnings.warn(
"%s._setObject without suppress_events is discouraged."
% self.__class__.__name__, DeprecationWarning)
ob = self._getOb(id)
notify(ObjectMovedEvent(ob, orig_container, orig_id, self, id))
notifyContainerModified(orig_container)
if aq_base(orig_container) is not aq_base(self):
notifyContainerModified(self)
ob._postCopy(self, op=1)
# try to make ownership implicit if possible
ob.manage_changeOwnershipType(explicit=0)
if REQUEST is not None:
REQUEST['RESPONSE'].setCookie('__cp', 'deleted',
path='%s' % cookie_path(REQUEST),
expires='Wed, 31-Dec-97 23:59:59 GMT')
REQUEST['__cp'] = None
return self.manage_main(self, REQUEST, update_menu=1,
cb_dataValid=0)
return result
security.declareProtected(view_management_screens, 'manage_renameForm')
manage_renameForm = DTMLFile('dtml/renameForm', globals())
security.declareProtected(view_management_screens, 'manage_renameObjects')
def manage_renameObjects(self, ids=[], new_ids=[], REQUEST=None):
"""Rename several sub-objects"""
if len(ids) != len(new_ids):
raise BadRequest('Please rename each listed object.')
for i in range(len(ids)):
if ids[i] != new_ids[i]:
self.manage_renameObject(ids[i], new_ids[i], REQUEST)
if REQUEST is not None:
return self.manage_main(self, REQUEST, update_menu=1)
return None
security.declareProtected(view_management_screens, 'manage_renameObject')
def manage_renameObject(self, id, new_id, REQUEST=None):
"""Rename a particular sub-object.
"""
try:
self._checkId(new_id)
except:
raise CopyError(MessageDialog(
title='Invalid Id',
message=sys.exc_info()[1],
action ='manage_main'))
ob = self._getOb(id)
if ob.wl_isLocked():
raise ResourceLockedError('Object "%s" is locked via WebDAV'
% ob.getId())
if not ob.cb_isMoveable():
raise CopyError(eNotSupported % escape(id))
self._verifyObjectPaste(ob)
try:
ob._notifyOfCopyTo(self, op=1)
except ConflictError:
raise
except:
raise CopyError(MessageDialog(
title="Rename Error",
message=sys.exc_info()[1],
action ='manage_main'))
notify(ObjectWillBeMovedEvent(ob, self, id, self, new_id))
try:
self._delObject(id, suppress_events=True)
except TypeError:
self._delObject(id)
warnings.warn(
"%s._delObject without suppress_events is discouraged." %
self.__class__.__name__, DeprecationWarning)
ob = aq_base(ob)
ob._setId(new_id)
# Note - because a rename always keeps the same context, we
# can just leave the ownership info unchanged.
try:
self._setObject(new_id, ob, set_owner=0, suppress_events=True)
except TypeError:
self._setObject(new_id, ob, set_owner=0)
warnings.warn(
"%s._setObject without suppress_events is discouraged." %
self.__class__.__name__, DeprecationWarning)
ob = self._getOb(new_id)
notify(ObjectMovedEvent(ob, self, id, self, new_id))
notifyContainerModified(self)
ob._postCopy(self, op=1)
if REQUEST is not None:
return self.manage_main(self, REQUEST, update_menu=1)
return None
    # Why did we give this a manage_ prefix if it's really
    # supposed to be public since it does its own auth?
#
# Because it's still a "management" function.
security.declarePublic('manage_clone')
def manage_clone(self, ob, id, REQUEST=None):
"""Clone an object, creating a new object with the given id.
"""
if not ob.cb_isCopyable():
raise CopyError(eNotSupported % escape(ob.getId()))
try:
self._checkId(id)
except:
raise CopyError(MessageDialog(
title='Invalid Id',
message=sys.exc_info()[1],
action ='manage_main'))
self._verifyObjectPaste(ob)
try:
ob._notifyOfCopyTo(self, op=0)
except ConflictError:
raise
except:
raise CopyError(MessageDialog(
title="Clone Error",
message=sys.exc_info()[1],
action='manage_main'))
orig_ob = ob
ob = ob._getCopy(self)
ob._setId(id)
notify(ObjectCopiedEvent(ob, orig_ob))
self._setObject(id, ob)
ob = self._getOb(id)
ob._postCopy(self, op=0)
compatibilityCall('manage_afterClone', ob, ob)
notify(ObjectClonedEvent(ob))
return ob
def cb_dataValid(self):
# Return true if clipboard data seems valid.
try: cp=_cb_decode(self.REQUEST['__cp'])
except: return 0
return 1
def cb_dataItems(self):
# List of objects in the clip board
try: cp=_cb_decode(self.REQUEST['__cp'])
except: return []
oblist=[]
app = self.getPhysicalRoot()
for mdata in cp[1]:
m = loadMoniker(mdata)
oblist.append(m.bind(app))
return oblist
validClipData=cb_dataValid
def _verifyObjectPaste(self, object, validate_src=1):
# Verify whether the current user is allowed to paste the
# passed object into self. This is determined by checking
# to see if the user could create a new object of the same
# meta_type of the object passed in and checking that the
# user actually is allowed to access the passed in object
# in its existing context.
#
# Passing a false value for the validate_src argument will skip
# checking the passed in object in its existing context. This is
# mainly useful for situations where the passed in object has no
# existing context, such as checking an object during an import
# (the object will not yet have been connected to the acquisition
        # hierarchy).
if not hasattr(object, 'meta_type'):
raise CopyError(MessageDialog(
title = 'Not Supported',
message = ('The object <em>%s</em> does not support this' \
' operation' % escape(absattr(object.id))),
action = 'manage_main'))
if not hasattr(self, 'all_meta_types'):
raise CopyError(MessageDialog(
title = 'Not Supported',
message = 'Cannot paste into this object.',
action = 'manage_main'))
method_name = None
mt_permission = None
meta_types = absattr(self.all_meta_types)
for d in meta_types:
if d['name'] == object.meta_type:
method_name = d['action']
mt_permission = d.get('permission')
break
if mt_permission is not None:
sm = getSecurityManager()
if sm.checkPermission(mt_permission, self):
if validate_src:
# Ensure the user is allowed to access the object on the
# clipboard.
try:
parent = aq_parent(aq_inner(object))
except:
parent = None
if not sm.validate(None, parent, None, object):
raise Unauthorized(absattr(object.id))
if validate_src == 2: # moving
if not sm.checkPermission(delete_objects, parent):
raise Unauthorized('Delete not allowed.')
else:
raise CopyError(MessageDialog(
title = 'Insufficient Privileges',
message = ('You do not possess the %s permission in the '
'context of the container into which you are '
'pasting, thus you are not able to perform '
'this operation.' % mt_permission),
action = 'manage_main'))
else:
raise CopyError(MessageDialog(
title = 'Not Supported',
message = ('The object <em>%s</em> does not support this '
'operation.' % escape(absattr(object.id))),
action = 'manage_main'))
InitializeClass(CopyContainer)
class CopySource(Base):
"""Interface for objects which allow themselves to be copied."""
implements(ICopySource)
# declare a dummy permission for Copy or Move here that we check
# in cb_isCopyable.
security = ClassSecurityInfo()
security.setPermissionDefault(copy_or_move, ('Anonymous', 'Manager'))
def _canCopy(self, op=0):
"""Called to make sure this object is copyable.
The op var is 0 for a copy, 1 for a move.
"""
return 1
def _notifyOfCopyTo(self, container, op=0):
"""Overide this to be pickly about where you go!
If you dont want to go there, raise an exception. The op variable is 0
for a copy, 1 for a move.
"""
pass
def _getCopy(self, container):
# Commit a subtransaction to:
# 1) Make sure the data about to be exported is current
# 2) Ensure self._p_jar and container._p_jar are set even if
# either one is a new object
transaction.savepoint(optimistic=True)
if self._p_jar is None:
raise CopyError(
'Object "%s" needs to be in the database to be copied' %
`self`)
if container._p_jar is None:
raise CopyError(
'Container "%s" needs to be in the database' %
`container`)
# Ask an object for a new copy of itself.
f=tempfile.TemporaryFile()
self._p_jar.exportFile(self._p_oid,f)
f.seek(0)
ob=container._p_jar.importFile(f)
f.close()
return ob
def _postCopy(self, container, op=0):
        # Called after the copy is finished to accommodate special cases.
# The op var is 0 for a copy, 1 for a move.
pass
def _setId(self, id):
# Called to set the new id of a copied object.
self.id=id
def cb_isCopyable(self):
# Is object copyable? Returns 0 or 1
if not (hasattr(self, '_canCopy') and self._canCopy(0)):
return 0
if not self.cb_userHasCopyOrMovePermission():
return 0
return 1
def cb_isMoveable(self):
# Is object moveable? Returns 0 or 1
if not (hasattr(self, '_canCopy') and self._canCopy(1)):
return 0
if hasattr(self, '_p_jar') and self._p_jar is None:
return 0
try: n=aq_parent(aq_inner(self))._reserved_names
except: n=()
if absattr(self.id) in n:
return 0
if not self.cb_userHasCopyOrMovePermission():
return 0
return 1
def cb_userHasCopyOrMovePermission(self):
if getSecurityManager().checkPermission(copy_or_move, self):
return 1
InitializeClass(CopySource)
def sanity_check(c, ob):
# This is called on cut/paste operations to make sure that
# an object is not cut and pasted into itself or one of its
# subobjects, which is an undefined situation.
ob = aq_base(ob)
while 1:
if aq_base(c) is ob:
return 0
inner = aq_inner(c)
if aq_parent(inner) is None:
return 1
c = aq_parent(inner)
def absattr(attr):
if callable(attr): return attr()
return attr
def _cb_encode(d):
return quote(compress(dumps(d), 9))
def _cb_decode(s):
return loads(decompress(unquote(s)))
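# Editorial round-trip sketch (not part of the original module) for the
# clipboard helpers above: the __cp cookie is a marshal-dumped tuple,
# zlib-compressed and URL-quoted.
def _cb_roundtrip_example():
    cp = _cb_encode((0, [('placeholder', 'moniker')]))
    assert _cb_decode(cp) == (0, [('placeholder', 'moniker')])
    return cp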
def cookie_path(request):
# Return a "path" value for use in a cookie that refers
# to the root of the Zope object space.
return request['BASEPATH1'] or "/"
fMessageDialog = HTML("""
<HTML>
<HEAD>
<TITLE>&dtml-title;</TITLE>
</HEAD>
<BODY BGCOLOR="#FFFFFF">
<FORM ACTION="&dtml-action;" METHOD="GET" <dtml-if
target>TARGET="&dtml-target;"</dtml-if>>
<TABLE BORDER="0" WIDTH="100%%" CELLPADDING="10">
<TR>
<TD VALIGN="TOP">
<BR>
<CENTER><B><FONT SIZE="+6" COLOR="#77003B">!</FONT></B></CENTER>
</TD>
<TD VALIGN="TOP">
<BR><BR>
<CENTER>
<dtml-var message>
</CENTER>
</TD>
</TR>
<TR>
<TD VALIGN="TOP">
</TD>
<TD VALIGN="TOP">
<CENTER>
<INPUT TYPE="SUBMIT" VALUE=" Ok ">
</CENTER>
</TD>
</TR>
</TABLE>
</FORM>
</BODY></HTML>""", target='', action='manage_main', title='Changed')
eNoData=MessageDialog(
title='No Data',
message='No clipboard data found.',
action ='manage_main',)
eInvalid=MessageDialog(
title='Clipboard Error',
message='The data in the clipboard could not be read, possibly due ' \
'to cookie data being truncated by your web browser. Try copying ' \
'fewer objects.',
action ='manage_main',)
eNotFound=MessageDialog(
title='Item Not Found',
message='One or more items referred to in the clipboard data was ' \
'not found. The item may have been moved or deleted after you ' \
'copied it.',
action ='manage_main',)
eNotSupported=fMessageDialog(
title='Not Supported',
message=(
'The action against the <em>%s</em> object could not be carried '
'out. '
'One of the following constraints caused the problem: <br><br>'
'The object does not support this operation.'
'<br><br>-- OR --<br><br>'
'The currently logged-in user does not have the <b>Copy or '
'Move</b> permission respective to the object.'
),
action ='manage_main',)
eNoItemsSpecified=MessageDialog(
title='No items specified',
message='You must select one or more items to perform ' \
'this operation.',
action ='manage_main'
)
avg_line_length: 33.952251 | max_line_length: 79 | alphanum_fraction: 0.571704 | count/score: classes 19,747/0.793466, generators 0/0, decorators 0/0, async_functions 0/0, documentation 6,983/0.280588
hexsha: 540f2aeaaaa0eee405144a0e73dbe2a9199426b2 | size: 1,697 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: frank0215/Leetcode_python @ 9428ded4f9abd347b12bfef8aa1dd2d177f3afea | path: Search in a Binary Search Tree.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
class TreeNode:
def __init__(self, val, left=None, right=None):
self.val = val
self.right = right
self.left = left
class Solution:
def searchBST(self, root, val):
# if not root:
# return None
# if root.val == val:
# return root
# l = self.searchBST(root.left, val)
# if l:
# return l
# r = self.searchBST(root.right, val)
# if r:
# return r
# return None
        # Binary search
        # Tail recursion: can be rewritten as a loop
# if not root:
# return None
# if root.val == val:
# return root
# if root.val < val:
# return self.searchBST(root.right, val)
# return self.searchBST(root.left, val)
# while True:
# if not root:
# return None
# if root.val == val:
# return root
# if root.val < val:
# root = root.right
# else:
# root = root.left
while root:
if root.val == val:
return root
if root.val < val:
root = root.right
else:
root = root.left
return None
if __name__ == '__main__':
#root = [4,2,7,1,3]
# root = TreeNode(4)
# root.left = TreeNode(2)
# root.left.left = TreeNode(1)
# root.right = TreeNode(7)
# root.left.right = TreeNode(3)
# val = 2
root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(7))
val = 2
    print(Solution().searchBST(root, val).val)  # output: 2 (the matched subtree is [2,1,3])
avg_line_length: 23.569444 | max_line_length: 74 | alphanum_fraction: 0.446081 | count/score: classes 1,328/0.771644, generators 0/0, decorators 0/0, async_functions 0/0, documentation 782/0.454387
hexsha: 541271297050a8dd2d540558c72d94871ade9a15 | size: 1,019 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: simran-grewal/COVID-19-Data-Analysis @ 8751aa75c451e956d5b1d1e2d6f2ffbec8dc673a | path: src/data/process_HJ_data.py | licenses: ["FTL"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import subprocess
import os
import numpy as np
import pandas as pd
from datetime import datetime
def store_relational_JH_data():
pd_raw = pd.read_csv('../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
pd_data_base = pd_raw.rename(columns={'Country/Region': 'country', 'Province/State': 'state'})
pd_data_base['state'] = pd_data_base['state'].fillna('no')
pd_data_base.drop(['Lat', 'Long'], axis=1, inplace=True)
pd_relational_model = pd_data_base.set_index(['state', 'country']) \
.T.stack(level=[0, 1]) \
.reset_index() \
.rename(columns = {'level_0': 'date', 0: 'confirmed'},)
pd_relational_model['date'] = pd_relational_model.date.astype('datetime64[ns]')
pd_relational_model.to_csv('../data/processed/Covid_relational_confirmed.csv', sep = ';', index=False)
print('Number of rows stored:'+ str(pd_relational_model.shape[0]))
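# Editorial sketch (not in the original file): the wide-to-long reshape used
# above, applied to a tiny hand-made frame so the transformation is visible.
def _reshape_example():
    wide = pd.DataFrame({'state': ['no'], 'country': ['X'],
                         '1/22/20': [1], '1/23/20': [2]})
    # Dates become the index after .T; stacking both column levels yields one
    # (date, state, country) row per original cell.
    return wide.set_index(['state', 'country']) \
        .T.stack(level=[0, 1]) \
        .reset_index() \
        .rename(columns={'level_0': 'date', 0: 'confirmed'})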
if __name__ == '__main__':
store_relational_JH_data()
avg_line_length: 42.458333 | max_line_length: 134 | alphanum_fraction: 0.703631 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 336/0.329735
hexsha: 5414770e7a98cb88e29d0f46a7ec2949accc8e44 | size: 1,023 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: erique/pt2-clone @ f3376a5a40316f15e82feaa321673a8611c31a53 | path: wasm-server.py | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
# Python 3
import sys
import socketserver
from http.server import SimpleHTTPRequestHandler
class WasmHandler(SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory="release/emscripten", **kwargs)
def do_GET(self):
if self.path == '/':
self.path = '/pt2-clone.html'
return super().do_GET()
def end_headers(self):
self.send_header("Cross-Origin-Opener-Policy", "same-origin")
self.send_header("Cross-Origin-Embedder-Policy", "require-corp")
SimpleHTTPRequestHandler.end_headers(self)
# Python 3.7.5 adds in the WebAssembly Media Type. If this is an older
# version, add in the Media Type.
if sys.version_info < (3, 7, 5):
WasmHandler.extensions_map['.wasm'] = 'application/wasm'
if __name__ == '__main__':
PORT = 8080
with socketserver.TCPServer(("", PORT), WasmHandler) as httpd:
print("Listening on port {}. Press Ctrl+C to stop.".format(PORT))
httpd.serve_forever()
avg_line_length: 34.1 | max_line_length: 73 | alphanum_fraction: 0.670577 | count/score: classes 513/0.501466, generators 0/0, decorators 0/0, async_functions 0/0, documentation 320/0.312805
hexsha: 5414822b05eb873a76f59d1b99fc4fca71aebfa2 | size: 1,850 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: boularbahsmail/Asteroids-Game @ ef4ae8d2e66ea2875aba83f512610b4da56c9ef1 | path: stage.py | licenses: ["CNRI-Python"]
max_stars_count: 2 (2021-03-25T23:02:50.000Z to 2021-03-26T10:41:33.000Z) | max_issues_count: null | max_forks_count: null
content:
import pygame
import sys
import os
from pygame.locals import *
class Stage:
# Set up the PyGame surface
def __init__(self, caption, dimensions=None):
pygame.init()
# If no screen size is provided pick the first available mode
        if dimensions is None:
dimensions = pygame.display.list_modes()[0]
pygame.display.set_mode(dimensions, FULLSCREEN)
pygame.mouse.set_visible(False)
# pygame.display.set_mode(dimensions)
pygame.display.set_caption(caption)
self.screen = pygame.display.get_surface()
self.spriteList = []
self.width = dimensions[0]
self.height = dimensions[1]
self.showBoundingBoxes = False
    # Add sprite to list then draw it as an easy way to get the bounding rect
def addSprite(self, sprite):
self.spriteList.append(sprite)
sprite.boundingRect = pygame.draw.aalines(
self.screen, sprite.color, True, sprite.draw())
def removeSprite(self, sprite):
self.spriteList.remove(sprite)
def drawSprites(self):
for sprite in self.spriteList:
sprite.boundingRect = pygame.draw.aalines(
self.screen, sprite.color, True, sprite.draw())
            if self.showBoundingBoxes:
pygame.draw.rect(self.screen, (255, 255, 255),
sprite.boundingRect, 1)
def moveSprites(self):
for sprite in self.spriteList:
sprite.move()
if sprite.position.x < 0:
sprite.position.x = self.width
if sprite.position.x > self.width:
sprite.position.x = 0
if sprite.position.y < 0:
sprite.position.y = self.height
if sprite.position.y > self.height:
sprite.position.y = 0
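# Hypothetical minimal sprite (editorial addition) spelling out the interface
# Stage assumes: .color, .position with .x/.y, .draw() returning a point list,
# and .move().
class ExampleSprite:
    class _Pos:
        def __init__(self, x, y):
            self.x, self.y = x, y
    def __init__(self, x, y):
        self.color = (255, 255, 255)
        self.position = self._Pos(x, y)
    def draw(self):
        # A small square around the current position (aalines needs >= 2 points)
        p = self.position
        return [(p.x, p.y), (p.x + 4, p.y), (p.x + 4, p.y + 4), (p.x, p.y + 4)]
    def move(self):
        self.position.x += 1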
avg_line_length: 29.83871 | max_line_length: 76 | alphanum_fraction: 0.600541 | count/score: classes 1,783/0.963784, generators 0/0, decorators 0/0, async_functions 0/0, documentation 197/0.106486
hexsha: 5414c4d1cd6ab405e144265b135b1ae64b919a77 | size: 2,596 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: NoXLaw/RaRCTF2021-Challenges-Public @ 1a1b094359b88f8ebbc83a6b26d27ffb2602458f | path: web/MicroservicesAsAservice/src/notes/app.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
from flask import Flask, request, jsonify, render_template_string
import redis
import requests
import re
import json
import sys
app = Flask(__name__)
@app.route('/getid/<username>')
def getid(username):
red = redis.Redis(host="redis_users")
return red.get(username).decode()
@app.route('/useraction', methods=["POST"])
def useraction():
mode = request.form.get("mode")
username = request.form.get("username")
if mode == "register":
r = requests.get('http://redis_userdata:5000/adduser')
port = int(r.text)
red = redis.Redis(host="redis_users")
red.set(username, port)
return ""
elif mode == "adddata":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
requests.post(f"http://redis_userdata:5000/putuser/{port}", json={
request.form.get("key"): request.form.get("value")
})
return ""
elif mode == "getdata":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
r = requests.get(f"http://redis_userdata:5000/getuser/{port}")
return jsonify(r.json())
elif mode == "bioadd":
bio = request.form.get("bio")
bio = bio.replace(".", "").replace("_", "").\
replace("{", "").replace("}", "").\
replace("(", "").replace(")", "").\
replace("|", "")
bio = re.sub(r'\[\[([^\[\]]+)\]\]', r'{{data["\g<1>"]}}', bio)
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
requests.post(f"http://redis_userdata:5000/bio/{port}", json={
"bio": bio
})
return ""
elif mode == "bioget":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
r = requests.get(f"http://redis_userdata:5000/bio/{port}")
return r.text
elif mode == "keytransfer":
red = redis.Redis(host="redis_users")
port = red.get(username).decode()
red2 = redis.Redis(host="redis_userdata",
port=int(port))
red2.migrate(request.form.get("host"),
request.form.get("port"),
[request.form.get("key")],
0, 1000,
copy=True, replace=True)
return ""
@app.route("/render", methods=["POST"])
def render_bio():
data = request.json.get('data')
if data is None:
data = {}
return render_template_string(request.json.get('bio'), data=data)
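# Editorial sketch (not part of the original challenge code): shows the
# [[key]] -> {{data["key"]}} rewrite performed in the "bioadd" branch above.
def _bio_rewrite_example():
    bio = 'hello [[name]]'
    rewritten = re.sub(r'\[\[([^\[\]]+)\]\]', r'{{data["\g<1>"]}}', bio)
    assert rewritten == 'hello {{data["name"]}}'
    return rewritten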
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
avg_line_length: 33.282051 | max_line_length: 74 | alphanum_fraction: 0.552003 | count/score: classes 0/0, generators 0/0, decorators 2,371/0.913328, async_functions 0/0, documentation 590/0.227273
hexsha: 54156bd3caed9e41e591c736c817a669d7acd84f | size: 1,195 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: badgercl/scrappers-congreso-chile @ ae2f3f451f6fa90a4964f2c476aecdfc2c5254ee | path: divisiones.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
import requests
from bs4 import BeautifulSoup
import json
import pprint
url = 'https://www.bcn.cl/siit/divisionelectoral/index.htm'
res = requests.get(url)
soup = BeautifulSoup(res.text, 'html.parser')
rows = soup.find_all('tbody')[1].find_all('tr')
region = ""
circunscripcion = ""
distrito = ""
comuna = ""
regiones = {}
comunas = {}
for row in rows:
tds = row.find_all('td')
if len(tds) == 4:
region = tds[0].text.strip()
circunscripcion = tds[1].text.split()[0].replace('ª','')
distrito = tds[2].text.split()[1]
comuna = tds[3].a.text.replace('*', '').strip()
regiones[region] = {
circunscripcion: { distrito: [comuna] }
}
elif len(tds) == 2:
distrito = tds[0].text.split()[1]
comuna = tds[1].a.text.replace('*', '').strip()
regiones[region][circunscripcion][distrito] = [comuna]
else:
comuna = tds[0].a.text.replace('*', '').strip().lower()
regiones[region][circunscripcion][distrito].append(comuna)
comunas[comuna] = {
'circunscripcion': circunscripcion,
'distrito': distrito,
'region': region
}
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(comunas)
with open('output/divisiones.json', 'w') as json_file:
json.dump(comunas, json_file)
avg_line_length: 23.9 | max_line_length: 60 | alphanum_fraction: 0.661925 | count/score: classes 0/0, generators 0/0, decorators 0/0, async_functions 0/0, documentation 172/0.143813
hexsha: 5419760ed6778cfb73074dd68ffc3b8c58ad8780 | size: 685 | ext: py | lang: Python
max_stars/max_issues/max_forks repo: zepplen/aws_tokens @ 9249ba00ea0b39ac3c523a9ea3ee2485436e84ef | path: aws-auth-manager/src/awsauthmanager/app.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null | event datetimes: null
content:
####################################################
# (C) Mark Trimmer, 2016, All Rights Reserved
#
# File Name: app.py
#
# Creation Date: 28-12-2016
#
# Created By: Mark Trimmer
#
# Purpose:
#
####################################################
from __future__ import print_function
from credentialfile import CredentialFile
from ststoken import StsToken
class App(object):
def __init__(self, options):
self.options = options
self.credential_file = CredentialFile(path=options['credential_file'], profile=options['profile_name'])
self.credential_file.back_fill_user_data()
self.sts = StsToken(self.credential_file)
self.sts.get_auth()
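
A minimal invocation sketch; the two option keys are exactly the ones __init__ reads above, while the values are placeholders:

# hypothetical options; '~/.aws/credentials' and 'default' are placeholder values
options = {'credential_file': '~/.aws/credentials', 'profile_name': 'default'}
app = App(options)  # back-fills user data, then fetches an STS token via StsToken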
avg_line_length: 26.346154 | max_line_length: 111 | alphanum_fraction: 0.608759 | count_classes: 324 | score_classes: 0.472993 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 267 | score_documentation: 0.389781

hexsha: 5419a767eec4a75f25fccde0e39c22e20a25e3d6 | size: 486 | ext: py | lang: Python
max_stars: setup.py | datamachines/classification-banner | 53ae7ea1104000e60474955f7603c46024a1d06f | ["Apache-2.0"] | count: null | min: null | max: null
max_issues: setup.py | datamachines/classification-banner | 53ae7ea1104000e60474955f7603c46024a1d06f | ["Apache-2.0"] | count: 1 | min: 2021-06-11T15:30:31.000Z | max: 2021-06-14T12:58:21.000Z
max_forks: setup.py | datamachines/classification-banner | 53ae7ea1104000e60474955f7603c46024a1d06f | ["Apache-2.0"] | count: null | min: null | max: null

#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='classification-banner',
version='1.0.0',
      description='Classification banner compatible with GTK3 and X11.',
author='Mike May',
author_email='mikemay@datamachines.io',
url='https://www.github.com/datamachines/classification-banner',
packages=find_packages(),
scripts=["bin/classification-banner"],
data_files=[("classification-banner", ["style.css"])]
)
avg_line_length: 32.4 | max_line_length: 72 | alphanum_fraction: 0.687243 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 259 | score_documentation: 0.532922

hexsha: 541ac4d6365b97db764dba02574e6974751b26ba | size: 371 | ext: py | lang: Python
max_stars: synapyse/impl/activation_functions/linear.py | synapyse/synapyse | 8c9ff53ede2d83af27ce771ce1b6ea6a32155b02 | ["MIT"] | count: 4 | min: 2015-09-19T11:02:56.000Z | max: 2019-03-27T11:42:12.000Z
max_issues: synapyse/impl/activation_functions/linear.py | synapyse/synapyse | 8c9ff53ede2d83af27ce771ce1b6ea6a32155b02 | ["MIT"] | count: null | min: null | max: null
max_forks: synapyse/impl/activation_functions/linear.py | synapyse/synapyse | 8c9ff53ede2d83af27ce771ce1b6ea6a32155b02 | ["MIT"] | count: 1 | min: 2019-10-29T16:24:28.000Z | max: 2019-10-29T16:24:28.000Z

from synapyse.base.activation_functions.activation_function import ActivationFunction
__author__ = 'Douglas Eric Fonseca Rodrigues'
class Linear(ActivationFunction):
def calculate_output(self):
return self.x
def calculate_derivative(self):
return 1.0
def clone(self):
clone = Linear()
clone.x = self.x
return clone
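
A small usage sketch, assuming (as clone() above implies) that instances are constructed without arguments and the input is stored on self.x:

f = Linear()
f.x = 2.5                        # input value, normally set by the framework
print(f.calculate_output())      # -> 2.5 (identity)
print(f.calculate_derivative())  # -> 1.0 (constant slope)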
avg_line_length: 23.1875 | max_line_length: 85 | alphanum_fraction: 0.698113 | count_classes: 236 | score_classes: 0.636119 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 32 | score_documentation: 0.086253

hexsha: 541acc6e7acab303b472692f00ec1a571d8c6ad5 | size: 1,027 | ext: py | lang: Python
max_stars: demo/app/libs/description.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | ["MIT"] | count: 1 | min: 2022-02-15T14:03:10.000Z | max: 2022-02-15T14:03:10.000Z
max_issues: demo/app/libs/description.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | ["MIT"] | count: null | min: null | max: null
max_forks: demo/app/libs/description.py | suenklerhaw/seoeffekt | 0a31fdfa1a7246da37e37bf53c03d94c5f13f095 | ["MIT"] | count: null | min: null | max: null

#check description
from bs4 import BeautifulSoup
import lxml.html
def check_description(tree):
description = ""
xpath_meta = "//meta[@name='description']/@content"
xpath_og_property = "//meta[@property='og:description']/@content"
xpath_og_name = "//meta[@name='og:description']/@content"
    # xpath() returns a list; str() turns it into "['...']", so a length > 5
    # separates a real hit from the empty-list string "[]"
    meta_content = str(tree.xpath(xpath_meta))
og_property_content = str(tree.xpath(xpath_og_property))
og_name = str(tree.xpath(xpath_og_name))
if(len(meta_content) > 5 or len(og_property_content) > 5 or len(og_name) > 5):
if len(og_name) > 5:
description = og_name
elif len(og_property_content) > 5:
description = og_property_content
else:
description = meta_content
    # strip the "['" and "']" wrapper left over from str(list)
    description = description[2:-2]
description = description.replace("'", "")
description = description.replace('"', "")
description = description.replace(':', "")
description = description.replace(',', "")
return description
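
A minimal usage sketch with a hand-built lxml tree (the HTML snippet is illustrative):

import lxml.html

html = '<html><head><meta name="description" content="A test page."></head></html>'
tree = lxml.html.fromstring(html)
print(check_description(tree))  # -> A test page.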
avg_line_length: 32.09375 | max_line_length: 82 | alphanum_fraction: 0.641675 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 164 | score_documentation: 0.159688

hexsha: 541b0dd061b1cb2fb2dd1d60f50109368732184b | size: 919 | ext: py | lang: Python
max_stars: backend/projectfiles/GradleProjectFile.py | karllindmark/IsYourProjectUpToDate | ce2df36b8fa39a4732b05dfd75558a914e4e990b | ["Apache-2.0"] | count: null | min: null | max: null
max_issues: backend/projectfiles/GradleProjectFile.py | karllindmark/IsYourProjectUpToDate | ce2df36b8fa39a4732b05dfd75558a914e4e990b | ["Apache-2.0"] | count: null | min: null | max: null
max_forks: backend/projectfiles/GradleProjectFile.py | karllindmark/IsYourProjectUpToDate | ce2df36b8fa39a4732b05dfd75558a914e4e990b | ["Apache-2.0"] | count: null | min: null | max: null

import re
from backend.projectfiles.GenericProjectFile import GenericProjectFile
QUOTE = r'(?:["|\'])'  # character class: matches ", ' or a literal | (not alternation)
STRING = r'([\w\.\-\+]+)'
GAV_REGEXP = QUOTE + '(?:' + ":".join([STRING, STRING, STRING]) + ')' + QUOTE
class GradleProjectFile(GenericProjectFile):
""" Gradle project file implementation to extract dependencies """
def extract(self):
dependencies = []
for line in self.result.iter_lines():
results = re.match('.*' + GAV_REGEXP + '.*', line)
if results:
group = results.group(1)
artifact = results.group(2)
version = results.group(3)
dependencies.append({'group': group,
'artifact': artifact,
'version': version,
'gav': ":".join([group, artifact, version])})
return dependencies
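
A quick check of GAV_REGEXP against a typical Gradle dependency line (the coordinates are illustrative):

import re

line = "compile 'com.squareup.retrofit2:retrofit:2.9.0'"
match = re.match('.*' + GAV_REGEXP + '.*', line)
print(match.groups())  # -> ('com.squareup.retrofit2', 'retrofit', '2.9.0')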
avg_line_length: 34.037037 | max_line_length: 82 | alphanum_fraction: 0.516866 | count_classes: 705 | score_classes: 0.767138 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 148 | score_documentation: 0.161045

hexsha: 541c51e665974394ae0ab412789deb2f54ac881a | size: 1,879 | ext: py | lang: Python
max_stars: python/rhinoscripts/example_csv_loading.py | tasbolat1/hmv-s16 | 7863c66ed645b463b72aef98a5c484a18cc9f396 | ["BSD-3-Clause"] | count: 1 | min: 2020-10-10T21:27:30.000Z | max: 2020-10-10T21:27:30.000Z
max_issues: python/rhinoscripts/example_csv_loading.py | tasbolat1/hmv-s16 | 7863c66ed645b463b72aef98a5c484a18cc9f396 | ["BSD-3-Clause"] | count: null | min: null | max: null
max_forks: python/rhinoscripts/example_csv_loading.py | tasbolat1/hmv-s16 | 7863c66ed645b463b72aef98a5c484a18cc9f396 | ["BSD-3-Clause"] | count: null | min: null | max: null

"""Example code for importing a single rigid body trajectory into Rhino from a Optitrack CSV file.
Copyright (c) 2016, Garth Zeglin. All rights reserved. Licensed under the terms
of the BSD 3-clause license as included in LICENSE.
Example code for generating a path of Rhino 'planes' (e.g. coordinate frame)
from a trajectory data file. The path is returned as a list of Plane objects.
Each plane is created using an origin vector and X and Y basis vectors. The
time stamps and Z basis vectors in the trajectory file are ignored.
"""
# Load the Rhino API.
import rhinoscriptsyntax as rs
# Make sure that the Python libraries also contained within this course package
# are on the load path. This adds the parent folder to the load path, assuming that this
# script is still located with the rhinoscripts/ subfolder of the Python library tree.
import sys, os
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
# Load the Optitrack CSV file parser module.
import optitrack.csv_reader as csv
from optitrack.geometry import *
# Find the path to the test data file located alongside the script.
filename = os.path.join( os.path.abspath(os.path.dirname(__file__)), "sample_optitrack_take.csv")
# Read the file.
take = csv.Take().readCSV(filename)
# Print out some statistics
print "Found rigid bodies:", take.rigid_bodies.keys()
# Process the first rigid body into a set of planes.
bodies = take.rigid_bodies.values()
# for now:
xaxis = [1,0,0]
yaxis = [0,1,0]
if len(bodies) > 0:
body = bodies[0]
for pos,rot in zip(body.positions, body.rotations):
if pos is not None and rot is not None:
xaxis, yaxis = quaternion_to_xaxis_yaxis(rot)
plane = rs.PlaneFromFrame(pos, xaxis, yaxis)
# create a visible plane, assuming units are in meters
rs.AddPlaneSurface( plane, 0.1, 0.1 )
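
For reference, a minor variant of the loop above that collects the planes into a list instead of immediately adding surfaces (same Rhino API calls as the original, so it still has to run inside Rhino):

planes = []
if len(bodies) > 0:
    body = bodies[0]
    for pos, rot in zip(body.positions, body.rotations):
        if pos is not None and rot is not None:
            xaxis, yaxis = quaternion_to_xaxis_yaxis(rot)
            planes.append(rs.PlaneFromFrame(pos, xaxis, yaxis))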
avg_line_length: 36.134615 | max_line_length: 98 | alphanum_fraction: 0.734433 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,132 | score_documentation: 0.602448

hexsha: 541d99a0b05066f7ae098ca89430a31ca86b09cb | size: 91,180 | ext: py | lang: Python
max_stars: 4_Nback/Nback_practice_lastrun.py | Brinks0211/cognitive_paradigms_patients | 30e3f8268e5c2b5ebfffcc4ebbcb46d8e60d039e | ["MIT"] | count: 2 | min: 2020-07-01T12:53:40.000Z | max: 2020-07-01T13:30:23.000Z
max_issues: 4_Nback/Nback_practice_lastrun.py | Brinks0211/cognitive_paradigms_patients | 30e3f8268e5c2b5ebfffcc4ebbcb46d8e60d039e | ["MIT"] | count: null | min: null | max: null
max_forks: 4_Nback/Nback_practice_lastrun.py | Brinks0211/cognitive_paradigms_patients | 30e3f8268e5c2b5ebfffcc4ebbcb46d8e60d039e | ["MIT"] | count: null | min: null | max: null

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2020.1.3),
on June 15, 2020, at 09:02
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
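# (so relative resource paths such as 'documents\\document_0back_pre.xlsx'
#  below resolve against the script folder, whatever the launch directory)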
# Store info about the experiment session
psychopyVersion = '2020.1.3'
expName = 'Nback_Practice' # from the Builder filename that created this script
expInfo = {'participant': '', '姓名拼音': '', '男1/女2': '', '入院1/出院2': ''}  # extra fields: name in pinyin; sex (male = 1 / female = 2); admission = 1 / discharge = 2
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='C:\\Users\\zhang\\Desktop\\张以昊\\课题组\\4_Nback\\Nback_practice_lastrun.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=[1024, 768], fullscr=False, screen=0,
winType='pyglet', allowGUI=True, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
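# e.g. a measured 60 Hz refresh gives frameDur = 1.0 / 60 ≈ 0.0167 s per frame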
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "introduction1"
introduction1Clock = core.Clock()
introduction_1 = visual.TextStim(win=win, name='introduction_1',
    text='欢迎参加测试\n\n本测试分三种类型\n现在是练习部分\n\n(继续,请按空格键)',  # "Welcome to the test. It has three types; this is the practice part. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
introduction1_2 = keyboard.Keyboard()
# Initialize components for Routine "introduction5"
introduction5Clock = core.Clock()
introduction_5 = visual.TextStim(win=win, name='introduction_5',
    text='如果准备好了,请开始练习\n\n(继续,请按空格键)',  # "If you are ready, begin the practice. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_7 = keyboard.Keyboard()
# Initialize components for Routine "tip1"
tip1Clock = core.Clock()
text_2 = visual.TextStim(win=win, name='text_2',
    text='现在,练习第一种类型\n\n \n(继续,请按空格键)',  # "Now practise the first type. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_4 = keyboard.Keyboard()
# Initialize components for Routine "introduction2"
introduction2Clock = core.Clock()
introduction2_1 = visual.TextStim(win=win, name='introduction2_1',
    text='第一种类型\n\n开始时,屏幕中间会出现注视点“+”\n之后会连续出现一系列的数字\n\n在每个数字出现时\n您只需要按下空格键即可\n\n(继续,请按空格键)',  # "Type 1: a fixation '+' appears, then a series of digits; just press space whenever a digit appears. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
introduction2_2 = keyboard.Keyboard()
# Initialize components for Routine "_0back_pre"
_0back_preClock = core.Clock()
concentration_pre1 = visual.TextStim(win=win, name='concentration_pre1',
text='+',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "_0back"
_0backClock = core.Clock()
back0_1 = visual.TextStim(win=win, name='back0_1',
text='default text',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp0 = keyboard.Keyboard()
message0=" "
# Initialize components for Routine "feedback_0"
feedback_0Clock = core.Clock()
text_3 = visual.TextStim(win=win, name='text_3',
text='default text',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "tip2"
tip2Clock = core.Clock()
text = visual.TextStim(win=win, name='text',
    text='现在,练习第二种类型\n\n(继续请按空格键)',  # "Now practise the second type. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_5 = keyboard.Keyboard()
# Initialize components for Routine "introduction3"
introduction3Clock = core.Clock()
introduction3_1 = visual.TextStim(win=win, name='introduction3_1',
    text='第二种类型\n\n开始时,屏幕中间会出现注视点“+”\n之后会连续出现一系列的数字\n\n从第二个数字出现时\n您需要判断该数字与上一个数字是否一致\n一致,请按左键; 不一致,请按右键\n\n(继续,请按空格键)',  # "Type 2: from the second digit on, judge whether it matches the previous digit; press left for same, right for different. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
introduction3_2 = keyboard.Keyboard()
# Initialize components for Routine "_1back_pre"
_1back_preClock = core.Clock()
concentration1_pre = visual.TextStim(win=win, name='concentration1_pre',
text='+',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
back1_pre = visual.TextStim(win=win, name='back1_pre',
    text='2\n(无需作答)',  # "2 (no response required)"
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-1.0);
# Initialize components for Routine "_1back"
_1backClock = core.Clock()
back1_1 = visual.TextStim(win=win, name='back1_1',
text='default text',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_2 = keyboard.Keyboard()
message1=0
# Initialize components for Routine "feedback_1"
feedback_1Clock = core.Clock()
feedback1 = visual.TextStim(win=win, name='feedback1',
text='default text',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "tip3"
tip3Clock = core.Clock()
tip_3 = visual.TextStim(win=win, name='tip_3',
    text='现在,练习第三种类型\n\n(继续,请按空格键)',  # "Now practise the third type. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_6 = keyboard.Keyboard()
# Initialize components for Routine "introduction4"
introduction4Clock = core.Clock()
introduction4_1 = visual.TextStim(win=win, name='introduction4_1',
    text='第三种类型\n\n开始时,屏幕中间会出现注视点“+”\n之后会连续出现一系列的数字\n\n从第三个数字出现时\n您需要判断该数字与倒数二个数字是否一致\n一致,请按左键; 不一致,请按右键\n\n(继续,请按空格键)',  # "Type 3: from the third digit on, judge whether it matches the digit two back; press left for same, right for different. (Press space to continue)"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp = keyboard.Keyboard()
# Initialize components for Routine "_2back_pre"
_2back_preClock = core.Clock()
concentration_pre = visual.TextStim(win=win, name='concentration_pre',
text='+',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
text_4 = visual.TextStim(win=win, name='text_4',
    text='1\n(无需作答)',  # "1 (no response required)"
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-1.0);
text_5 = visual.TextStim(win=win, name='text_5',
    text='4\n(无需作答)',  # "4 (no response required)"
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-2.0);
# Initialize components for Routine "_2back"
_2backClock = core.Clock()
back2_1 = visual.TextStim(win=win, name='back2_1',
text='default text',
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
key_resp_3 = keyboard.Keyboard()
message2=" "
# Initialize components for Routine "feedback_2"
feedback_2Clock = core.Clock()
feedback2 = visual.TextStim(win=win, name='feedback2',
text='default text',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "thanks"
thanksClock = core.Clock()
text_6 = visual.TextStim(win=win, name='text_6',
    text='练习结束,请开始正式测试',  # "Practice finished; please begin the formal test"
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0);
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "introduction1"-------
continueRoutine = True
# update component parameters for each repeat
introduction1_2.keys = []
introduction1_2.rt = []
_introduction1_2_allKeys = []
# keep track of which components have finished
introduction1Components = [introduction_1, introduction1_2]
for thisComponent in introduction1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction1"-------
while continueRoutine:
# get current time
t = introduction1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction_1* updates
if introduction_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction_1.frameNStart = frameN # exact frame index
introduction_1.tStart = t # local t and not account for scr refresh
introduction_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction_1, 'tStartRefresh') # time at next scr refresh
introduction_1.setAutoDraw(True)
# *introduction1_2* updates
waitOnFlip = False
if introduction1_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction1_2.frameNStart = frameN # exact frame index
introduction1_2.tStart = t # local t and not account for scr refresh
introduction1_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction1_2, 'tStartRefresh') # time at next scr refresh
introduction1_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(introduction1_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(introduction1_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if introduction1_2.status == STARTED and not waitOnFlip:
theseKeys = introduction1_2.getKeys(keyList=['space'], waitRelease=False)
_introduction1_2_allKeys.extend(theseKeys)
if len(_introduction1_2_allKeys):
introduction1_2.keys = _introduction1_2_allKeys[-1].name # just the last key pressed
introduction1_2.rt = _introduction1_2_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction1"-------
for thisComponent in introduction1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "introduction1" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "introduction5"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_7.keys = []
key_resp_7.rt = []
_key_resp_7_allKeys = []
# keep track of which components have finished
introduction5Components = [introduction_5, key_resp_7]
for thisComponent in introduction5Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction5Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction5"-------
while continueRoutine:
# get current time
t = introduction5Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction5Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction_5* updates
if introduction_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction_5.frameNStart = frameN # exact frame index
introduction_5.tStart = t # local t and not account for scr refresh
introduction_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction_5, 'tStartRefresh') # time at next scr refresh
introduction_5.setAutoDraw(True)
# *key_resp_7* updates
waitOnFlip = False
if key_resp_7.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_7.frameNStart = frameN # exact frame index
key_resp_7.tStart = t # local t and not account for scr refresh
key_resp_7.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_7, 'tStartRefresh') # time at next scr refresh
key_resp_7.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_7.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_7.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_7.status == STARTED and not waitOnFlip:
theseKeys = key_resp_7.getKeys(keyList=['space'], waitRelease=False)
_key_resp_7_allKeys.extend(theseKeys)
if len(_key_resp_7_allKeys):
key_resp_7.keys = _key_resp_7_allKeys[-1].name # just the last key pressed
key_resp_7.rt = _key_resp_7_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction5Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction5"-------
for thisComponent in introduction5Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('introduction_5.started', introduction_5.tStartRefresh)
thisExp.addData('introduction_5.stopped', introduction_5.tStopRefresh)
# the Routine "introduction5" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "tip1"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_4.keys = []
key_resp_4.rt = []
_key_resp_4_allKeys = []
# keep track of which components have finished
tip1Components = [text_2, key_resp_4]
for thisComponent in tip1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
tip1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "tip1"-------
while continueRoutine:
# get current time
t = tip1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=tip1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if text_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_2.frameNStart = frameN # exact frame index
text_2.tStart = t # local t and not account for scr refresh
text_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_2, 'tStartRefresh') # time at next scr refresh
text_2.setAutoDraw(True)
# *key_resp_4* updates
waitOnFlip = False
if key_resp_4.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_4.frameNStart = frameN # exact frame index
key_resp_4.tStart = t # local t and not account for scr refresh
key_resp_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_4, 'tStartRefresh') # time at next scr refresh
key_resp_4.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_4.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_4.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_4.status == STARTED and not waitOnFlip:
theseKeys = key_resp_4.getKeys(keyList=['space'], waitRelease=False)
_key_resp_4_allKeys.extend(theseKeys)
if len(_key_resp_4_allKeys):
key_resp_4.keys = _key_resp_4_allKeys[-1].name # just the last key pressed
key_resp_4.rt = _key_resp_4_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in tip1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "tip1"-------
for thisComponent in tip1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_2.started', text_2.tStartRefresh)
thisExp.addData('text_2.stopped', text_2.tStopRefresh)
# the Routine "tip1" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "introduction2"-------
continueRoutine = True
# update component parameters for each repeat
introduction2_2.keys = []
introduction2_2.rt = []
_introduction2_2_allKeys = []
# keep track of which components have finished
introduction2Components = [introduction2_1, introduction2_2]
for thisComponent in introduction2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction2"-------
while continueRoutine:
# get current time
t = introduction2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction2_1* updates
if introduction2_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction2_1.frameNStart = frameN # exact frame index
introduction2_1.tStart = t # local t and not account for scr refresh
introduction2_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction2_1, 'tStartRefresh') # time at next scr refresh
introduction2_1.setAutoDraw(True)
# *introduction2_2* updates
waitOnFlip = False
if introduction2_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction2_2.frameNStart = frameN # exact frame index
introduction2_2.tStart = t # local t and not account for scr refresh
introduction2_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction2_2, 'tStartRefresh') # time at next scr refresh
introduction2_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(introduction2_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(introduction2_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if introduction2_2.status == STARTED and not waitOnFlip:
theseKeys = introduction2_2.getKeys(keyList=['space'], waitRelease=False)
_introduction2_2_allKeys.extend(theseKeys)
if len(_introduction2_2_allKeys):
introduction2_2.keys = _introduction2_2_allKeys[-1].name # just the last key pressed
introduction2_2.rt = _introduction2_2_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction2"-------
for thisComponent in introduction2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "introduction2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "_0back_pre"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
# keep track of which components have finished
_0back_preComponents = [concentration_pre1]
for thisComponent in _0back_preComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_0back_preClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_0back_pre"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _0back_preClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_0back_preClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *concentration_pre1* updates
if concentration_pre1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
concentration_pre1.frameNStart = frameN # exact frame index
concentration_pre1.tStart = t # local t and not account for scr refresh
concentration_pre1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(concentration_pre1, 'tStartRefresh') # time at next scr refresh
concentration_pre1.setAutoDraw(True)
if concentration_pre1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > concentration_pre1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
concentration_pre1.tStop = t # not accounting for scr refresh
concentration_pre1.frameNStop = frameN # exact frame index
win.timeOnFlip(concentration_pre1, 'tStopRefresh') # time at next scr refresh
concentration_pre1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _0back_preComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_0back_pre"-------
for thisComponent in _0back_preComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('concentration_pre1.started', concentration_pre1.tStartRefresh)
thisExp.addData('concentration_pre1.stopped', concentration_pre1.tStopRefresh)
# set up handler to look after randomisation of conditions etc
loop_0back = data.TrialHandler(nReps=2, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('documents\\document_0back_pre.xlsx'),
seed=None, name='loop_0back')
thisExp.addLoop(loop_0back) # add the loop to the experiment
thisLoop_0back = loop_0back.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisLoop_0back.rgb)
if thisLoop_0back != None:
for paramName in thisLoop_0back:
exec('{} = thisLoop_0back[paramName]'.format(paramName))
for thisLoop_0back in loop_0back:
currentLoop = loop_0back
# abbreviate parameter names if possible (e.g. rgb = thisLoop_0back.rgb)
if thisLoop_0back != None:
for paramName in thisLoop_0back:
exec('{} = thisLoop_0back[paramName]'.format(paramName))
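    # the exec() above rebinds each spreadsheet column from
    # documents\document_0back_pre.xlsx (e.g. num1) as a plain variable for
    # this trial; that is why back0_1.setText(num1) below can use num1 directly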
# ------Prepare to start Routine "_0back"-------
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
back0_1.setText(num1)
key_resp0.keys = []
key_resp0.rt = []
_key_resp0_allKeys = []
# keep track of which components have finished
_0backComponents = [back0_1, key_resp0]
for thisComponent in _0backComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_0backClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_0back"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _0backClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_0backClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *back0_1* updates
if back0_1.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
back0_1.frameNStart = frameN # exact frame index
back0_1.tStart = t # local t and not account for scr refresh
back0_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(back0_1, 'tStartRefresh') # time at next scr refresh
back0_1.setAutoDraw(True)
if back0_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > back0_1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
back0_1.tStop = t # not accounting for scr refresh
back0_1.frameNStop = frameN # exact frame index
win.timeOnFlip(back0_1, 'tStopRefresh') # time at next scr refresh
back0_1.setAutoDraw(False)
# *key_resp0* updates
waitOnFlip = False
if key_resp0.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
key_resp0.frameNStart = frameN # exact frame index
key_resp0.tStart = t # local t and not account for scr refresh
key_resp0.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp0, 'tStartRefresh') # time at next scr refresh
key_resp0.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp0.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp0.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp0.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp0.tStartRefresh + 3-frameTolerance:
# keep track of stop time/frame for later
key_resp0.tStop = t # not accounting for scr refresh
key_resp0.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp0, 'tStopRefresh') # time at next scr refresh
key_resp0.status = FINISHED
if key_resp0.status == STARTED and not waitOnFlip:
theseKeys = key_resp0.getKeys(keyList=['space'], waitRelease=False)
_key_resp0_allKeys.extend(theseKeys)
if len(_key_resp0_allKeys):
key_resp0.keys = _key_resp0_allKeys[-1].name # just the last key pressed
key_resp0.rt = _key_resp0_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _0backComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_0back"-------
for thisComponent in _0backComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop_0back.addData('back0_1.started', back0_1.tStartRefresh)
loop_0back.addData('back0_1.stopped', back0_1.tStopRefresh)
# check responses
if key_resp0.keys in ['', [], None]: # No response was made
key_resp0.keys = None
loop_0back.addData('key_resp0.keys',key_resp0.keys)
if key_resp0.keys != None: # we had a response
loop_0back.addData('key_resp0.rt', key_resp0.rt)
loop_0back.addData('key_resp0.started', key_resp0.tStartRefresh)
loop_0back.addData('key_resp0.stopped', key_resp0.tStopRefresh)
if not key_resp0.keys:
message0="请在三秒内按键"
# ------Prepare to start Routine "feedback_0"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
text_3.setText(message0)
# keep track of which components have finished
feedback_0Components = [text_3]
for thisComponent in feedback_0Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
feedback_0Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "feedback_0"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedback_0Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=feedback_0Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_3* updates
if text_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_3.frameNStart = frameN # exact frame index
text_3.tStart = t # local t and not account for scr refresh
text_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_3, 'tStartRefresh') # time at next scr refresh
text_3.setAutoDraw(True)
if text_3.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_3.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
text_3.tStop = t # not accounting for scr refresh
text_3.frameNStop = frameN # exact frame index
win.timeOnFlip(text_3, 'tStopRefresh') # time at next scr refresh
text_3.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedback_0Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "feedback_0"-------
for thisComponent in feedback_0Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop_0back.addData('text_3.started', text_3.tStartRefresh)
loop_0back.addData('text_3.stopped', text_3.tStopRefresh)
thisExp.nextEntry()
# completed 2 repeats of 'loop_0back'
# ------Prepare to start Routine "tip2"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_5.keys = []
key_resp_5.rt = []
_key_resp_5_allKeys = []
# keep track of which components have finished
tip2Components = [text, key_resp_5]
for thisComponent in tip2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
tip2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "tip2"-------
while continueRoutine:
# get current time
t = tip2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=tip2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text.frameNStart = frameN # exact frame index
text.tStart = t # local t and not account for scr refresh
text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text, 'tStartRefresh') # time at next scr refresh
text.setAutoDraw(True)
# *key_resp_5* updates
waitOnFlip = False
if key_resp_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_5.frameNStart = frameN # exact frame index
key_resp_5.tStart = t # local t and not account for scr refresh
key_resp_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_5, 'tStartRefresh') # time at next scr refresh
key_resp_5.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_5.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_5.status == STARTED and not waitOnFlip:
theseKeys = key_resp_5.getKeys(keyList=['space'], waitRelease=False)
_key_resp_5_allKeys.extend(theseKeys)
if len(_key_resp_5_allKeys):
key_resp_5.keys = _key_resp_5_allKeys[-1].name # just the last key pressed
key_resp_5.rt = _key_resp_5_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in tip2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "tip2"-------
for thisComponent in tip2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text.started', text.tStartRefresh)
thisExp.addData('text.stopped', text.tStopRefresh)
# the Routine "tip2" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "introduction3"-------
continueRoutine = True
# update component parameters for each repeat
introduction3_2.keys = []
introduction3_2.rt = []
_introduction3_2_allKeys = []
# keep track of which components have finished
introduction3Components = [introduction3_1, introduction3_2]
for thisComponent in introduction3Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction3Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction3"-------
while continueRoutine:
# get current time
t = introduction3Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction3Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction3_1* updates
if introduction3_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction3_1.frameNStart = frameN # exact frame index
introduction3_1.tStart = t # local t and not account for scr refresh
introduction3_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction3_1, 'tStartRefresh') # time at next scr refresh
introduction3_1.setAutoDraw(True)
# *introduction3_2* updates
waitOnFlip = False
if introduction3_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction3_2.frameNStart = frameN # exact frame index
introduction3_2.tStart = t # local t and not account for scr refresh
introduction3_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction3_2, 'tStartRefresh') # time at next scr refresh
introduction3_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(introduction3_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(introduction3_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if introduction3_2.status == STARTED and not waitOnFlip:
theseKeys = introduction3_2.getKeys(keyList=['space'], waitRelease=False)
_introduction3_2_allKeys.extend(theseKeys)
if len(_introduction3_2_allKeys):
introduction3_2.keys = _introduction3_2_allKeys[-1].name # just the last key pressed
introduction3_2.rt = _introduction3_2_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction3Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction3"-------
for thisComponent in introduction3Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "introduction3" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "_1back_pre"-------
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
# keep track of which components have finished
_1back_preComponents = [concentration1_pre, back1_pre]
for thisComponent in _1back_preComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_1back_preClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_1back_pre"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _1back_preClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_1back_preClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *concentration1_pre* updates
if concentration1_pre.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
concentration1_pre.frameNStart = frameN # exact frame index
concentration1_pre.tStart = t # local t and not account for scr refresh
concentration1_pre.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(concentration1_pre, 'tStartRefresh') # time at next scr refresh
concentration1_pre.setAutoDraw(True)
if concentration1_pre.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > concentration1_pre.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
concentration1_pre.tStop = t # not accounting for scr refresh
concentration1_pre.frameNStop = frameN # exact frame index
win.timeOnFlip(concentration1_pre, 'tStopRefresh') # time at next scr refresh
concentration1_pre.setAutoDraw(False)
# *back1_pre* updates
if back1_pre.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
back1_pre.frameNStart = frameN # exact frame index
back1_pre.tStart = t # local t and not account for scr refresh
back1_pre.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(back1_pre, 'tStartRefresh') # time at next scr refresh
back1_pre.setAutoDraw(True)
if back1_pre.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > back1_pre.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
back1_pre.tStop = t # not accounting for scr refresh
back1_pre.frameNStop = frameN # exact frame index
win.timeOnFlip(back1_pre, 'tStopRefresh') # time at next scr refresh
back1_pre.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _1back_preComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_1back_pre"-------
for thisComponent in _1back_preComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('concentration1_pre.started', concentration1_pre.tStartRefresh)
thisExp.addData('concentration1_pre.stopped', concentration1_pre.tStopRefresh)
thisExp.addData('back1_pre.started', back1_pre.tStartRefresh)
thisExp.addData('back1_pre.stopped', back1_pre.tStopRefresh)
# set up handler to look after randomisation of conditions etc
loop_1back = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('documents\\document_1back_pre.xlsx'),
seed=None, name='loop_1back')
thisExp.addLoop(loop_1back) # add the loop to the experiment
thisLoop_1back = loop_1back.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisLoop_1back.rgb)
if thisLoop_1back != None:
for paramName in thisLoop_1back:
exec('{} = thisLoop_1back[paramName]'.format(paramName))
for thisLoop_1back in loop_1back:
currentLoop = loop_1back
# abbreviate parameter names if possible (e.g. rgb = thisLoop_1back.rgb)
if thisLoop_1back != None:
for paramName in thisLoop_1back:
exec('{} = thisLoop_1back[paramName]'.format(paramName))
# ------Prepare to start Routine "_1back"-------
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
back1_1.setText(num2)
key_resp_2.keys = []
key_resp_2.rt = []
_key_resp_2_allKeys = []
# keep track of which components have finished
_1backComponents = [back1_1, key_resp_2]
for thisComponent in _1backComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_1backClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_1back"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _1backClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_1backClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *back1_1* updates
if back1_1.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
back1_1.frameNStart = frameN # exact frame index
back1_1.tStart = t # local t and not account for scr refresh
back1_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(back1_1, 'tStartRefresh') # time at next scr refresh
back1_1.setAutoDraw(True)
if back1_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > back1_1.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
back1_1.tStop = t # not accounting for scr refresh
back1_1.frameNStop = frameN # exact frame index
win.timeOnFlip(back1_1, 'tStopRefresh') # time at next scr refresh
back1_1.setAutoDraw(False)
# *key_resp_2* updates
waitOnFlip = False
if key_resp_2.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.tStart = t # local t and not account for scr refresh
key_resp_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_2, 'tStartRefresh') # time at next scr refresh
key_resp_2.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_2.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_2.tStartRefresh + 3-frameTolerance:
# keep track of stop time/frame for later
key_resp_2.tStop = t # not accounting for scr refresh
key_resp_2.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_2, 'tStopRefresh') # time at next scr refresh
key_resp_2.status = FINISHED
if key_resp_2.status == STARTED and not waitOnFlip:
theseKeys = key_resp_2.getKeys(keyList=['left', 'right'], waitRelease=False)
_key_resp_2_allKeys.extend(theseKeys)
if len(_key_resp_2_allKeys):
key_resp_2.keys = _key_resp_2_allKeys[-1].name # just the last key pressed
key_resp_2.rt = _key_resp_2_allKeys[-1].rt
# was this correct?
if (key_resp_2.keys == str(num2_corr)) or (key_resp_2.keys == num2_corr):
key_resp_2.corr = 1
else:
key_resp_2.corr = 0
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _1backComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_1back"-------
for thisComponent in _1backComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop_1back.addData('back1_1.started', back1_1.tStartRefresh)
loop_1back.addData('back1_1.stopped', back1_1.tStopRefresh)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys = None
# was no response the correct answer?!
if str(num2_corr).lower() == 'none':
key_resp_2.corr = 1  # correct non-response
else:
key_resp_2.corr = 0  # failed to respond (incorrectly)
# store data for loop_1back (TrialHandler)
loop_1back.addData('key_resp_2.keys',key_resp_2.keys)
loop_1back.addData('key_resp_2.corr', key_resp_2.corr)
if key_resp_2.keys != None: # we had a response
loop_1back.addData('key_resp_2.rt', key_resp_2.rt)
loop_1back.addData('key_resp_2.started', key_resp_2.tStartRefresh)
loop_1back.addData('key_resp_2.stopped', key_resp_2.tStopRefresh)
if not key_resp_2.keys:
message1="请在三秒内按键"  # "Please press a key within three seconds"
else:
if key_resp_2.corr:
message1="回答正确"  # "Correct"
else:
message1="回答错误"  # "Incorrect"
# ------Prepare to start Routine "feedback_1"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
feedback1.setText(message1)
# keep track of which components have finished
feedback_1Components = [feedback1]
for thisComponent in feedback_1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
feedback_1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "feedback_1"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedback_1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=feedback_1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *feedback1* updates
if feedback1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
feedback1.frameNStart = frameN # exact frame index
feedback1.tStart = t # local t and not account for scr refresh
feedback1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(feedback1, 'tStartRefresh') # time at next scr refresh
feedback1.setAutoDraw(True)
if feedback1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > feedback1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
feedback1.tStop = t # not accounting for scr refresh
feedback1.frameNStop = frameN # exact frame index
win.timeOnFlip(feedback1, 'tStopRefresh') # time at next scr refresh
feedback1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedback_1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "feedback_1"-------
for thisComponent in feedback_1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop_1back.addData('feedback1.started', feedback1.tStartRefresh)
loop_1back.addData('feedback1.stopped', feedback1.tStopRefresh)
thisExp.nextEntry()
# completed 1 repeats of 'loop_1back'
# ------Prepare to start Routine "tip3"-------
continueRoutine = True
# update component parameters for each repeat
key_resp_6.keys = []
key_resp_6.rt = []
_key_resp_6_allKeys = []
# keep track of which components have finished
tip3Components = [tip_3, key_resp_6]
for thisComponent in tip3Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
tip3Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "tip3"-------
while continueRoutine:
# get current time
t = tip3Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=tip3Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *tip_3* updates
if tip_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
tip_3.frameNStart = frameN # exact frame index
tip_3.tStart = t # local t and not account for scr refresh
tip_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(tip_3, 'tStartRefresh') # time at next scr refresh
tip_3.setAutoDraw(True)
# *key_resp_6* updates
waitOnFlip = False
if key_resp_6.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp_6.frameNStart = frameN # exact frame index
key_resp_6.tStart = t # local t and not account for scr refresh
key_resp_6.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_6, 'tStartRefresh') # time at next scr refresh
key_resp_6.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_6.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_6.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_6.status == STARTED and not waitOnFlip:
theseKeys = key_resp_6.getKeys(keyList=['space'], waitRelease=False)
_key_resp_6_allKeys.extend(theseKeys)
if len(_key_resp_6_allKeys):
key_resp_6.keys = _key_resp_6_allKeys[-1].name # just the last key pressed
key_resp_6.rt = _key_resp_6_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in tip3Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "tip3"-------
for thisComponent in tip3Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('tip_3.started', tip_3.tStartRefresh)
thisExp.addData('tip_3.stopped', tip_3.tStopRefresh)
# the Routine "tip3" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "introduction4"-------
continueRoutine = True
# update component parameters for each repeat
key_resp.keys = []
key_resp.rt = []
_key_resp_allKeys = []
# keep track of which components have finished
introduction4Components = [introduction4_1, key_resp]
for thisComponent in introduction4Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introduction4Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "introduction4"-------
while continueRoutine:
# get current time
t = introduction4Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introduction4Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *introduction4_1* updates
if introduction4_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
introduction4_1.frameNStart = frameN # exact frame index
introduction4_1.tStart = t # local t and not account for scr refresh
introduction4_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(introduction4_1, 'tStartRefresh') # time at next scr refresh
introduction4_1.setAutoDraw(True)
# *key_resp* updates
waitOnFlip = False
if key_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp.frameNStart = frameN # exact frame index
key_resp.tStart = t # local t and not account for scr refresh
key_resp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp, 'tStartRefresh') # time at next scr refresh
key_resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp.status == STARTED and not waitOnFlip:
theseKeys = key_resp.getKeys(keyList=['space'], waitRelease=False)
_key_resp_allKeys.extend(theseKeys)
if len(_key_resp_allKeys):
key_resp.keys = _key_resp_allKeys[-1].name # just the last key pressed
key_resp.rt = _key_resp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introduction4Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "introduction4"-------
for thisComponent in introduction4Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('introduction4_1.started', introduction4_1.tStartRefresh)
thisExp.addData('introduction4_1.stopped', introduction4_1.tStopRefresh)
# the Routine "introduction4" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "_2back_pre"-------
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
# keep track of which components have finished
_2back_preComponents = [concentration_pre, text_4, text_5]
for thisComponent in _2back_preComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_2back_preClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_2back_pre"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _2back_preClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_2back_preClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *concentration_pre* updates
if concentration_pre.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
concentration_pre.frameNStart = frameN # exact frame index
concentration_pre.tStart = t # local t and not account for scr refresh
concentration_pre.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(concentration_pre, 'tStartRefresh') # time at next scr refresh
concentration_pre.setAutoDraw(True)
if concentration_pre.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > concentration_pre.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
concentration_pre.tStop = t # not accounting for scr refresh
concentration_pre.frameNStop = frameN # exact frame index
win.timeOnFlip(concentration_pre, 'tStopRefresh') # time at next scr refresh
concentration_pre.setAutoDraw(False)
# *text_4* updates
if text_4.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
text_4.frameNStart = frameN # exact frame index
text_4.tStart = t # local t and not account for scr refresh
text_4.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_4, 'tStartRefresh') # time at next scr refresh
text_4.setAutoDraw(True)
if text_4.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_4.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
text_4.tStop = t # not accounting for scr refresh
text_4.frameNStop = frameN # exact frame index
win.timeOnFlip(text_4, 'tStopRefresh') # time at next scr refresh
text_4.setAutoDraw(False)
# *text_5* updates
if text_5.status == NOT_STARTED and tThisFlip >= 3-frameTolerance:
# keep track of start time/frame for later
text_5.frameNStart = frameN # exact frame index
text_5.tStart = t # local t and not account for scr refresh
text_5.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_5, 'tStartRefresh') # time at next scr refresh
text_5.setAutoDraw(True)
if text_5.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_5.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
text_5.tStop = t # not accounting for scr refresh
text_5.frameNStop = frameN # exact frame index
win.timeOnFlip(text_5, 'tStopRefresh') # time at next scr refresh
text_5.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _2back_preComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_2back_pre"-------
for thisComponent in _2back_preComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('concentration_pre.started', concentration_pre.tStartRefresh)
thisExp.addData('concentration_pre.stopped', concentration_pre.tStopRefresh)
thisExp.addData('text_4.started', text_4.tStartRefresh)
thisExp.addData('text_4.stopped', text_4.tStopRefresh)
thisExp.addData('text_5.started', text_5.tStartRefresh)
thisExp.addData('text_5.stopped', text_5.tStopRefresh)
# set up handler to look after randomisation of conditions etc
loop2back = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('documents\\document_2back_pre.xlsx'),
seed=None, name='loop2back')
thisExp.addLoop(loop2back) # add the loop to the experiment
thisLoop2back = loop2back.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisLoop2back.rgb)
if thisLoop2back != None:
for paramName in thisLoop2back:
exec('{} = thisLoop2back[paramName]'.format(paramName))
for thisLoop2back in loop2back:
currentLoop = loop2back
# abbreviate parameter names if possible (e.g. rgb = thisLoop2back.rgb)
if thisLoop2back != None:
for paramName in thisLoop2back:
exec('{} = thisLoop2back[paramName]'.format(paramName))
# ------Prepare to start Routine "_2back"-------
continueRoutine = True
routineTimer.add(4.000000)
# update component parameters for each repeat
back2_1.setText(num3)
key_resp_3.keys = []
key_resp_3.rt = []
_key_resp_3_allKeys = []
# keep track of which components have finished
_2backComponents = [back2_1, key_resp_3]
for thisComponent in _2backComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
_2backClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "_2back"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = _2backClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=_2backClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *back2_1* updates
if back2_1.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
back2_1.frameNStart = frameN # exact frame index
back2_1.tStart = t # local t and not account for scr refresh
back2_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(back2_1, 'tStartRefresh') # time at next scr refresh
back2_1.setAutoDraw(True)
if back2_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > back2_1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
back2_1.tStop = t # not accounting for scr refresh
back2_1.frameNStop = frameN # exact frame index
win.timeOnFlip(back2_1, 'tStopRefresh') # time at next scr refresh
back2_1.setAutoDraw(False)
# *key_resp_3* updates
waitOnFlip = False
if key_resp_3.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
key_resp_3.frameNStart = frameN # exact frame index
key_resp_3.tStart = t # local t and not account for scr refresh
key_resp_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp_3, 'tStartRefresh') # time at next scr refresh
key_resp_3.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp_3.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp_3.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > key_resp_3.tStartRefresh + 3-frameTolerance:
# keep track of stop time/frame for later
key_resp_3.tStop = t # not accounting for scr refresh
key_resp_3.frameNStop = frameN # exact frame index
win.timeOnFlip(key_resp_3, 'tStopRefresh') # time at next scr refresh
key_resp_3.status = FINISHED
if key_resp_3.status == STARTED and not waitOnFlip:
theseKeys = key_resp_3.getKeys(keyList=['left', 'right'], waitRelease=False)
_key_resp_3_allKeys.extend(theseKeys)
if len(_key_resp_3_allKeys):
key_resp_3.keys = _key_resp_3_allKeys[-1].name # just the last key pressed
key_resp_3.rt = _key_resp_3_allKeys[-1].rt
# was this correct?
if (key_resp_3.keys == str(num3_corr)) or (key_resp_3.keys == num3_corr):
key_resp_3.corr = 1
else:
key_resp_3.corr = 0
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in _2backComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "_2back"-------
for thisComponent in _2backComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop2back.addData('back2_1.started', back2_1.tStartRefresh)
loop2back.addData('back2_1.stopped', back2_1.tStopRefresh)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys = None
# was no response the correct answer?!
if str(num3_corr).lower() == 'none':
key_resp_3.corr = 1  # correct non-response
else:
key_resp_3.corr = 0  # failed to respond (incorrectly)
# store data for loop2back (TrialHandler)
loop2back.addData('key_resp_3.keys',key_resp_3.keys)
loop2back.addData('key_resp_3.corr', key_resp_3.corr)
if key_resp_3.keys != None: # we had a response
loop2back.addData('key_resp_3.rt', key_resp_3.rt)
loop2back.addData('key_resp_3.started', key_resp_3.tStartRefresh)
loop2back.addData('key_resp_3.stopped', key_resp_3.tStopRefresh)
if not key_resp_3.keys:
message2="请在三秒内按键"  # "Please press a key within three seconds"
else:
if key_resp_3.corr:
message2="回答正确"  # "Correct"
else:
message2="回答错误"  # "Incorrect"
# ------Prepare to start Routine "feedback_2"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
feedback2.setText(message2)
# keep track of which components have finished
feedback_2Components = [feedback2]
for thisComponent in feedback_2Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
feedback_2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "feedback_2"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = feedback_2Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=feedback_2Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *feedback2* updates
if feedback2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
feedback2.frameNStart = frameN # exact frame index
feedback2.tStart = t # local t and not account for scr refresh
feedback2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(feedback2, 'tStartRefresh') # time at next scr refresh
feedback2.setAutoDraw(True)
if feedback2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > feedback2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
feedback2.tStop = t # not accounting for scr refresh
feedback2.frameNStop = frameN # exact frame index
win.timeOnFlip(feedback2, 'tStopRefresh') # time at next scr refresh
feedback2.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in feedback_2Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "feedback_2"-------
for thisComponent in feedback_2Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
loop2back.addData('feedback2.started', feedback2.tStartRefresh)
loop2back.addData('feedback2.stopped', feedback2.tStopRefresh)
thisExp.nextEntry()
# completed 1 repeats of 'loop2back'
# ------Prepare to start Routine "thanks"-------
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
# keep track of which components have finished
thanksComponents = [text_6]
for thisComponent in thanksComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
thanksClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "thanks"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = thanksClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=thanksClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_6* updates
if text_6.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_6.frameNStart = frameN # exact frame index
text_6.tStart = t # local t and not account for scr refresh
text_6.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_6, 'tStartRefresh') # time at next scr refresh
text_6.setAutoDraw(True)
if text_6.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_6.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
text_6.tStop = t # not accounting for scr refresh
text_6.frameNStop = frameN # exact frame index
win.timeOnFlip(text_6, 'tStopRefresh') # time at next scr refresh
text_6.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in thanksComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "thanks"-------
for thisComponent in thanksComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_6.started', text_6.tStartRefresh)
thisExp.addData('text_6.stopped', text_6.tStopRefresh)
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| 45.842132
| 121
| 0.66599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 33,398
| 0.362927
|
541db46f26a3ec258d9d85654ca98eae0553065a
| 3,271
|
py
|
Python
|
pysal/contrib/geotable/utils.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pysal/contrib/geotable/utils.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
pysal/contrib/geotable/utils.py
|
cubensys/pysal
|
8d50990f6e6603ba79ae1a887a20a1e3a0734e51
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-07-19T01:46:17.000Z
|
2021-07-19T01:46:17.000Z
|
from ...cg import asShape as pShape
from ...common import requires as _requires
from warnings import warn
@_requires('geopandas')
def to_df(df, geom_col='geometry', **kw):
"""
Convert a Geopandas dataframe into a normal pandas dataframe with a column
containing PySAL shapes. Always returns a copy.
Arguments
---------
df : geopandas.GeoDataFrame
a geopandas dataframe (or pandas dataframe) with a column
containing geo-interfaced shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to pandas.DataFrame(...,**kw)
See Also
--------
pandas.DataFrame
"""
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
out = df.copy(deep=True)
out[geom_col] = out[geom_col].apply(pShape)
return pd.DataFrame(out, **kw)
@_requires('geopandas')
def to_gdf(df, geom_col='geometry', **kw):
"""
Convert a pandas dataframe with a geometry column into a GeoPandas dataframe. Always returns a copy.
Arguments
---------
df : pandas.DataFrame
a pandas dataframe with a column containing geo-interfaced
shapes
geom_col: str
string denoting which column in the df contains the geometry
**kw : keyword options
options passed directly to geopandas.GeoDataFrame(...,**kw)
See Also
--------
geopandas.GeoDataFrame
"""
from geopandas import GeoDataFrame
from shapely.geometry import asShape as sShape
out = df.copy(deep=True)
out[geom_col] = out[geom_col].apply(sShape)
out = GeoDataFrame(out, geometry=geom_col, **kw)
return out
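# Editor's note: a minimal round-trip sketch, not from the original source
# (hypothetical file name; assumes geopandas and shapely are installed):
#
# import geopandas as gpd
# gdf = gpd.read_file('tracts.shp') # hypothetical shapefile
# df = to_df(gdf) # geometry column now holds PySAL shapes
# gdf2 = to_gdf(df) # back to shapely geometries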
def insert_metadata(df, obj, name=None, inplace=False, overwrite=False):
"""
Insert an object into a dataframe's metadata with a given key.
Arguments
------------
df : pd.DataFrame
dataframe to insert into the metadata
obj : object
object desired to insert into the dataframe
name : string
key of the object to use. Will be available as
an attribute of the dataframe.
inplace : bool
flag to denote whether to operate on a copy
of the dataframe or not.
overwrite : bool
flag to denote whether to replace existing entry
in metadata or not.
Returns
--------
If inplace, changes dataframe implicitly.
Else, returns a new dataframe with added metadata.
"""
if not inplace:
new = df.copy(deep=True)
insert_metadata(new, obj, name=name,
inplace=True, overwrite=overwrite)
return new
if name is None:
name = type(obj).__name__
if hasattr(df, name):
if overwrite:
warn('Overwriting attribute {}! This may break the dataframe!'.format(name))
else:
raise Exception('Dataframe already has attribute {}. Cowardly refusing '
'to break dataframe. '.format(name))
df._metadata.append(name)
df.__setattr__(name, obj)
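# Editor's note: a minimal usage sketch (hypothetical names; assumes pandas):
#
# import pandas as pd
# frame = pd.DataFrame({'a': [1, 2]})
# frame = insert_metadata(frame, {'crs': 'EPSG:4326'}, name='geo_meta')
# frame.geo_meta # -> {'crs': 'EPSG:4326'}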
| 33.721649
| 100
| 0.602262
| 0
| 0
| 0
| 0
| 1,674
| 0.51177
| 0
| 0
| 2,107
| 0.644146
|
541de3fcd94ea7163228d56302142fff219657ee
| 287
|
py
|
Python
|
torrent/torrent_tracker/whitelist_api/urls.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | 3
|
2021-04-17T10:20:26.000Z
|
2022-03-08T07:36:13.000Z
|
torrent/torrent_tracker/whitelist_api/urls.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
torrent/torrent_tracker/whitelist_api/urls.py
|
projectpai/paipass
|
8b8e70b6808bf026cf957e240c7eed7bfcf4c55d
|
[
"MIT"
] | null | null | null |
from django.urls import path
from whitelist_api.views import AddTorrentInfoHash, RemoveTorrentInfoHash
app_name = 'whitelist_api'
urlpatterns = [
path('add-torrent-info-hash/', AddTorrentInfoHash.as_view()),
path('del-torrent-info-hash/', RemoveTorrentInfoHash.as_view()),
]
| 23.916667
| 73
| 0.766551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.219512
|
541e93e13927b8ffff8b83f86083ffe9dd7cdee8
| 1,847
|
py
|
Python
|
vega/core/metrics/pytorch/flops_and_params.py
|
qixiuai/vega
|
3e6588ea4aedb03e3594a549a97ffdb86adb88d1
|
[
"MIT"
] | null | null | null |
vega/core/metrics/pytorch/flops_and_params.py
|
qixiuai/vega
|
3e6588ea4aedb03e3594a549a97ffdb86adb88d1
|
[
"MIT"
] | null | null | null |
vega/core/metrics/pytorch/flops_and_params.py
|
qixiuai/vega
|
3e6588ea4aedb03e3594a549a97ffdb86adb88d1
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""PyTorch model counter of FLOPS and parameters."""
from copy import deepcopy
import torch.nn as nn
from thop import profile
from thop.profile import register_hooks
from thop.vision.basic_hooks import count_softmax
def add_new_hooks(custom_hooks):
"""Add new register hooks to custom hooks."""
add_register_hooks = {
nn.PReLU: register_hooks[nn.ReLU],
nn.ELU: register_hooks[nn.ReLU],
nn.Softmax: count_softmax
}
for k, v in add_register_hooks.items():
if k not in register_hooks and k not in custom_hooks:
custom_hooks[k] = v
return custom_hooks
def calc_model_flops_params(model, input, custom_hooks=None, verbose=False):
"""Pytorch model flops and parameters calculation.
:param model: pytorch model
:type model: torch.nn.Module
:param input: pytorch input tensor
:type input: torch.Tensor
:param custom_hooks: hooks defined by outside customer
:type custom_hooks: dict or None
:param verbose: whether to print op types that are not in the hook collection
:type verbose: bool, default False
:return: flops and params
:rtype: float, float
"""
_model = deepcopy(model)
if custom_hooks is None:
custom_hooks = {}
custom_hooks = add_new_hooks(custom_hooks)
inputs = (input, )
flops, params = profile(_model, inputs, custom_hooks, verbose)
del _model
return flops, params
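# Editor's note: a minimal usage sketch, not part of the original module
# (assumes torch and thop are installed; the toy model and input shape are
# illustrative only):
#
# import torch
# toy = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Softmax(dim=1))
# flops, params = calc_model_flops_params(toy, torch.randn(1, 8))
# print('flops: {:.0f}, params: {:.0f}'.format(flops, params))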
| 33.581818
| 76
| 0.714131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 974
| 0.527342
|
5421063007f16d8c808280360658d8af84912272
| 524
|
py
|
Python
|
Projects/project04/pop_shrink.py
|
tonysulfaro/CSE-331
|
b4f743b1127ebe531ba8417420d043e9c149135a
|
[
"MIT"
] | 2
|
2019-02-13T17:49:18.000Z
|
2020-09-30T04:51:53.000Z
|
Projects/project04/pop_shrink.py
|
tonysulfaro/CSE-331
|
b4f743b1127ebe531ba8417420d043e9c149135a
|
[
"MIT"
] | null | null | null |
Projects/project04/pop_shrink.py
|
tonysulfaro/CSE-331
|
b4f743b1127ebe531ba8417420d043e9c149135a
|
[
"MIT"
] | null | null | null |
from Stack import Stack
def main():
stack = Stack()
stack.push(0)
stack.push(1)
stack.push(2)
stack.push(3)
assert stack.data == [0, 1, 2, 3]
assert stack.capacity == 4
assert stack.size == 4
popped = stack.pop()
assert popped == 3
popped = stack.pop()
assert popped == 2
print(stack)
assert stack.data == [0, 1]
assert stack.capacity == 2
assert stack.size == 2
print("Expected:", "['0', '1'] Capacity: 2")
print("Output:", str(stack))
if __name__ == '__main__':
main()
| 17.466667
| 48
| 0.564885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.083969
|
5421bfc32b86a8ee54dfb925ef8eac6e4d16b3b0
| 212
|
py
|
Python
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | 1
|
2021-09-04T05:34:26.000Z
|
2021-09-04T05:34:26.000Z
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | 1
|
2021-03-14T19:26:01.000Z
|
2021-03-16T18:46:38.000Z
|
pycache/__init__.py
|
HuiiBuh/pycache
|
300bd51f9e575fd77014d6c86497dd58f313f752
|
[
"MIT"
] | null | null | null |
__version__ = '0.3.2'
# noinspection PyUnresolvedReferences
from ._cache._cache import cache
# noinspection PyUnresolvedReferences
from ._scheduler._scheduler import add_schedule, schedule, ScheduleSubscription
| 30.285714
| 79
| 0.84434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.382075
|
5423d564159a63ea1cc7a476c45ce6fae5bb3b4a
| 1,670
|
py
|
Python
|
bender/tests/test_predict_pipeline.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 2
|
2021-12-17T15:45:40.000Z
|
2021-12-18T14:15:43.000Z
|
bender/tests/test_predict_pipeline.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 2
|
2022-03-30T14:31:12.000Z
|
2022-03-31T14:25:25.000Z
|
bender/tests/test_predict_pipeline.py
|
otovo/bender
|
b64f0656658287b932ce44d52e6035682652fe33
|
[
"Apache-2.0"
] | 1
|
2021-12-19T17:16:38.000Z
|
2021-12-19T17:16:38.000Z
|
import numpy as np
import pytest
from pandas.core.frame import DataFrame
from bender.importers import DataImporters
from bender.model_loaders import ModelLoaders
from bender.model_trainer.decision_tree import DecisionTreeClassifierTrainer
from bender.split_strategies import SplitStrategies
pytestmark = pytest.mark.asyncio
async def test_predict_data() -> None:
model, data_set = await (
DataImporters.literal(DataFrame({'x': [0, 1], 'y': [0, 1], 'output': [0, 1]}))
# No test set
.split(SplitStrategies.ratio(1))
.train(DecisionTreeClassifierTrainer(), input_features=['x', 'y'], target_feature='output')
.run()
)
test_data = DataFrame({'x': [2, -3, 4], 'y': [2, -3, 4]})
expected = [1, 0, 1]
_, _, result = await (ModelLoaders.literal(model).import_data(DataImporters.literal(test_data)).predict().run())
assert np.all(expected == result)
"""
Supervised Regression
Vector[float] -> float
.train(
RegresionModels.linear(),
input_features=["area", "location"], # floats
target_feature="price" # float
)
"""
"""
Supervised Classification
Vector[float / int / bool / str] -> str / bool / int
.train(
ClassificationModels.DecisionTree(),
input_features=["sepal_length", "sepal_width"], # float / int / bool / str
target_feature="class_name" # str / bool / int
)
# Should only be available for clustering / classification problems
.predict_probability(
labels={
"setosa": "is_setosa_probability",
"versicolor": "is_versicolor_probability",
}
)
"""
| 27.833333
| 116
| 0.640719
| 0
| 0
| 0
| 0
| 0
| 0
| 1,341
| 0.802994
| 792
| 0.474251
|
5423f2c125c3cb768b4a0cd17051477a73148c1a
| 16,691
|
py
|
Python
|
gbot/libs/helper.py
|
dgw/goshu
|
3cba300d92f9bde58cf7348ddc3183d52b4c4bcf
|
[
"ISC"
] | 5
|
2015-01-03T00:08:58.000Z
|
2017-05-05T11:57:03.000Z
|
gbot/libs/helper.py
|
dgw/goshu
|
3cba300d92f9bde58cf7348ddc3183d52b4c4bcf
|
[
"ISC"
] | 3
|
2016-02-07T07:35:13.000Z
|
2016-11-26T19:29:02.000Z
|
gbot/libs/helper.py
|
dgw/goshu
|
3cba300d92f9bde58cf7348ddc3183d52b4c4bcf
|
[
"ISC"
] | 1
|
2020-11-12T09:09:06.000Z
|
2020-11-12T09:09:06.000Z
|
#!/usr/bin/env python3
# Goshu IRC Bot
# written by Daniel Oaks <daniel@danieloaks.net>
# licensed under the ISC license
"""extends several builtin functions and provides helper functions
The default Python library is extensive and well-stocked. Sometimes,
however, you wish a small task were taken care of for you. This module
is chock full of little extensions and helper functions I've needed while
writing Goshu.
Small, interesting, self-contained functions that can probably be reused
elsewhere.
"""
import collections.abc
import datetime
import imp
import json
import os
import re
import string
import sys
import urllib.parse
from girc.formatting import escape
from http_status import Status
from pyquery import PyQuery as pq
import importlib
import requests
import xml.sax.saxutils as saxutils
import yaml
valid_filename_chars = string.ascii_letters + string.digits + '#._- '
def true_or_false(in_str):
"""Returns True/False if string represents it, else None."""
in_str = in_str.lower()
if in_str.startswith(('true', 'y', '1', 'on')):
return True
elif in_str.startswith(('false', 'n', '0', 'off')):
return False
else:
return None
def split_num(line, chars=' ', maxsplits=1, empty=''):
"""/lazy/ wrapper, to stop us having to bounds-check when splitting.
Arguments:
line -- line to split
chars -- character(s) to split line on
maxsplits -- how many split items are returned
empty -- character to put in place of nothing
Returns:
line.split(chars, items); return value is padded until `maxsplits + 1` number of values
are present"""
line = line.split(chars, maxsplits)
while len(line) <= maxsplits:
line.append(empty)
return line
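# Editor's note: quick illustrative calls (results follow the padding rule above):
#
# split_num('set key value', maxsplits=2) # -> ['set', 'key', 'value']
# split_num('set', maxsplits=2) # -> ['set', '', '']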
def is_ok(func, prompt, blank='', clearline=False):
"""Prompt the user for yes/no and returns True/False
Arguments:
prompt -- Prompt for the user
blank -- If True, a blank response will return True, ditto for False, the default ''
will not accept blank responses and ask until the user gives an appropriate
response
Returns:
True if user accepts, False if user does not"""
while True:
ok = func(prompt).lower().strip()
if len(ok) > 0:
if ok[0] == 'y' or ok[0] == 't' or ok[0] == '1': # yes, true, 1
return True
elif ok[0] == 'n' or ok[0] == 'f' or ok[0] == '0': # no, false, 0
return False
else:
if blank is True:
return True
elif blank is False:
return False
def bytes_to_str(bytes, base=2, precision=0):
"""Convert number of bytes to a human-readable format
Arguments:
bytes -- number of bytes
base -- base 2 'regular' multiplexer, or base 10 'storage' multiplexer
precision -- number of decimal places to output
Returns:
Human-readable string such as '1.32M'
"""
if base == 2:
multiplexer = 1024
elif base == 10:
multiplexer = 1000
else:
return None # raise error
precision_string = '%.' + str(precision) + 'f'
mebi_convert = True
if bytes >= (multiplexer ** 4):
terabytes = float(bytes / (multiplexer ** 4))
output = (precision_string % terabytes) + 'T'
elif bytes >= (multiplexer ** 3):
gigabytes = float(bytes / (multiplexer ** 3))
output = (precision_string % gigabytes) + 'G'
elif bytes >= (multiplexer ** 2):
megabytes = float(bytes / (multiplexer ** 2))
output = (precision_string % megabytes) + 'M'
elif bytes >= (multiplexer ** 1):
kilobytes = float(bytes / (multiplexer ** 1))
output = (precision_string % kilobytes) + 'K'
else:
output = (precision_string % float(bytes)) + 'B'
mebi_convert = False
# mebibytes, gibibytes, and all those weird HDD-manufacturer terms
if base == 10 and mebi_convert:
num, base = output[:-1], output[-1]
output = num + base.lower() + 'B'
return output
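# Editor's note: illustrative values, worked from the rules above:
#
# bytes_to_str(1536) # -> '2K' (base 2, precision 0)
# bytes_to_str(1536, precision=1) # -> '1.5K'
# bytes_to_str(1500, base=10) # -> '2kB' (base 10 lowercases the prefix)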
def time_metric(secs=60, mins=0):
"""Returns user-readable string representing given number of seconds."""
if mins:
secs += (mins * 60)
time = ''
for metric_secs, metric_char in [[7 * 24 * 60 * 60, 'w'],
[24 * 60 * 60, 'd'],
[60 * 60, 'h'],
[60, 'm']]:
if secs > metric_secs:
time += '{}{}'.format(int(secs / metric_secs), metric_char)
secs -= int(secs / metric_secs) * metric_secs
if secs > 0:
time += '{}s'.format(secs)
return time
def metric(num, metric_list=[[10 ** 9, 'B'], [10 ** 6, 'M'], [10 ** 3, 'k']], additive=False):
"""Returns user-readable string representing given value.
Arguments:
num is the base value we're converting.
metric_list is the list of data we're working off.
additive is whether we add the various values together, or separate them.
Return:
a string such as 345K or 23w6d2h53s"""
output = ''
for metric_count, metric_char in metric_list:
if num > metric_count:
if additive:
# additive mode: take the whole number of units and carry the
# remainder on (the original divided repeatedly, which yielded
# strings like '1.5m1.5s' instead of '1m30s')
count = int(num / metric_count)
output += '{}{}'.format(count, metric_char)
num -= count * metric_count
else:
output += '{:.1f}{}'.format(float(num) / metric_count, metric_char)
break
# just in case no output
if output == '':
output = str(num)
return output
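# Editor's note: illustrative calls (the additive case assumes the
# remainder-carrying fix above):
#
# metric(1200) # -> '1.2k'
# metric(3500000) # -> '3.5M'
# metric(90, [[60, 'm'], [1, 's']], additive=True) # -> '1m30s'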
def get_url(url, **kwargs):
"""Gets a url, handles all the icky requests stuff."""
try:
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
r = requests.get(url, **kwargs)
r.status = Status(r.status_code)
if not r.ok:
return 'HTTP Error - {code} {name} - {description}'.format(**{
'code': r.status.code,
'name': r.status.name,
'description': r.status.description
})
except requests.exceptions.Timeout:
return 'Connection timed out'
except requests.exceptions.RequestException as x:
return '{}'.format(x.__class__.__name__)
return r
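# Editor's note: callers receive either a requests.Response or a plain error
# string, so a typical call site looks like (hypothetical URL):
#
# r = get_url('https://example.com/api')
# if isinstance(r, str): print('lookup failed: ' + r)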
def format_extract(format_json, input_element, format=None, debug=False, fail='Failure'):
if not format:
if 'format' in format_json:
format = format_json['format']
else:
return 'No format for format_extract()'
if 'debug' in format_json:
debug = format_json['debug']
# format-specific settings
if format == 'json':
input_element = json.loads(input_element)
retrieve = json_return
elif format == 'xml':
# ignore xml namespaces
input_element = input_element.replace(' xmlns:', ' xmlnamespace:')
input_element = input_element.replace(' xmlns=', ' xmlnamespace=')
retrieve = xml_return
# format extraction - format kwargs
format_dict = {}
if 'response_dict' in format_json:
for name in format_json['response_dict']:
try:
if isinstance(format_json['response_dict'][name], collections.abc.Callable):
try:
format_dict[name] = format_json['response_dict'][name](format_json,
input_element)
except BaseException as x:
if debug:
return 'Unknown failure: {}'.format(x)
else:
return 'Code error'
else:
format_dict[name] = retrieve(input_element,
format_json['response_dict'][name])
if format_dict[name] is None:
return fail
except KeyError:
if debug:
return 'Fail on {}'.format(name)
else:
return fail
except IndexError:
if debug:
return 'Fail on {}'.format(name)
else:
return fail
try:
return format_json['response'].format(**format_dict)
except KeyError:
if debug:
return 'Fail on format() key'
else:
return fail
except IndexError:
if debug:
return 'Fail on format() index'
else:
return fail
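# Editor's note: a hedged sketch of the format dict this function consumes,
# with hypothetical field names, built from the selector forms handled by
# json_return() below:
#
# fmt = {
# 'format': 'json',
# 'response': '{title} by {author}',
# 'response_dict': {
# 'title': ['json', ['items', 0, 'title']],
# 'author': ['json', ['items', 0, 'author']],
# },
# }
# format_extract(fmt, '{"items": [{"title": "Goshu", "author": "Daniel"}]}')
# # -> 'Goshu by Daniel'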
def xml_return(input_xml, selector):
pq_xml = pq(input_xml)
if selector[0] == 'text':
return selector[1]
elif selector[0] == 'text.escape':
return escape(selector[1])
elif selector[0] == 'jquery':
return pq_xml(selector[1]).text()
elif selector[0] == 'jquery.attr':
return pq_xml(selector[1]).attr(selector[2])
def json_return(input_json, selector):
if selector[0] == 'text':
return selector[1]
elif selector[0] == 'text.escape':
return escape(selector[1])
elif selector[0] == 'json.lower':
if len(selector) > 2:
default = selector[2]
else:
default = ""
return str(json_element(input_json, selector[1], default=default)).lower()
elif selector[0] == 'json.quote_plus':
if len(selector) > 2:
default = selector[2]
else:
default = ""
return urllib.parse.quote_plus(str(json_element(input_json, selector[1],
default=default)))
elif selector[0] == 'json.num.metric':
if len(selector) > 2:
default = selector[2]
else:
default = 0
return metric(int(json_element(input_json, selector[1], default=default)))
elif selector[0] == 'json.datetime.fromtimestamp':
if len(selector) > 2:
default = selector[2]
else:
default = 0
ts = json_element(input_json, selector[1], default=default)
return datetime.datetime.fromtimestamp(ts).strftime(selector[2])
elif selector[0] == 'json.dict.returntrue':
keys = []
json_dict = json_element(input_json, selector[1])
for key in json_dict:
if json_dict[key]:
keys.append(key)
return selector[2].join(keys)
# before general json
else:
if len(selector) > 2:
default = selector[2]
else:
default = None
return escape(str(json_element(input_json, selector[1], default=default)))
def json_element(input_dict, query, default=None):
"""Runs through a data structure and returns the selected element."""
for element in query:
is_list_index = isinstance(element, int) and isinstance(input_dict, (list, tuple))
if is_list_index or element in input_dict:
input_dict = input_dict[element]
else:
return default
return input_dict
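# Editor's note: quick illustrative calls:
#
# json_element({'a': {'b': [10, 20]}}, ['a', 'b', 1]) # -> 20
# json_element({'a': {}}, ['a', 'missing'], default=0) # -> 0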
def filename_escape(unsafe, replace_char='_', valid_chars=valid_filename_chars):
"""Escapes a string to provide a safe local filename
Arguments:
unsafe -- Unsafe string to escape
replace_char -- Character to replace unsafe characters with
valid_chars -- Valid filename characters
Returns:
Safe local filename string
"""
if not unsafe:
return ''
safe = ''
for character in unsafe:
if character in valid_chars:
safe += character
else:
safe += replace_char
return safe
_unescape_map = {
'&#39;': "'",
'&#x27;': "'",
'&quot;': "'",
}
def html_unescape(input):
"""Turns any html-escaped characters back to their normal equivalents."""
output = saxutils.unescape(input)
for char in _unescape_map.keys():
output = output.replace(char, _unescape_map[char])
return output
def utf8_bom(input):
"""Strips BOM from a utf8 string, because open() leaves it in for some reason."""
output = input.replace('\ufeff', '')
return output
class JsonHandler:
def __init__(self, base, folder, attr=None, callback_name=None, ext=None, yaml=False):
if ext:
self.pattern = [x.format(ext) for x in ['*.{}.yaml', '*.{}.json', '*_{}.py']]
else:
self.pattern = ['*.yaml', '*.json', '*.py']
self.base = base
self.attr = attr
self.folder = folder
self.ext = ext
self.callback_name = callback_name
self.yaml = yaml
self.reload()
def spread_new_json(self, new_json):
if self.attr:
setattr(self.base, self.attr, new_json)
if self.callback_name:
getattr(self.base, self.callback_name, None)(new_json)
def reload(self):
new_json = {}
if not os.path.exists(self.folder):
self.spread_new_json(new_json)
return
# loading
folders_to_scan = [self.folder]
# loading list of folders that contain modules
for f in os.listdir(self.folder):
if f == 'disabled':
continue
full_name = os.path.join(self.folder, f)
if os.path.isdir(full_name):
folders_to_scan.append(full_name)
# loading actual modules
for folder in folders_to_scan:
for f in os.listdir(folder):
full_name = os.path.join(folder, f)
if os.path.isfile(full_name):
(extname, ext) = os.path.splitext(full_name)
if ext.lower() not in ['.json', '.yaml']:
continue
# check for loader-specific extension
if self.ext:
name, ext = os.path.splitext(extname)
pyfile = '{}_{}'.format('.'.join(name.split(os.sep)), self.ext)
# not really our module
if ext != os.extsep + self.ext:
continue
else:
name, ext = extname, ''
pyfile = '.'.join(name[2:].split(os.sep))
# NOTE: this is static, and that is bad
# (str.lstrip strips a character set, not a prefix, so drop the
# intended prefix explicitly instead)
for _prefix in ('..modules.', 'modules.'):
if pyfile.startswith(_prefix):
pyfile = pyfile[len(_prefix):]
break
# py file
if self.yaml:
try:
module = importlib.import_module(pyfile)
imp.reload(module) # so reloading works
# we should capture this and output errors to stderr
except:
pass
# yaml / json
with open(full_name, encoding='utf-8') as js_f:
if self.yaml:
try:
info = yaml.load(js_f.read(), Loader=yaml.FullLoader)
# we should capture this and output errors to stderr
except Exception as ex:
print('failed to load YAML file', full_name, ':', ex)
continue
else:
info = json.loads(js_f.read())
# set module name and info
if 'name' not in info:
new_name = name.split('/')[-1].split('\\')[-1]
info['name'] = [new_name]
new_json[info['name'][0]] = info
# set info on base object and / or call callback
self.spread_new_json(new_json)
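# Editor's note: a hedged usage sketch (hypothetical folder/attribute names),
# mirroring the constructor parameters above:
#
# bot = type('Bot', (), {})() # any object works as the base
# handler = JsonHandler(bot, 'modules', attr='module_info', ext='commands')
# bot.module_info # populated from modules/*.commands.{yaml,json}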
# timedelta functions
_td_str_map = [
('d', 'days'),
('h', 'hours'),
('m', 'minutes'),
('s', 'seconds'),
]
_str_td = r''
for istr, td in _td_str_map:
_str_td += r'\s*(?:(?P<' + td + r'>[0-9]+)\s*' + istr + r')?'
_TD_STR_REGEX = re.compile(_str_td)
def timedelta_to_string(delta):
"""Converts a timedelta dict to a string."""
td_string = ''
for istr, td in _td_str_map:
if td in delta:
td_string += str(delta[td])
td_string += istr
return td_string
def string_to_timedelta(td_string):
"""Converts a string to a timedelta dict."""
match = _TD_STR_REGEX.match(td_string)
delta = {}
for istr, td in _td_str_map:
if match.group(td):
if '.' in match.group(td):
val = float(match.group(td))
else:
val = int(match.group(td))
delta[td] = val
return delta
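# Editor's note: a round-trip illustration, checked against the regex and
# map above:
#
# string_to_timedelta('2d 3h 5s') # -> {'days': 2, 'hours': 3, 'seconds': 5}
# timedelta_to_string({'days': 2, 'hours': 3, 'seconds': 5}) # -> '2d3h5s'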
# path
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
| 30.795203
| 94
| 0.549038
| 3,564
| 0.213528
| 0
| 0
| 0
| 0
| 0
| 0
| 4,286
| 0.256785
|
542466b53c52821ceb40707c73e0ab32ca5a0262
| 8,707
|
py
|
Python
|
ptf/lib/runner.py
|
opennetworkinglab/tassen
|
6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09
|
[
"Apache-2.0"
] | 4
|
2020-07-08T22:04:35.000Z
|
2020-07-14T15:09:37.000Z
|
ptf/lib/runner.py
|
opennetworkinglab/tassen
|
6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09
|
[
"Apache-2.0"
] | 1
|
2020-07-07T08:12:40.000Z
|
2020-07-07T08:12:41.000Z
|
ptf/lib/runner.py
|
opennetworkinglab/tassen
|
6e42ba79f83caa1bd6ecb40fd9bd1e9f8768ec09
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
# SPDX-FileCopyrightText: 2018-present Open Networking Foundation
#
# SPDX-License-Identifier: Apache-2.0
import Queue
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
from collections import OrderedDict
import google.protobuf.text_format
import grpc
from p4.v1 import p4runtime_pb2, p4runtime_pb2_grpc
PTF_ROOT = os.path.dirname(os.path.realpath(__file__))
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("PTF runner")
def error(msg, *args, **kwargs):
logger.error(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
logger.warn(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
logger.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
logger.debug(msg, *args, **kwargs)
def check_ifaces(ifaces):
"""
Checks that required interfaces exist.
"""
ifconfig_out = subprocess.check_output(['ifconfig'])
iface_list = re.findall(r'^([a-zA-Z0-9]+)', ifconfig_out, re.S | re.M)
present_ifaces = set(iface_list)
ifaces = set(ifaces)
return ifaces <= present_ifaces
def build_bmv2_config(bmv2_json_path):
"""
Builds the device config for BMv2
"""
with open(bmv2_json_path) as f:
return f.read()
def update_config(p4info_path, bmv2_json_path, grpc_addr, device_id):
"""
Performs a SetForwardingPipelineConfig on the device
"""
channel = grpc.insecure_channel(grpc_addr)
stub = p4runtime_pb2_grpc.P4RuntimeStub(channel)
debug("Sending P4 config")
# Send master arbitration via stream channel
# This should go in library, to be re-used also by base_test.py.
stream_out_q = Queue.Queue()
stream_in_q = Queue.Queue()
def stream_req_iterator():
while True:
p = stream_out_q.get()
if p is None:
break
yield p
def stream_recv(stream):
for p in stream:
stream_in_q.put(p)
def get_stream_packet(type_, timeout=1):
start = time.time()
try:
while True:
remaining = timeout - (time.time() - start)
if remaining < 0:
break
msg = stream_in_q.get(timeout=remaining)
if not msg.HasField(type_):
continue
return msg
except Queue.Empty: # timeout expired
pass
return None
stream = stub.StreamChannel(stream_req_iterator())
stream_recv_thread = threading.Thread(target=stream_recv, args=(stream,))
stream_recv_thread.start()
req = p4runtime_pb2.StreamMessageRequest()
arbitration = req.arbitration
arbitration.device_id = device_id
election_id = arbitration.election_id
election_id.high = 0
election_id.low = 1
stream_out_q.put(req)
rep = get_stream_packet("arbitration", timeout=5)
if rep is None:
error("Failed to establish handshake")
return False
try:
# Set pipeline config.
request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
request.device_id = device_id
election_id = request.election_id
election_id.high = 0
election_id.low = 1
config = request.config
with open(p4info_path, 'r') as p4info_f:
config.p4info.ParseFromString(p4info_f.read())
config.p4_device_config = build_bmv2_config(bmv2_json_path)
request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
try:
stub.SetForwardingPipelineConfig(request)
except Exception as e:
error("Error during SetForwardingPipelineConfig")
error(str(e))
return False
return True
finally:
stream_out_q.put(None)
stream_recv_thread.join()
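# Editor's note: a minimal driver sketch (hypothetical artifact paths; assumes
# a P4Runtime server is listening on the address shown):
#
# ok = update_config('build/p4info.bin', 'build/bmv2.json', 'localhost:50051', 1)
# assert ok, 'SetForwardingPipelineConfig failed'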
def run_test(p4info_path, grpc_addr, device_id, cpu_port, ptfdir, port_map_path,
extra_args=()):
"""
Runs PTF tests included in provided directory.
    Device must be running and configured with the appropriate P4 program.
"""
# TODO: check schema?
# "ptf_port" is ignored for now, we assume that ports are provided by
    # increasing values of ptf_port, in the range [0, NUM_IFACES).
port_map = OrderedDict()
with open(port_map_path, 'r') as port_map_f:
port_list = json.load(port_map_f)
for entry in port_list:
p4_port = entry["p4_port"]
iface_name = entry["iface_name"]
port_map[p4_port] = iface_name
if not check_ifaces(port_map.values()):
error("Some interfaces are missing")
return False
ifaces = []
# FIXME
# find base_test.py
pypath = os.path.dirname(os.path.abspath(__file__))
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] += ":" + pypath
else:
os.environ['PYTHONPATH'] = pypath
for iface_idx, iface_name in port_map.items():
ifaces.extend(['-i', '{}@{}'.format(iface_idx, iface_name)])
cmd = ['ptf']
cmd.extend(['--test-dir', ptfdir])
cmd.extend(ifaces)
test_params = 'p4info=\'{}\''.format(p4info_path)
test_params += ';grpcaddr=\'{}\''.format(grpc_addr)
test_params += ';device_id=\'{}\''.format(device_id)
test_params += ';cpu_port=\'{}\''.format(cpu_port)
cmd.append('--test-params={}'.format(test_params))
cmd.extend(extra_args)
debug("Executing PTF command: {}".format(' '.join(cmd)))
try:
# we want the ptf output to be sent to stdout
p = subprocess.Popen(cmd)
p.wait()
except:
error("Error when running PTF tests")
return False
return p.returncode == 0
def check_ptf():
try:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(['ptf', '--version'],
stdout=devnull, stderr=devnull)
return True
except subprocess.CalledProcessError:
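        # `ptf` exists but exited with a non-zero status; treat it as installed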
return True
except OSError: # PTF not found
return False
# noinspection PyTypeChecker
def main():
parser = argparse.ArgumentParser(
description="Compile the provided P4 program and run PTF tests on it")
parser.add_argument('--p4info',
help='Location of p4info proto in binary format',
type=str, action="store", required=True)
parser.add_argument('--bmv2-json',
help='Location BMv2 JSON output from p4c (if target is bmv2)',
type=str, action="store", required=False)
parser.add_argument('--grpc-addr',
help='Address to use to connect to P4 Runtime server',
type=str, default='localhost:50051')
parser.add_argument('--device-id',
help='Device id for device under test',
type=int, default=1)
parser.add_argument('--cpu-port',
help='CPU port ID of device under test',
type=int, required=True)
parser.add_argument('--ptf-dir',
help='Directory containing PTF tests',
type=str, required=True)
parser.add_argument('--port-map',
help='Path to JSON port mapping',
type=str, required=True)
args, unknown_args = parser.parse_known_args()
if not check_ptf():
error("Cannot find PTF executable")
sys.exit(1)
if not os.path.exists(args.p4info):
error("P4Info file {} not found".format(args.p4info))
sys.exit(1)
if not os.path.exists(args.bmv2_json):
error("BMv2 json file {} not found".format(args.bmv2_json))
sys.exit(1)
if not os.path.exists(args.port_map):
print "Port map path '{}' does not exist".format(args.port_map)
sys.exit(1)
try:
success = update_config(p4info_path=args.p4info,
bmv2_json_path=args.bmv2_json,
grpc_addr=args.grpc_addr,
device_id=args.device_id)
if not success:
sys.exit(2)
success = run_test(p4info_path=args.p4info,
device_id=args.device_id,
grpc_addr=args.grpc_addr,
cpu_port=args.cpu_port,
ptfdir=args.ptf_dir,
port_map_path=args.port_map,
extra_args=unknown_args)
if not success:
sys.exit(3)
except Exception:
raise
if __name__ == '__main__':
main()
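# Example invocation (paths below are hypothetical; point them at your own
# build artifacts). Any unrecognized flags are forwarded verbatim to `ptf`
# via parse_known_args:
#
#   ./runner.py --p4info build/p4info.bin \
#       --bmv2-json build/bmv2.json \
#       --grpc-addr localhost:50051 \
#       --cpu-port 255 \
#       --ptf-dir tests/ptf \
#       --port-map port_map.json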
| 31.547101
| 91
| 0.605949
| 0
| 0
| 2,560
| 0.294016
| 0
| 0
| 0
| 0
| 1,899
| 0.2181
|
5427881b2cdb695dc79fdf0dbaacbc4dd2f6b718
| 178
|
py
|
Python
|
rsebs/__init__.py
|
gabrielperezs/recycling-snapshots
|
b0707e883bb6037505af815877e4ef8ce544e35e
|
[
"Apache-2.0"
] | 1
|
2017-05-23T05:58:47.000Z
|
2017-05-23T05:58:47.000Z
|
rsebs/__init__.py
|
gabrielperezs/recycling-snapshots
|
b0707e883bb6037505af815877e4ef8ce544e35e
|
[
"Apache-2.0"
] | null | null | null |
rsebs/__init__.py
|
gabrielperezs/recycling-snapshots
|
b0707e883bb6037505af815877e4ef8ce544e35e
|
[
"Apache-2.0"
] | null | null | null |
from .snapshots import set_client
from .snapshots import get_snapshots
from .snapshots import tag_snapshot
from .snapshots import set_drymode
from .snapshots import unset_drymode
| 35.6
| 36
| 0.865169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
54286060601c97e4e84de6381203dae2af8365e8
| 1,184
|
py
|
Python
|
predict_form.py
|
HuginnM/UsedCarsUA
|
aa871c1bc6cdc1a84810db265c732b04cb4935f0
|
[
"Apache-2.0"
] | null | null | null |
predict_form.py
|
HuginnM/UsedCarsUA
|
aa871c1bc6cdc1a84810db265c732b04cb4935f0
|
[
"Apache-2.0"
] | null | null | null |
predict_form.py
|
HuginnM/UsedCarsUA
|
aa871c1bc6cdc1a84810db265c732b04cb4935f0
|
[
"Apache-2.0"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, IntegerField, FloatField, StringField
from wtforms.validators import DataRequired
import pandas as pd
uniq_vals = pd.read_csv("data/unique_cat_vals.csv", index_col=0)
class InputData(FlaskForm):
car = SelectField(label="Car", choices=uniq_vals.car.dropna().sort_values(), validators=[DataRequired()])
model = SelectField("Model", choices=uniq_vals.model.dropna().sort_values(), validators=[DataRequired()])
body = SelectField(label="Body", choices=uniq_vals.body.dropna().sort_values(), validators=[DataRequired()])
drive = SelectField("Drive", choices=uniq_vals.drive.dropna().sort_values(), validators=[DataRequired()])
engType = SelectField("Engine type: ", choices=uniq_vals.engType.dropna().sort_values(), validators=[DataRequired()])
engV = FloatField("Engine Volume", validators=[DataRequired()])
year = IntegerField("Year", validators=[DataRequired()])
mileage = IntegerField(label="Mileage", validators=[DataRequired()])
registration = SelectField(label="Registration", choices=uniq_vals.registration.dropna())
submit = SubmitField("Predict the price")
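# Minimal usage sketch (hypothetical app wiring, not part of this module):
# Flask-WTF forms are instantiated inside a view and validated on POST.
#
#   from flask import Flask, render_template
#   from predict_form import InputData
#
#   app = Flask(__name__)
#   app.config["SECRET_KEY"] = "change-me"  # required by Flask-WTF for CSRF
#
#   @app.route("/", methods=["GET", "POST"])
#   def index():
#       form = InputData()
#       if form.validate_on_submit():
#           ...  # feed form.car.data, form.year.data, etc. into the price model
#       return render_template("index.html", form=form)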
| 56.380952
| 121
| 0.754223
| 935
| 0.789696
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.108953
|
5429177713786c59d64d5d6d11764c591147502b
| 2,764
|
py
|
Python
|
color.py
|
laplacetw/color-codes-bot
|
e7afb5b09e7c4a5dde6608917781cc6a0ea05287
|
[
"MIT"
] | 1
|
2020-10-21T01:26:09.000Z
|
2020-10-21T01:26:09.000Z
|
color.py
|
laplacetw/color-codes-bot
|
e7afb5b09e7c4a5dde6608917781cc6a0ea05287
|
[
"MIT"
] | null | null | null |
color.py
|
laplacetw/color-codes-bot
|
e7afb5b09e7c4a5dde6608917781cc6a0ea05287
|
[
"MIT"
] | null | null | null |
#!usr/bin/env python3
color_chart = {
'1C1':[13.24, 88.89, 228.98, 0.], '1N1':[14.2, 95.37, 233.82, 0.], '1N2':[12.95, 91.79, 219.5, 0.],
'1W1':[14.67, 103.64, 229.41, 0.], '1W2':[14.69, 106.34, 227.28, 0.], '2C0':[15.73, 134.68, 222.32, 0.],
'2C1':[14.57, 125.89, 220.69, 0.], '2C3':[13.7, 103.72, 199.46, 0.], '2N1':[15., 104.25, 225.8, 0.],
'2W0':[15., 110.11, 224.22, 0.], '2W1':[14.42, 125.06, 224.55, 0.], '2W2':[17.13, 141.58, 209.99, 0.],
'3C1':[15.7, 118.18, 212.01, 0.], '3C2':[15.7, 118.18, 212.01, 0.], '3N1':[16.1, 150.1, 189.09, 0.],
'3N2':[15.18, 140.68, 202.63, 0.], '3W1':[15.66, 129.81, 209.44, 0.], '3W2':[17.05, 161.56, 184.85, 0.],
'4C3':[14.23, 148.1, 198.74, 0.], '4N1':[15.92, 159.35, 190.71, 0.], '4N2':[17.29, 166.95, 195.76, 0.],
'4W1':[14.67, 143.61, 208.85, 0.], '4W2':[17.76, 162.02, 189.44, 0.], '5C1':[13.09, 179.49, 160.58, 0.],
'5N1':[15.43, 187.36, 180.34, 0.], '5N2':[16.66, 207.88, 147.84, 0.], '5W1':[15.66, 163.85, 182.07, 0.],
'5W2':[14.95, 160.63, 189.17, 0.], '6C2':[12.85, 179.52, 131.66, 0.], '6N1':[14.94, 185.61, 162.16, 0.],
'6N2':[15.7, 183.46, 138.37, 0.], '6W1':[14.76, 166.57, 166.78, 0.], '6W2':[13.79, 176.99, 142.22, 0.],
'7C1':[12.2, 191.5, 121.34, 0.], '7N1':[12.7, 162.67, 109.41, 0.], '7W1':[13.25, 165.64, 126.03, 0.],
'8N1':[12.5, 191.83, 95.43, 0.], 'CR1':[14.09, 173.14, 163.66, 0.]}
color_chart_new = {
'1C1':[14.63, 79.35, 239.58, 0.], '1N1':[16.89, 77.75, 243.46, 0.], '1N2':[13.27, 104.13, 231.18, 0.],
'1W1':[17.78, 104.99, 236.54, 0.], '1W2':[16., 117.24, 234.86, 0.], '2C0':[17.16, 80.90, 240.48, 0.],
'2C1':[14., 116.60, 237.21, 0.], '2C3':[13.36, 94.80, 231.17, 0.], '2N1':[16., 115.65, 238.19, 0.],
'2W0':[15.79, 108.95, 237.93, 0.], '2W1':[15.01, 120.45, 240.01, 0.], '2W2':[17.97, 125.56, 243.83, 0.],
'3C1':[10.99, 115.63, 226.18, 0.], '3C2':[10.84, 117.73, 219.17, 0.], '3N1':[11.9, 126.73, 228.04, 0.],
'3N2':[11.43, 126.97, 224.13, 0.], '3W1':[13.14, 148.12, 229.10, 0.], '3W2':[14.01, 133.06, 234.48, 0.],
'4C3':[11.68, 150.85, 219.34, 0.], '4N1':[12., 151.75, 190.41, 0.], '4N2':[12.24, 138.18, 206.75, 0.],
'4W1':[12., 151.31, 224.04, 0.], '4W2':[12., 165.62, 201.74, 0.], '5C1':[10.4, 184.48, 176.72, 0.],
'5N1':[11.68, 188.46, 210.23, 0.], '5N2':[10.98, 183.80, 195.04, 0.], '5W1':[12.73, 185.75, 221.30, 0.],
'5W2':[10.83, 162.54, 211.10, 0.], '6C2':[9.29, 217.70, 111.99, 0.], '6N1':[11.24, 180.30, 156.76, 0.],
'6N2':[11., 173.55, 145.55, 0.], '6W1':[11.09, 188.43, 171.41, 0.], '6W2':[11., 182.77, 151.02, 0.],
'7C1':[8.07, 199.37, 115.59, 0.], '7N1':[9.93, 187.51, 122.57, 0.], '7W1':[9.86, 192.48, 135.62, 0.],
'8N1':[8.64, 181.83, 109.53, 0.]}
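# Minimal lookup sketch (an illustrative assumption, not the bot's actual
# matching logic): treat each 4-element list as a point and return the chart
# code whose vector is closest, in squared Euclidean distance, to a sample.
def closest_code(sample, chart=color_chart):
    return min(chart,
               key=lambda code: sum((a - b) ** 2
                                    for a, b in zip(chart[code], sample)))
# e.g. closest_code([13.0, 90.0, 230.0, 0.]) -> '1C1'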
| 86.375
| 109
| 0.48589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 396
| 0.143271
|
5429df166b3efe8e9b12e537d9c5a2b68d7af8f7
| 235
|
py
|
Python
|
leetCode/algorithms/easy/occurrences_after_bigram.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 25
|
2015-01-21T16:39:18.000Z
|
2021-05-24T07:01:24.000Z
|
leetCode/algorithms/easy/occurrences_after_bigram.py
|
gauravsingh58/algo
|
397859a53429e7a585e5f6964ad24146c6261326
|
[
"WTFPL"
] | 2
|
2020-09-30T19:39:36.000Z
|
2020-10-01T17:15:16.000Z
|
leetCode/algorithms/easy/occurrences_after_bigram.py
|
ferhatelmas/algo
|
a7149c7a605708bc01a5cd30bf5455644cefd04d
|
[
"WTFPL"
] | 15
|
2015-01-21T16:39:27.000Z
|
2020-10-01T17:00:22.000Z
|
from typing import List
class Solution:
def findOcurrences(self, text: str, first: str, second: str) -> List[str]:
ls = text.split()
return [c for a, b, c in zip(ls, ls[1:], ls[2:]) if a == first and b == second]
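
if __name__ == "__main__":
    # Quick self-check using the classic example from the problem statement:
    # every word that directly follows the bigram ("a", "good") is returned.
    text = "alice is a good girl she is a good student"
    print(Solution().findOcurrences(text, "a", "good"))  # ['girl', 'student']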
| 29.375
| 87
| 0.595745
| 208
| 0.885106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
542a62b48d45febc53b82e238fe6ed286841ea91
| 454
|
py
|
Python
|
src/pyuwds3/utils/egocentric_spatial_relations.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 2
|
2020-08-19T06:15:14.000Z
|
2021-05-23T09:55:18.000Z
|
src/pyuwds3/utils/egocentric_spatial_relations.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 5
|
2021-01-06T09:00:35.000Z
|
2021-01-20T13:22:19.000Z
|
src/pyuwds3/utils/egocentric_spatial_relations.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 2
|
2020-11-18T17:34:43.000Z
|
2021-05-23T16:14:17.000Z
|
import math
from scipy.spatial.distance import euclidean
from ..types.bbox import BoundingBox
def is_left_of(bb1, bb2):
_, _, bb1_max, _, _ = bb1
bb2_min, _, _, _, _ = bb2
return bb1_max < bb2_min
def is_right_of(bb1, bb2):
bb1_min, _, _, _, _ = bb1
_, _, bb2_max, _, _ = bb2
return bb1_min > bb2_max
def is_behind(bb1, bb2):
_, _, _, _, bb1_depth = bb1
_, _, _, _, bb2_depth = bb2
return bb1_depth > bb2_depth
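
if __name__ == "__main__":
    # Sanity check with hypothetical boxes: each bb is assumed to be a 5-tuple
    # (x_min, y_min, x_max, y_max, depth), matching the unpacking above.
    box_a = (0, 0, 1, 1, 2.0)
    box_b = (3, 0, 4, 1, 2.0)
    assert is_left_of(box_a, box_b)              # bb1_max == 1 < bb2_min == 3
    assert is_behind((0, 0, 1, 1, 5.0), box_b)   # first box is deeper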
| 19.73913
| 44
| 0.634361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
542b4553e4da40bd25e9c35ead38f8985d1d5c31
| 2,883
|
py
|
Python
|
machine_replacement_action_probs.py
|
dsbrown1331/broil
|
3c06e15c560db3242c0e331a2b16cc578a843606
|
[
"MIT"
] | 1
|
2021-03-29T09:53:53.000Z
|
2021-03-29T09:53:53.000Z
|
machine_replacement_action_probs.py
|
dsbrown1331/broil
|
3c06e15c560db3242c0e331a2b16cc578a843606
|
[
"MIT"
] | 1
|
2020-11-22T15:05:48.000Z
|
2020-11-25T00:10:17.000Z
|
machine_replacement_action_probs.py
|
dsbrown1331/broil
|
3c06e15c560db3242c0e331a2b16cc578a843606
|
[
"MIT"
] | null | null | null |
import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
from machine_replacement import generate_posterior_samples
if __name__=="__main__":
seed = 1234
np.random.seed(seed)
scipy.random.seed(seed)
random.seed(seed)
num_states = 4
num_samples = 2000
gamma = 0.95
alpha = 0.99
posterior = generate_posterior_samples(num_samples)
r_sa = np.mean(posterior, axis=1)
init_distribution = np.ones(num_states)/num_states #uniform distribution
mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)
print("mean MDP reward", r_sa)
u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
print("mean policy from posterior")
utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
print("MAP/Mean policy from posterior")
utils.print_policy_from_occupancies(u_sa, mdp_env)
print("rewards")
print(mdp_env.r_sa)
print("expected value = ", np.dot(u_sa, r_sa))
stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
print("values", mdp.get_state_values(u_sa, mdp_env))
print('q-values', mdp.get_q_values(u_sa, mdp_env))
#run CVaR optimization, just the robust version
u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
posterior_probs = np.ones(num_samples) / num_samples #uniform dist since samples from MCMC
#generate efficient frontier
lambda_range = [0.0, 0.3, 0.75, 0.95, 1.0]
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
bar_width = 0.15
opacity = 0.9
color=iter(cm.rainbow(np.linspace(0,1,6)))
cnt = 0
index = np.arange(num_states)
for i,lamda in enumerate(lambda_range):
print("lambda = ", lamda)
cvar_opt_usa, cvar, exp_ret = mdp.solve_max_cvar_policy(mdp_env, u_expert, posterior, posterior_probs, alpha, False, lamda)
print('action probs')
utils.print_stochastic_policy_action_probs(cvar_opt_usa, mdp_env)
stoch_pi = utils.get_optimal_policy_from_usa(cvar_opt_usa, mdp_env)
print(stoch_pi[:,1])
c = next(color)
plt.figure(1)
label = r"$\lambda={}$".format(lamda)
rects1 = plt.bar(index + cnt * bar_width,stoch_pi[:,0], bar_width,
alpha=opacity, label=label, color=c)
cnt += 1
plt.figure(1)
plt.axis([-1,5,0, 1])
plt.yticks(fontsize=18)
plt.xticks(index + 2*bar_width, ('1', '2', '3', '4'), fontsize=18)
plt.legend(loc='best', fontsize=16)
plt.xlabel('State',fontsize=20)
plt.ylabel('Pr(Do Nothing $\mid$ State)',fontsize=20)
plt.tight_layout()
plt.savefig("./figs/machine_replacement/action_probs_machine_replacement.png")
plt.show()
| 27.990291
| 131
| 0.687825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 442
| 0.153313
|
542b464eeb35182c67fc88683f7b87c523d2bec7
| 5,982
|
py
|
Python
|
sequential/seq_smnist/train_args_seq_smnist.py
|
mariacer/cl_in_rnns
|
333b8e03391600a8e3df7d684a3f171b135d273a
|
[
"Apache-2.0"
] | 26
|
2020-06-17T08:44:15.000Z
|
2022-03-20T04:21:13.000Z
|
sequential/seq_smnist/train_args_seq_smnist.py
|
mariacer/cl_in_rnns
|
333b8e03391600a8e3df7d684a3f171b135d273a
|
[
"Apache-2.0"
] | null | null | null |
sequential/seq_smnist/train_args_seq_smnist.py
|
mariacer/cl_in_rnns
|
333b8e03391600a8e3df7d684a3f171b135d273a
|
[
"Apache-2.0"
] | 4
|
2020-10-26T02:19:38.000Z
|
2021-12-26T02:26:05.000Z
|
#!/usr/bin/env python3
# Copyright 2019 Benjamin Ehret, Maria Cervera
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :sequential/smnist/train_args_seq_smnist.py
# @author :be
# @contact :behret@ethz.ch
# @created :24/03/2020
# @version :1.0
# @python_version :3.6.8
"""
Command-line arguments and default values for the sequential SMNIST task are
handled here.
"""
import argparse
import warnings
import utils.cli_args as cli
import sequential.train_args_sequential as seq
def parse_cmd_arguments(default=False, argv=None):
"""Parse command-line arguments.
Args:
default (optional): If True, command-line arguments will be ignored and
only the default values will be parsed.
argv (optional): If provided, it will be treated as a list of command-
line argument that is passed to the parser in place of sys.argv.
Returns:
The Namespace object containing argument names and values.
"""
description = 'Continual learning on sequential SMNIST task.'
parser = argparse.ArgumentParser(description=description)
cli.cl_args(parser, show_beta=True, dbeta=0.005,
show_from_scratch=True, show_multi_head=True,
show_split_head_cl3=False, show_cl_scenario=False,
show_num_tasks=True, dnum_tasks=45)
cli.train_args(parser, show_lr=True, show_epochs=False,
dbatch_size=64, dn_iter=5000,
dlr=1e-3, show_clip_grad_value=False, show_clip_grad_norm=True,
show_momentum=False, show_adam_beta1=True)
seq.rnn_args(parser, drnn_arch='256', dnet_act='tanh')
cli.hypernet_args(parser, dhyper_chunks=-1, dhnet_arch='50,50',
dtemb_size=32, demb_size=32, dhnet_act='relu')
# Args of new hnets.
nhnet_args = cli.hnet_args(parser, allowed_nets=['hmlp', 'chunked_hmlp',
'structured_hmlp', 'hdeconv', 'chunked_hdeconv'], dhmlp_arch='50,50',
show_cond_emb_size=True, dcond_emb_size=32, dchmlp_chunk_size=1000,
dchunk_emb_size=32, show_use_cond_chunk_embs=True,
dhdeconv_shape='512,512,3', prefix='nh_',
pf_name='new edition of a hyper-', show_net_act=True, dnet_act='relu',
show_no_bias=True, show_dropout_rate=True, ddropout_rate=-1,
show_specnorm=True, show_batchnorm=False, show_no_batchnorm=False)
seq.new_hnet_args(nhnet_args)
cli.init_args(parser, custom_option=False, show_normal_init=False,
show_hyper_fan_init=True)
cli.eval_args(parser, dval_iter=250, show_val_set_size=True,
dval_set_size=1000)
magroup = cli.miscellaneous_args(parser, big_data=False,
synthetic_data=True, show_plots=True, no_cuda=True,
show_publication_style=False)
seq.ewc_args(parser, dewc_lambda=5000., dn_fisher=-1, dtbptt_fisher=-1,
dts_weighting_fisher='last')
seq.si_args(parser, dsi_lambda=1.)
seq.context_mod_args(parser, dsparsification_reg_type='l1',
dsparsification_reg_strength=1., dcontext_mod_init='constant')
seq.miscellaneous_args(magroup, dmask_fraction=0.8, dclassification=True,
dts_weighting='last', show_use_ce_loss=False,
show_early_stopping_thld=True)
# Replay arguments.
rep_args = seq.replay_args(parser)
cli.generator_args(rep_args, dlatent_dim=100)
cli.main_net_args(parser, allowed_nets=['simple_rnn'],
dsrnn_rec_layers='256', dsrnn_pre_fc_layers='',
dsrnn_post_fc_layers='',
show_net_act=True, dnet_act='tanh', show_no_bias=True,
show_dropout_rate=False, show_specnorm=False, show_batchnorm=False,
prefix='dec_', pf_name='replay decoder')
seq_args(parser)
args = None
if argv is not None:
if default:
warnings.warn('Provided "argv" will be ignored since "default" ' +
'option was turned on.')
args = argv
if default:
args = []
config = parser.parse_args(args=args)
### Check argument values!
cli.check_invalid_argument_usage(config)
seq.check_invalid_args_sequential(config)
if config.train_from_scratch:
# FIXME We could get rid of this warning by properly checkpointing and
# loading all networks.
warnings.warn('When training from scratch, only during accuracies ' +
'make sense. All other outputs should be ignored!')
return config
def seq_args(parser):
"""This is a helper function of function :func:`parse_cmd_arguments` to add
specific arguments to the argument group related to seq smnist task.
Arguments specified in this function:
- `ssmnist_seq_len`
Args:
parser: Object of class :class:`argparse.ArgumentParser`.
"""
heading = 'SSMNIST options'
sgroup = parser.add_argument_group(heading)
sgroup.add_argument('--ssmnist_seq_len', type=int, default=2,
help='The number of digits used in a sequence. ' +
'Default: %(default)s.')
sgroup.add_argument('--ssmnist_two_classes', action='store_true',
help='If used, every task will have only 2 classes. ' +
'Instead of classifying every possible sequence ' +
'individually, sequences are randomly grouped ' +
'into 2 classes.')
if __name__=='__main__':
pass
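# Minimal usage sketch: parse the default configuration without touching
# sys.argv (the import path below is an assumption based on the file location).
#
#   from sequential.seq_smnist.train_args_seq_smnist import parse_cmd_arguments
#   config = parse_cmd_arguments(default=True)
#   print(config.ssmnist_seq_len)  # -> 2, the default sequence length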
| 41.541667
| 80
| 0.674858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,522
| 0.421598
|
542b4d4125780654fe2bbd178dc02f72ba260ddd
| 2,490
|
py
|
Python
|
examples/compare.py
|
guo-yong-zhi/wordcloud2
|
43d34766323e8eec45d46eeaa98537849f48cd37
|
[
"MIT"
] | null | null | null |
examples/compare.py
|
guo-yong-zhi/wordcloud2
|
43d34766323e8eec45d46eeaa98537849f48cd37
|
[
"MIT"
] | null | null | null |
examples/compare.py
|
guo-yong-zhi/wordcloud2
|
43d34766323e8eec45d46eeaa98537849f48cd37
|
[
"MIT"
] | null | null | null |
from wordcloud2 import wordcloud as W
import os
from PIL import Image
stwords = {"us", "will"}
print("==Obama's==")
cs = W.randomscheme() #:Set1_8
as_ = W.randomangles() #(0,90,45,-45)
dens = 0.5 #not too high
wca = W.wordcloud(
W.processtext(open(W.pkgdir(W.WordCloud)+"/res/Barack Obama's First Inaugural Address.txt").read(),
stopwords=set(W.stopwords_en).union(stwords)),
colors = cs,
angles = as_,
density = dens)
wca.generate()
#md# ### Then generate the wordcloud on the right
print("==Trump's==")
wcb = W.wordcloud(
W.processtext(open(W.pkgdir(W.WordCloud)+"/res/Donald Trump's Inaugural Address.txt").read(),
stopwords=set(W.stopwords_en).union(stwords)),
mask = wca.getsvgmask(),
colors = cs,
angles = as_,
density = dens,
run = W.identity, #turn off the useless initimage! and placement! in advance
)
#md# Follow these steps to generate a wordcloud: initimage! -> placement! -> generate!
samewords = list(set(wca.getwords()).intersection(wcb.getwords()))
print(len(samewords), "same words")
for w in samewords:
wcb.setcolors(w, wca.getcolors(w))
wcb.setangles(w, wca.getangles(w))
wcb.initimages()
wcb.setstate(":placement!")
print("=ignore defferent words=")
with wcb.keep(samewords) as wcb:
assert set(wcb.getwords()) == set(samewords)
centers = wca.getpositions(samewords, type=W.Ju.getcenter)
wcb.setpositions(samewords, centers, type=W.Ju.setcenter_b) #manually initialize the position,
wcb.setstate(":placement!") #and set the state flag
wcb.generate(1000, patient=-1, retry=1) #patient=-1 means no teleport; retry=1 means no rescale
print("=pin same words=")
with wcb.pin(samewords):
wcb.placement()
    wcb.generate(1000, retry=1) #allow teleport but don't allow rescale
if wcb.getstate() != ":generate!":
print("=overall tuning=")
    wcb.generate(1000, patient=-1, retry=2) #allow rescale but don't allow teleport
ma = wca.paint()
mb = wcb.paint()
sp = ma.width//20
cmp = Image.new('RGBA', (ma.width*2+sp, ma.height))
cmp.paste(ma, (0, 0, ma.width, ma.height))
cmp.paste(mb, (ma.width+sp, 0, ma.width*2+sp, ma.height))
os.makedirs('address_compare', exist_ok=True)
print("results are saved in address_compare")
cmp.save("address_compare/compare.png")
gif = W.GIF("address_compare")
wca.record("Obama", gif)
wcb.record("Trump", gif)
W.gif_generate(gif, framerate=1)
#md# 
#md# 
| 35.571429
| 104
| 0.685542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 866
| 0.347233
|
542b9661a1d12114a162b51bacab5cac808471e8
| 3,520
|
py
|
Python
|
modules/insight/nbCurvesLevelSet.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 25
|
2015-08-24T16:05:14.000Z
|
2020-12-09T20:07:14.000Z
|
modules/insight/nbCurvesLevelSet.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 1
|
2016-02-16T21:18:10.000Z
|
2016-02-16T21:18:10.000Z
|
modules/insight/nbCurvesLevelSet.py
|
chrisidefix/devide
|
99bfe156e710fa47ba7ae88b0ce1eef592a3a439
|
[
"BSD-3-Clause"
] | 5
|
2016-02-16T20:05:37.000Z
|
2020-01-31T11:27:39.000Z
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
import itk
import module_kits.itk_kit as itk_kit
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
class nbCurvesLevelSet(ScriptedConfigModuleMixin, ModuleBase):
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
# setup defaults
self._config.propagationScaling = 1.0
self._config.advectionScaling = 1.0
self._config.curvatureScaling = 1.0
self._config.numberOfIterations = 500
configList = [
('Propagation scaling:', 'propagationScaling', 'base:float',
'text', 'Weight factor for the propagation term'),
('Advection scaling:', 'advectionScaling', 'base:float',
'text', 'Weight factor for the advection term'),
('Curvature scaling:', 'curvatureScaling', 'base:float',
'text', 'Weight factor for the curvature term'),
('Number of iterations:', 'numberOfIterations', 'base:int',
'text',
'Number of iterations that the algorithm should be run for')]
ScriptedConfigModuleMixin.__init__(
self, configList,
{'Module (self)' : self})
# create all pipeline thingies
self._createITKPipeline()
self.sync_module_logic_with_config()
def close(self):
self._destroyITKPipeline()
ScriptedConfigModuleMixin.close(self)
ModuleBase.close(self)
def execute_module(self):
self.get_output(0).Update()
def get_input_descriptions(self):
return ('Feature image (ITK)', 'Initial level set (ITK)' )
def set_input(self, idx, inputStream):
if idx == 0:
self._nbcLS.SetFeatureImage(inputStream)
else:
self._nbcLS.SetInput(inputStream)
def get_output_descriptions(self):
return ('Image Data (ITK)',)
def get_output(self, idx):
return self._nbcLS.GetOutput()
def config_to_logic(self):
self._nbcLS.SetPropagationScaling(
self._config.propagationScaling)
self._nbcLS.SetAdvectionScaling(
self._config.advectionScaling)
self._nbcLS.SetCurvatureScaling(
self._config.curvatureScaling)
def logic_to_config(self):
self._config.propagationScaling = self._nbcLS.\
GetPropagationScaling()
self._config.advectionScaling = self._nbcLS.GetAdvectionScaling()
self._config.curvatureScaling = self._nbcLS.GetCurvatureScaling()
# --------------------------------------------------------------------
# END OF API CALLS
# --------------------------------------------------------------------
def _createITKPipeline(self):
# input: smoothing.SetInput()
# output: thresholder.GetOutput()
if3 = itk.Image[itk.F, 3]
self._nbcLS = itk.NarrowBandCurvesLevelSetImageFilter[if3,if3].New()
#self._nbcLS.SetMaximumRMSError( 0.1 );
        self._nbcLS.SetNumberOfIterations(500)
itk_kit.utils.setupITKObjectProgress(
self, self._nbcLS,
'NarrowBandCurvesLevelSetImageFilter',
'Evolving level set')
def _destroyITKPipeline(self):
"""Delete all bindings to components of the ITK pipeline.
"""
del self._nbcLS
| 32
| 76
| 0.600284
| 3,279
| 0.931534
| 0
| 0
| 0
| 0
| 0
| 0
| 1,006
| 0.285795
|
58087fdf8d89ae3ca538e157ca99613c2f7a205f
| 2,835
|
py
|
Python
|
setup.py
|
ThomasChiroux/ejabberd_external_auth_jwt
|
fce68cca70ca578b3c1c002a4dea2aa65e3150c1
|
[
"MIT"
] | null | null | null |
setup.py
|
ThomasChiroux/ejabberd_external_auth_jwt
|
fce68cca70ca578b3c1c002a4dea2aa65e3150c1
|
[
"MIT"
] | null | null | null |
setup.py
|
ThomasChiroux/ejabberd_external_auth_jwt
|
fce68cca70ca578b3c1c002a4dea2aa65e3150c1
|
[
"MIT"
] | null | null | null |
#
# Copyright 2018-2019 Happineo
#
"""setuptools installer for zamita."""
import os
import uuid
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_py import build_py
# local imports
from build_scripts.version import VersionInfo
HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, "README.md"), encoding="UTF-8").read()
NEWS = open(os.path.join(HERE, "NEWS.md"), encoding="UTF-8").read()
PROJECT_NAME = "ejabberd_external_auth_jwt"
VERSION = None
try:
VERSION = VersionInfo().version
except Exception:
pass
if VERSION is None or not VERSION:
try:
VERSION_FILE = open(f"{PROJECT_NAME}/RELEASE-VERSION", "r")
try:
VERSION = VERSION_FILE.readlines()[0]
VERSION = VERSION.strip()
except Exception:
VERSION = "0.0.0"
finally:
VERSION_FILE.close()
except IOError:
VERSION = "0.0.0"
class CustomBuild(build_py):
"""custom build class."""
def run(self):
"""Add target and write the release-VERSION file."""
# honor the --dry-run flag
if not self.dry_run:
target_dirs = []
target_dirs.append(os.path.join(self.build_lib, PROJECT_NAME))
target_dirs.append(PROJECT_NAME)
# mkpath is a distutils helper to create directories
for _dir in target_dirs:
self.mkpath(_dir)
try:
for _dir in target_dirs:
fobj = open(os.path.join(_dir, "RELEASE-VERSION"), "w")
fobj.write(VERSION)
fobj.close()
except Exception:
pass
super().run()
with open("requirements.txt") as f:
requirements = f.read().splitlines()
if requirements[0].startswith("-i"):
requirements = requirements[1:]
setup(
name=PROJECT_NAME,
version=VERSION,
description="ejabberd_external_auth_jwt",
long_description=README + "\n\n" + NEWS,
cmdclass={"build_py": CustomBuild},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: Linux",
],
keywords="",
author="Thomas Chiroux",
author_email="",
url="https://www.github.com/ThomasChiroux/ejabberd_external_auth_jwt",
license="LICENSE.txt",
packages=find_packages(exclude=["ez_setup"]),
package_data={"": ["*.rst", "*.md", "*.yaml", "*.cfg"]},
include_package_data=True,
zip_safe=False,
test_suite="pytest",
tests_require=[],
install_requires=requirements,
entry_points={
"console_scripts": [
"ejabberd_external_auth_jwt=ejabberd_external_auth_jwt.main:main_sync"
]
},
)
| 27.794118
| 82
| 0.618342
| 770
| 0.271605
| 0
| 0
| 0
| 0
| 0
| 0
| 808
| 0.285009
|
5808c926d701d604229b7c9061a8576e5eb62676
| 4,724
|
py
|
Python
|
Analysis/Feb2021/common_plotting.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | 2
|
2021-03-07T03:17:13.000Z
|
2021-03-07T03:17:16.000Z
|
Analysis/Feb2021/common_plotting.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | 1
|
2021-03-09T00:00:52.000Z
|
2021-03-09T00:00:52.000Z
|
Analysis/Feb2021/common_plotting.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
"""
Sep 21 -- A few of the plots used in analysis; far from a complete list, and
most are probably too specific to be useful again.
Moved useful functions from here.
"""
from __future__ import annotations
from typing import List, Callable, Optional, Union, TYPE_CHECKING
import numpy as np
from dat_analysis.analysis_tools.entropy import dat_integrated_sub_lin
from dat_analysis.plotting.plotly.hover_info import HoverInfo
if TYPE_CHECKING:
pass
def common_dat_hover_infos(datnum=True,
heater_bias=False,
fit_entropy_name: Optional[str] = None,
fit_entropy=False,
int_info_name: Optional[str] = None,
output_name: Optional[str] = None,
integrated_entropy=False,
sub_lin: bool = False,
sub_lin_width: Optional[Union[float, Callable]] = None,
int_info=False,
amplitude=False,
theta=False,
gamma=False,
) -> List[HoverInfo]:
"""
Returns a list of HoverInfos for the specified parameters. To do more complex things, append specific
HoverInfos before/after this.
Examples:
hover_infos = common_dat_hover_infos(datnum=True, amplitude=True, theta=True)
hover_group = HoverInfoGroup(hover_infos)
Args:
datnum ():
heater_bias ():
fit_entropy_name (): Name of saved fit_entropy if wanting fit_entropy
fit_entropy ():
int_info_name (): Name of int_info if wanting int_info or integrated_entropy
output_name (): Name of SE output to integrate (defaults to int_info_name)
integrated_entropy ():
sub_lin (): Whether to subtract linear term from integrated_info first
sub_lin_width (): Width of transition to avoid in determining linear terms
int_info (): amp/dT/sf from int_info
Returns:
List[HoverInfo]:
"""
hover_infos = []
if datnum:
hover_infos.append(HoverInfo(name='Dat', func=lambda dat: dat.datnum, precision='.d', units=''))
if heater_bias:
hover_infos.append(HoverInfo(name='Bias', func=lambda dat: dat.AWG.max(0) / 10, precision='.1f', units='nA'))
if fit_entropy:
hover_infos.append(HoverInfo(name='Fit Entropy',
func=lambda dat: dat.Entropy.get_fit(name=fit_entropy_name,
check_exists=True).best_values.dS,
precision='.2f', units='kB'), )
if integrated_entropy:
if output_name is None:
output_name = int_info_name
if sub_lin:
if sub_lin_width is None:
                raise ValueError(f'Must specify sub_lin_width if subtracting linear term from integrated entropy')
            elif not isinstance(sub_lin_width, Callable):
                # wrap the plain value in a function so the code below can always call it
                width_val = sub_lin_width
                sub_lin_width = lambda _: width_val
data = lambda dat: dat_integrated_sub_lin(dat, signal_width=sub_lin_width(dat), int_info_name=int_info_name,
output_name=output_name)
hover_infos.append(HoverInfo(name='Sub lin width', func=sub_lin_width, precision='.1f', units='mV'))
else:
data = lambda dat: dat.Entropy.get_integrated_entropy(
name=int_info_name,
data=dat.SquareEntropy.get_Outputs(
name=output_name).average_entropy_signal)
hover_infos.append(HoverInfo(name='Integrated Entropy',
func=lambda dat: np.nanmean(data(dat)[-10:]),
precision='.2f', units='kB'))
if int_info:
info = lambda dat: dat.Entropy.get_integration_info(name=int_info_name)
hover_infos.append(HoverInfo(name='SF amp',
func=lambda dat: info(dat).amp,
precision='.3f',
units='nA'))
hover_infos.append(HoverInfo(name='SF dT',
func=lambda dat: info(dat).dT,
precision='.3f',
units='mV'))
hover_infos.append(HoverInfo(name='SF',
func=lambda dat: info(dat).sf,
precision='.3f',
units=''))
return hover_infos
| 44.990476
| 120
| 0.556308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,362
| 0.288315
|
580a05b1f8e364040a8ccda54856a6eead097400
| 9,980
|
py
|
Python
|
Code/sphero_learn.py
|
rvarga601/IER
|
1cf05e641dea2fb3b4ad5329e3e556713cc199fe
|
[
"MIT"
] | null | null | null |
Code/sphero_learn.py
|
rvarga601/IER
|
1cf05e641dea2fb3b4ad5329e3e556713cc199fe
|
[
"MIT"
] | null | null | null |
Code/sphero_learn.py
|
rvarga601/IER
|
1cf05e641dea2fb3b4ad5329e3e556713cc199fe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 10 23:54:16 2021
@author: rolandvarga
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.signal import savgol_filter
import pickle
#%matplotlib qt
#%matplotlib inline
# Set to 1 to repeat SARSA learning (With Intel Core i7-8750H it takes
# around 70 minutes), 0 for loading previous result
REPEAT_LEARNING = 0
# Parameter to set which tests to do
DO_TEST1 = 1 # Simulate the system once and plot the trajectory
DO_TEST2 = 0 # Simulate the system 1000 times and plot success-rate
# Set to 1 to plot a projection of the state-value function V
PLOT_STATEVALUE = 1
#%% Load previous result
if REPEAT_LEARNING == 0:
filename='train_6x6x20x60000.pickle'
with open(filename, 'rb') as f:
cell_nums, dhat, durations, Q, reward_set, rhat, start_time, end_time, states_high, max_steps = pickle.load(f)
#%% SARSA learning
env = gym.make('SphericalRobot-v0')
#Function to choose the next action
def choose_action(state, eps):
action=0
if np.random.uniform(0, 1) < eps:
# Select a random action
action = env.action_space.sample()
else:
# Choose greedy action
action = np.array(np.unravel_index(np.argmax(Q[state], axis=None), Q[state].shape))
# action = np.argmax(Q[state])
return action
#Convert continuous state-space to discrete
def discretize_state(observation_c, low, high, cell_nums):
# Initialize the discretized observation
observation_d = []
# Loop through and discretize all 3 states
for state,low_val,high_val,c_num in zip(observation_c,low,high,cell_nums):
# Define intervals for the possible values
bins = np.linspace(low_val,high_val,c_num+1,endpoint=True)
# Discretize with NumPy function
state_d = np.digitize(state, bins, right=True)
# Check if the discrete values are valid
assert state_d > 0 and state_d <= c_num
observation_d.append(state_d-1) # -1 to have values start at 0
return observation_d
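# Worked example: with states_high = [6, 6, 2*pi/c] and cell_nums = [6, 6, 20]
# (the values used below), the first axis gets bins = [-6, -4, -2, 0, 2, 4, 6],
# so a value of 0.5 lands in np.digitize(0.5, bins, right=True) == 4, i.e.
# discrete cell 3 after the trailing -1.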
if REPEAT_LEARNING == 1:
# Learning parameters
epsilon = 0.3 # For start
total_episodes = 100
max_steps = 300
alpha = 0.1
gamma = 0.99
# The discretization of the states
states_high = np.array([6,6,2*np.pi/env.c]) # Set boundaries for the values
cell_nums = np.array([6,6,20]) # Set the number of discrete cells
#Initializing the Q-matrix
Q = np.ones(np.append(cell_nums,[3,3]))
#Function to update the Q-value
def update(state, state2, reward, action, action2):
predict = Q[state][action]
target = reward + gamma * Q[state2][action2]
Q[state][action] = Q[state][action] + alpha * (target - predict)
#Initializing the reward
# reward=0
reward_set = []
durations = []
start_time = time.time()
# Starting the SARSA learning
for episode in range(total_episodes):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), epsilon)
states = [state1]
while t < max_steps:
# Visualizing the training, TODO
# env.render()
# Getting the next state
state2, reward, done, info = env.step(action1)
# Note: The 3rd state is the difference between the wheel angles
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
# Choosing the next action
action2 = choose_action(tuple(state2_d), epsilon)
# Updating the Q-value
update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
# Update variables for next iteration
state1 = state2
action1 = action2
# Save state to be able to plot trajectories
states.append(state2)
            #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
reward_set.append(cumm_reward)
durations.append(t)
# plt.figure(0)
# x = np.array(states)[:,0]
# y = np.array(states)[:,1]
# plt.scatter(x, y)
# plt.xlim(-5, 5)
# plt.ylim(-5, 5)
# plt.show()
# Print time it took to run the learning
end_time = time.time()
print("--- %s seconds ---" % (end_time - start_time))
# Plot the filtered rewards during the learning
plt.figure(1)
#plt.plot(reward_set)
rhat = savgol_filter(reward_set, 501, 3) # window size 501, polynomial order 3
plt.plot(rhat)
#plt.ylim(-500, 500)
plt.xlabel(r"Episode [-]")
plt.ylabel(r"Reward [-]")
plt.legend()
plt.savefig('reward_learning.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot the filtered episode lengths during the learning
plt.figure(2)
#plt.plot(durations)
dhat = savgol_filter(durations, 51, 3) # window size 51, polynomial order 3
plt.plot(dhat)
plt.show()
#%% Test 1: Generate one trajectory
if DO_TEST1 == 1:
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
states = [state1]
actions = [action1]
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
states.append(state2)
actions.append(action2)
        #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
print(reward)
# Plot trajectory on 2D plot
plt.figure(3)
x = np.array(states)[:,0]
y = np.array(states)[:,1]
plt.scatter(x, y)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks(np.arange(-5, 6, 1))
plt.yticks(np.arange(-5, 6, 1))
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel(r"$x_1$ [m]")
plt.ylabel(r"$x_2$ [m]")
plt.legend()
plt.savefig('trajectory.eps', format='eps', bbox_inches='tight')
plt.show()
# Plot position states separately
plt.figure(4)
plt.plot(x, label="x1")
plt.plot(y, label="x2")
plt.xlabel(r"Time step [-]")
plt.ylabel(r"Coordinate [m]")
plt.legend()
plt.savefig('trajectory_plot.eps', format='eps', bbox_inches='tight')
plt.show()
#%% Test 2: Successful-unsuccessful tries
if DO_TEST2 == 1:
cumm_rewards = []
for k in range(1000):
t = 0
cumm_reward = 0
state1 = env.reset()
state1_d = discretize_state(state1, -states_high, states_high, cell_nums)
action1 = choose_action(tuple(state1_d), 0.0)
while t < max_steps:
#Visualizing the training
# env.render()
#Getting the next state
state2, reward, done, info = env.step(action1)
state1_d = discretize_state(np.array([state1[0],state1[1], state1[2]-state1[3]]),
-states_high, states_high, cell_nums)
state2_d = discretize_state(np.array([state2[0],state2[1], state2[2]-state2[3]]),
-states_high, states_high, cell_nums)
#Choosing the next action
action2 = choose_action(tuple(state2_d), 0.0)
#Learning the Q-value
#update(tuple(state1_d), tuple(state2_d), reward, tuple(action1), tuple(action2))
state1 = state2
action1 = action2
#states.append(state2)
#actions.append(action2)
            #Updating the respective values
t += 1
cumm_reward += reward
#If at the end of learning process
if done:
break
cumm_rewards.append(cumm_reward)
print("Average reward out of 1000 try: " + str(np.average(np.array(cumm_rewards))))
plt.figure(5)
plt.hist(cumm_rewards,np.array([-1000,0,1000]))
plt.show()
#%% Additional plot: State-value function
if PLOT_STATEVALUE == 1:
V = np.zeros([cell_nums[0],cell_nums[1]])
for k in range(V.shape[0]):
for l in range(V.shape[1]):
V[k,l]=np.amax(Q[k,l,:])
plt.figure(6)
plt.imshow(V, cmap='coolwarm', interpolation='nearest')
plt.colorbar()
plt.savefig('state_value.eps', format='eps', bbox_inches='tight')
plt.show()
| 30.993789
| 118
| 0.577154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,966
| 0.297194
|
580b61225012c491f65cb5e42655216093dbdb35
| 8,952
|
py
|
Python
|
HW7/kernel_eigenface.py
|
joycenerd/Machine_Learning_2021
|
ecb634a9f2f1112a393a9707ce69c3bc751c4542
|
[
"MIT"
] | 1
|
2021-11-18T09:22:21.000Z
|
2021-11-18T09:22:21.000Z
|
HW7/kernel_eigenface.py
|
joycenerd/Machine_Learning_2021
|
ecb634a9f2f1112a393a9707ce69c3bc751c4542
|
[
"MIT"
] | null | null | null |
HW7/kernel_eigenface.py
|
joycenerd/Machine_Learning_2021
|
ecb634a9f2f1112a393a9707ce69c3bc751c4542
|
[
"MIT"
] | null | null | null |
from scipy.spatial.distance import cdist
from numpy.linalg import eig, norm, pinv
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import argparse
import ntpath
import glob
import os
parser = argparse.ArgumentParser()
parser.add_argument("--option", type=str, default="PCA",
help="Choose which task to do: [PCA, LDA]")
parser.add_argument("--img-size", type=int, default=50,
help="image resize shape")
parser.add_argument("--kernel-type", type=str, default="linear",
help="kernel type for PCA/LDA: [linear, polynomial, rbf]")
parser.add_argument("--gamma", type=float, default=1,
help="gamma value for polynomial or rbf kernel")
parser.add_argument("--coeff", type=int, default=2,
help="coeff value for polynomial kernel")
parser.add_argument("--degree", type=int, default=20,
help="degree value for polynomial kernel")
args = parser.parse_args()
DATA_PATH = "./Yale_Face_Database/"
SAVE_PATH = "./results/"
def read_data(data_path):
img_size = args.img_size
data = []
filepath = []
label = []
for file in glob.glob(data_path+"*"):
# file path (135,)
filepath.append(file)
# data (135,10000)
image = Image.open(file)
image = image.resize((img_size, img_size), Image.ANTIALIAS)
image = np.array(image)
data.append(image.ravel())
# label (135,)
_, tail = ntpath.split(file)
label.append(int(tail[7:9]))
return np.array(data), filepath, np.array(label)
def get_eig(data, method, kernel_type="none"):
# get eigenvalue and eigenvector by np.linalg.eig()
eigval, eigvec = eig(data)
# sort by decreasing order of eigenvalues
idx = eigval.argsort()[::-1]
eigval = eigval[idx]
eigvec = eigvec[:, idx]
return eigval, eigvec
def get_kernel(X):
kernel_type = args.kernel_type
gamma = args.gamma
coeff = args.coeff
degree = args.degree
if kernel_type == "linear":
kernel = X@X.T
elif kernel_type == "polynomial":
kernel = np.power(gamma*(X@X.T)+coeff, degree)
elif kernel_type == "rbf":
kernel = np.exp(-gamma*cdist(X, X, metric="sqeuclidean"))
return kernel
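# For instance, with --kernel-type rbf the (i, j) entry of the returned matrix
# is exp(-gamma * ||x_i - x_j||^2), so the diagonal is all ones and a larger
# gamma makes the kernel more local. A quick sanity check on hypothetical data:
#   K = get_kernel(np.random.rand(5, 10))   # with kernel_type == "rbf"
#   assert np.allclose(np.diag(K), 1.0)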
def pca(x, kernel_type=None, kernel=None):
    if kernel_type is None:
x_bar = np.mean(x, axis=0)
cov = (x-x_bar)@(x-x_bar).T
eigval, eigvec = get_eig(cov, "pca")
# project data
eigvec = (x-x_bar).T@eigvec
else:
x_bar = 0
        # center the kernel
n = kernel.shape[0]
one = np.ones((n, n), dtype=float)
one *= 1.0/n
kernel = kernel - one @ kernel - kernel @ one + one @ kernel @ one
eigval, eigvec = get_eig(kernel, "pca", kernel_type)
for i in range(eigvec.shape[1]):
eigvec[:, i] *= 1/norm(eigvec[:, i], 1)
# get the top 25 eigenvectors
W = eigvec[:, :25].real
return x_bar, W
def draw_eigenface(W, name):
img_size = args.img_size
# save eigenface in 5x5 grid
for i in range(5):
for j in range(5):
idx = i * 5 + j
plt.subplot(5, 5, idx + 1)
plt.imshow(W[:, idx].reshape((img_size, img_size)), cmap='gray')
plt.axis('off')
plt.savefig(SAVE_PATH+name+".jpg")
def lda(X, label, kernel_type="none", dims=25):
(n, d) = X.shape
label = np.asarray(label)
c = np.unique(label)
mu = np.mean(X, axis=0)
S_w = np.zeros((d, d), dtype=np.float64)
S_b = np.zeros((d, d), dtype=np.float64)
# Sw=(xi-mj)*(xi-mj)^T
# Sb=nj*(mj-m)*(mj-m)^T
for i in c:
X_i = X[np.where(label == i)[0], :]
mu_i = np.mean(X_i, axis=0)
S_w += (X_i - mu_i).T @ (X_i - mu_i)
S_b += X_i.shape[0] * ((mu_i - mu).T @ (mu_i - mu))
# get eigenvalues and eigenvectors
S = pinv(S_w) @ S_b
eigen_val, eigen_vec = get_eig(S, "lda", kernel_type)
for i in range(eigen_vec.shape[1]):
eigen_vec[:, i] = eigen_vec[:, i] / norm(eigen_vec[:, i])
W = eigen_vec[:, :25].real
return W
def reconstruct(data, W, method, m=None):
img_size = args.img_size
if method == "pca":
reconstruction = (data-m)@W@W.T+m
elif method == "lda":
reconstruction = data@W@W.T
idx = 1
for i in range(2):
for j in range(5):
plt.subplot(2, 5, idx)
plt.imshow(reconstruction[idx-1, :].reshape(
(img_size, img_size)), cmap='gray')
plt.axis('off')
idx += 1
plt.savefig(SAVE_PATH+method+"_reconstruction"+".jpg")
def face_recognition(train_data, train_label, test_data, test_label):
num_of_train = train_label.shape[0]
num_of_test = test_label.shape[0]
dist_mat = np.zeros((num_of_test, num_of_train), dtype=float)
# calculate distance
for i in range(num_of_test):
dist = np.zeros(num_of_train, dtype=float)
for j in range(num_of_train):
dist[j] = np.sum((test_data[i, :]-train_data[j, :])**2)
dist = np.argsort(dist)
        dist_mat[i, :] = train_label[dist]  # labels of the nearest training samples
# KNN
K = [1, 3, 5, 7, 9, 11]
best_acc = 0.0
for k in K:
correct = 0.0
for i in range(num_of_test):
dist = dist_mat[i, :]
dist = dist[:k]
val, cnt = np.unique(dist, return_counts=True)
most_cnt = np.argmax(cnt)
pred = val[most_cnt]
if pred == test_label[i]:
correct += 1
acc = correct/num_of_test
print(f"Face recognition accuracy when K={k}: {acc:.4}")
if acc > best_acc:
best_acc = acc
best_K = k
print(f"Best K: {best_K}\tBest accuracy: {best_acc:.4}")
def project(train_data, test_data, W, m=0):
    # data dimensionality reduction
option = args.option
if option == "PCA":
train_proj = (train_data-m)@W
test_proj = (test_data-m)@W
elif option == "LDA":
train_proj = train_data@W
test_proj = test_data@W
return train_proj, test_proj
if __name__ == "__main__":
option = args.option
kernel_type = args.kernel_type
# read training and testing data
train_data, train_filepath, train_label = read_data(DATA_PATH+"Training/")
test_data, test_filepath, test_label = read_data(DATA_PATH+"Testing/")
data = np.vstack((train_data, test_data)) # (165,10000)
filepath = np.hstack((train_filepath, test_filepath)) # (165,)
label = np.hstack((train_label, test_label)) # (165,)
num_of_data = label.shape[0]
print(f"Num of data: {num_of_data}")
if option == "PCA":
rand_idx = np.random.randint(num_of_data, size=10)
samples = data[rand_idx, :] # (10,10000)
x_bar, W = pca(data)
draw_eigenface(W, "eigenface")
print("eigenface completed...")
reconstruct(samples, W, "pca", x_bar)
print("reconstruction completed...")
train_proj, test_proj = project(train_data, test_data, W, x_bar)
face_recognition(train_proj, train_label, test_proj, test_label)
print("pca face recognition completed...\n")
# python kernel_eigenface.py --option PCA --kernel-type polynomial --gamma 5 --coeff 1 --degree 2
# python kernel_eigenface.py --option PCA --kernel-type rbf --gamma 1e-7
kernel = get_kernel(data)
_, W = pca(data, kernel_type, kernel)
train_kernel = kernel[:train_label.shape[0], :]
test_kernel = kernel[train_label.shape[0]:, :]
train_proj, test_proj = project(train_kernel, test_kernel, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print(
f"kernel pca with {kernel_type} kernel face recognition completed...")
if option == "LDA":
rand_idx = np.random.randint(num_of_data, size=10)
samples = data[rand_idx, :] # (10,10000)
W = lda(data, label)
draw_eigenface(W, "fisherface")
print("fisherface completed...")
reconstruct(samples, W, "lda")
print("reconstruction completed...")
train_proj, test_proj = project(train_data, test_data, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print("lda face recognition completed...\n")
# python kernel_eigenface.py --option LDA --kernel-type polynomial --gamma 1 --coeff 2 --degree 20
# python kernel_eigenface.py --option PCA --kernel-type rbf --gamma 1e-4
kernel = get_kernel(data.T)
        W = lda(kernel, label, kernel_type)
train_kernel = kernel[:train_label.shape[0], :]
test_kernel = kernel[train_label.shape[0]:, :]
train_proj, test_proj = project(train_kernel, test_kernel, W)
face_recognition(train_proj, train_label, test_proj, test_label)
print(
f"kernel lda with {kernel_type} kernel face recognition completed...")
| 31.632509
| 106
| 0.601095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,791
| 0.200067
|
580c8290606fc382a91ddcb30034d1076a50dc58
| 18,427
|
py
|
Python
|
duqo/optimization/predict.py
|
canbooo/pyRDO
|
f7143438aa30cc79587c9f35fc9ff6aa262fc4d3
|
[
"BSD-3-Clause"
] | 11
|
2021-08-17T05:55:01.000Z
|
2022-02-03T13:16:42.000Z
|
duqo/optimization/predict.py
|
canbooo/pyRDO
|
f7143438aa30cc79587c9f35fc9ff6aa262fc4d3
|
[
"BSD-3-Clause"
] | null | null | null |
duqo/optimization/predict.py
|
canbooo/pyRDO
|
f7143438aa30cc79587c9f35fc9ff6aa262fc4d3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 15:33:47 2019
@author: Bogoclu
"""
import typing
import multiprocessing as mp
import warnings
import numpy as np
from scipy import stats
from .space import FullSpace
from duqo.proba import DS, MC, SUSE, ISPUD, FORM
from duqo.doe.lhs import make_doe
def _check_obj_wgt(obj_weights, num_obj):
""" Check obj_wgt argument passed to CondMom """
if obj_weights is None:
return None
try:
_ = obj_weights[0]
except (TypeError, IndexError):
obj_weights = np.ones(num_obj) * obj_weights
if len(obj_weights) != num_obj:
msg = f"Mismatch between the number of entries ({len(obj_weights)} in "
msg += f"obj_wgt and the number of stochastic objectives ({num_obj})."
raise ValueError(msg)
return np.array(obj_weights).ravel()
def _check_std_inds(use_std, num_obj):
""" Check use_std argument passed to CondMom and
convert it to a slice definition
"""
if isinstance(use_std, bool):
inds = [use_std] * num_obj
if len(inds) != num_obj:
msg = "Mismatch between the number of entries in "
msg += "use_std and the number of stochastic objectives."
raise ValueError(msg)
return np.array(use_std, dtype=bool)
def _find_integrator_cls(integrator):
"""
Find the Integrator class as defined by the string integrator
"""
integrator = integrator.upper()
if integrator == "DS":
IntCls = DS
elif integrator == "MC":
IntCls = MC
elif integrator == "ISPUD":
IntCls = ISPUD
elif integrator == "FORM":
IntCls = FORM
elif integrator == "SUSE":
IntCls = SUSE
else:
msg = f"Requested integrator {integrator} is not found."
raise ValueError(msg)
return IntCls
def _make_chain(methods: list):
"""Makes the chain given a list of method names"""
try:
first = methods[0]
except TypeError:
raise TypeError(f"methods must be a list of strings or classes, not {type(methods)}")
try:
_ = first.upper()
except AttributeError:
return methods
return [_find_integrator_cls(name.upper()) for name in methods]
def _n_para_chk(num_parallel: int = None):
""" Check the num_parallel argument as passed to CondProb """
n_procs = max(1, mp.cpu_count()) # could cpu_count ever be < 1?
if num_parallel is None or num_parallel > n_procs:
print(f"Number of parallel processes was set to {n_procs}")
return n_procs
return num_parallel
def _default_init(targ_prob: float, acc_max: float, num_inp: int,
num_para: int):
"""Decide the default integrator chain methods and arguments depending
on the problem
Parameters
----------
targ_prob : float
target failure probability
acc_max : float
target tolerance for the estimation
num_inp : int
number of stochastic inputs of the constraints
num_para : int
number of parallel processes to use
Returns
-------
integrators : list
Integrator classes, that are to be initiated
int_args : dict
Keyword arguments to pass to integrators
"""
if targ_prob * acc_max >= 1e-5:
if targ_prob * acc_max >= 1e-4:
integrators = ["MC"]
else:
integrators = ["SUSE", "MC"]
int_args = {"num_starts": 1, "batch_size": 1e5}
elif num_inp < 15:
integrators = ["SUSE", "DS"]
int_args = {"num_starts": 1}
else:
integrators = ["SUSE"]
int_args = {"num_starts": num_para}
print("Using", integrators, "as default chain.")
return integrators, int_args
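# Worked example: _default_init(1e-3, 0.1, num_inp=10, num_para=4) has
# targ_prob * acc_max == 1e-4, so it returns (["MC"], {"num_starts": 1,
# "batch_size": 1e5}); with targ_prob=1e-6 the product drops below 1e-5 and,
# since num_inp < 15, the chain becomes (["SUSE", "DS"], {"num_starts": 1}).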
def _is_worker(workers, name):
""" check if name is in workers list of classes"""
for worker in workers:
wname = read_integrator_name(worker)
if name.upper() in wname.upper():
return True
return False
def read_integrator_name(worker):
""" read the name of the integrator instance worker """
name = str(worker).split(".")[-1]
return "".join([c for c in name if c.isalnum()])
class CondMom:
"""Class to estimate conditional means
full_space : FullSpace instance
The definition of the optimization and stochastic spaces
base_doe : int or np.ndarray
set if a new doe should be calculated or the same one should
be transformed during the optimization.
if array, it should have zero mean and unit variance
but the original marginal distributions and correlation.
it should have same number of columns as stochastic variables
used in the objective. If integer, a base_doe with that number of
samples will be created
doe_size : int
The size of the doe to use. If base_doe is a numpy array, this
has no effect and doesn't have to be passed.
obj_wgt : float or iterable of floats:
If not None, these weights will be used for combining the
estimated mean and the variance/std. dev. If iterable, it
must be the same length as the number of stochastic input
variables as used for the objective function.
        If None, the variances are returned separately
use_std : bool or iterable of bools
Flag to use standard deviation (True) or the variance for the
estimation. If iterable, it must be the same length as the number
of stochastic input variables as used for the objective function.
"""
def __init__(self, full_space: FullSpace, base_doe: typing.Union[bool, np.ndarray] = True,
doe_size: int = 100, obj_wgt: typing.Optional[typing.Union[float, list, np.ndarray]] = None,
use_std: typing.Union[bool, list] = False):
self.full_space = full_space
num_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(use_std, num_obj)
self._obj_wgt = _check_obj_wgt(obj_wgt, num_obj)
self._doe_size = None
self._base_doe = None
self.doe_size = doe_size
self.base_doe = base_doe
@property
def base_doe(self):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
return self._base_doe
@base_doe.setter
def base_doe(self, new_doe):
"""Base doe to use for the moment estimation
Don't set this to an array with truncnorm and lognormal distributions
in the MultiVariate if you don't know exactly what you are doing.
"""
# Sanity checks for base_doe. Using parameters with multiple valid types
# may be an antipattern but it makes configuration easier from
# the user point of view. Tolerate this for a better user experience.
if isinstance(new_doe, np.ndarray):
if self._is_valid_base(new_doe): # raises errors
self._base_doe = new_doe.copy() # Make our copy.
return
try:
make_base_doe = bool(new_doe)
except ValueError:
return
if make_base_doe:
# Prepare doe with zero mean and unit variance
doe = self.full_space.inp_space.sto_obj_base_doe(self.doe_size)
self._base_doe = doe
return
        # new_doe is falsy: drop any cached doe so gen_doe draws a fresh one per call
self._base_doe = None
return
def _is_valid_base(self, new_doe):
# Assume numpy array
n_sto_obj_inps = len(self.full_space.inp_space.inds["sto_obj"])
if new_doe.shape[1] != n_sto_obj_inps:
msg = "base_doe must be one of None, bool or a 2d array "
msg += f"with shape (num_samples, num_stochastic_objective_variables={n_sto_obj_inps})."
raise TypeError(msg)
if max(abs(new_doe.mean(0).max()), abs(1 - new_doe.std(0).max())) > 0.5:
msg = "base_doe must have zero mean and unit variance."
raise ValueError(msg)
return True
@property
def doe_size(self):
"""Size of the base doe to use for the moment estimation"""
return self._doe_size
@doe_size.setter
def doe_size(self, new_size):
"""Size of the base doe to use for the moment estimation"""
self._doe_size = new_size
if self.base_doe is not None:
self.base_doe = new_size
@property
def obj_wgt(self):
"""Weights for the linear combination of cond. moments"""
return self._obj_wgt
@obj_wgt.setter
def obj_wgt(self, new_obj_wgt):
"""Weights for the linear combination of cond. moments"""
n_obj = len(self.full_space.obj_inds["sto"])
self._obj_wgt = _check_obj_wgt(new_obj_wgt, n_obj)
@property
def use_std(self):
"""Indexes to use std. dev. instead of variance"""
return self._use_std
@use_std.setter
def use_std(self, new_std):
"""Indexes to use std. dev. instead of variance"""
n_obj = len(self.full_space.obj_inds["sto"])
self._use_std = _check_std_inds(new_std, n_obj)
def gen_doe(self, x_opt):
"""Get DoE for the Moment estimation for x_opt"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
if self.base_doe is None:
return self.full_space.inp_space.sto_obj_doe(x_opt, self._doe_size)
mean, std = self.full_space.inp_space.opt_moms(x_opt)
names = self.full_space.inp_space.mulvar.names
names = [names[i] for i in self.full_space.inp_space.mv_inds("sto_obj")]
# Translating is not sufficient for lognormal and truncated normal
inds = [i for i, x in enumerate(names) if "log" in x or "trunc" in x]
if not inds:
return self.base_doe * std + mean
# Handle Lognormal
binds = np.ones(self.base_doe.shape[1], dtype=bool)
binds[inds] = False
base_doe = self.base_doe.copy()
base_doe[:, binds] = base_doe[:, binds] * std[binds] + mean[binds]
mean = mean[inds]
std = std[inds]
cur_mv = self.full_space.inp_space.opt_mulvar(x_opt, domain="sto_obj")
for ind in inds:
base_doe[:, ind] = cur_mv.dists[ind].marg.ppf(base_doe[:, ind])
return base_doe
def est_mom(self, x_opt):
""" Estimate conditional moments for a single optimization point x_opt
Conditional moments are E[Y | x_opt] and Var[Y | x_opt]
Parameters
----------
x_opt : numpy.ndarray
the coordinates of the optimization variables to compute
the moments
Returns
-------
mus : numpy.ndarray
Estimated means, or if obj_wgt was not None,
the combined mu + obj_wgt * sigma
sigmas : numpy.ndarray
Estimated variances or std. dev. depending on the settings.
only returned if obj_wgt is None.
"""
if x_opt.ndim == 1:
x_opt = x_opt.reshape((1, -1))
doe = self.gen_doe(x_opt)
res = self.full_space.sto_obj(doe, x_opt)
mus = np.mean(res, axis=0)
sigmas = np.zeros(mus.shape)
std_inds = self.use_std
sigmas[std_inds] = np.std(res[:, std_inds], axis=0, ddof=1)
var_inds = np.logical_not(std_inds)
sigmas[var_inds] = np.var(res[:, var_inds], axis=0, ddof=1)
if self.obj_wgt is None:
return mus, sigmas
return mus + self.obj_wgt * sigmas
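    # Hedged usage sketch (illustrative only; `fs` and `x_opt` are
    # hypothetical placeholders, a real FullSpace instance is required):
    #   mom = CondMom(fs, base_doe=True, doe_size=100, obj_wgt=None)
    #   mus, sigmas = mom.est_mom(x_opt)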
class CondProba:
"""A chain of integtrators for the calculation of the probability
This starts with a fast integrator to get an initial guess. If the
guess is too far away from target_pf, this stops further calculations
and returns the failure probability. Used for accelerating the
optimization process. Chains with a single element are also possible.
Parameters
----------
num_inputs : int
Number of stochastic inputs used for the constraints
target_fail_prob : float
        Target failure probability. If unsure, just set it sufficiently
        low, e.g. >= 1e-6. Note that numerical instabilities start around
        1e-9 due to scipy.stats returning nans and infs.
num_parallel : int
Number of parallel computations, if the used integrator supports it.
If passed, the entry in call_args will override this.
methods : None or list of str
Names of the methods to use for the estimation. If None, a default
        chain will be selected depending on the problem definition, which is
recommended for new users.
Currently the following names are supported:
MC - Crude Monte Carlo
DS - Directional simulation
FORM - First order reliability method
ISPUD - Importance sampling using design point (MPP)
call_args : None or list
        keyword argument dict to pass to the integrator's calc_fail_prob
as call arguments. Any argument in this will override the
initialization arguments with the same name i.e. target_fp and
num_parallel
target_tol : float
Target tolerance for the failure probability. Also used
for stopping the chain, if the computed failure probability
is either smaller than target_fp * target_tol or larger than
target_fp / target_tol.
"""
def __init__(self, target_fail_prob: float, num_inputs: int, num_parallel: int = 4,
methods: typing.Optional[typing.Union[str, list]] = None, call_args: typing.Optional[dict] = None,
target_tol: float = 0.01):
self.n_inp = num_inputs
num_para = _n_para_chk(num_parallel)
cargs = {"num_parallel": num_para, "multi_region": True}
if methods is None:
methods, cargs = _default_init(target_fail_prob, target_tol,
num_inputs, num_para)
if call_args is None:
self.call_args = {**cargs}
else:
self.call_args = {**cargs, **call_args}
self._tar_fp = target_fail_prob
self._tar_tol = target_tol
self.workers = _make_chain(methods)
self._prob_tol()
if "doe" in self.call_args.keys():
doe = self.call_args["doe"]
if doe.shape[1] != self.n_inp:
msg = f"Shape mismatch between the number of inputs ({self.n_inp}) "
msg += f"and the DoE {doe.shape[1]}"
raise ValueError()
mu_max = np.max(np.mean(doe, axis=0))
sig_max = np.max(np.std(doe, axis=0))
if abs(mu_max) > 1e-10 or abs(sig_max - 1) > 1e-10:
msg = "Zero mean and unit variance is required for doe "
msg += "in call_args, found mean == {mu_max} and "
msg += "sigma == {sig_max} columns"
raise ValueError(msg)
elif _is_worker(self.workers, "ISPUD"):
margs = [stats.norm() for k in range(self.n_inp)]
self.call_args["doe"] = make_doe(100, margs, num_tries=1000)
self.call_args["post_proc"] = False
self.call_args["num_parallel"] = num_para
@property
def target_fail_prob(self):
"""target failure probability"""
return self._tar_fp
@target_fail_prob.setter
def target_fail_prob(self, new_fp):
"""target failure probability"""
if new_fp <= 0 or new_fp > 0.9:
msg = "Target failure probability should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_fp = new_fp
self._prob_tol()
@property
def target_tol(self):
"""Target accuracy for failure probability estimation"""
return self._tar_tol
@target_tol.setter
def target_tol(self, new_tol):
"""Target accuracy for failure probability estimation"""
if new_tol <= 0 or new_tol > 0.9:
msg = "Target probability accuracy should lie in the interval (0,0.9]"
raise ValueError(msg)
self._tar_tol = new_tol
self._prob_tol()
def _prob_tol(self):
prob_tol = self._tar_fp * self._tar_tol
if _is_worker(self.workers, "MC") and prob_tol < 1e-6:
msg = "Crude Monte Carlo can be very inefficient for "
msg += "such low probabilities of failure."
warnings.warn(msg)
self.call_args["prob_tol"] = prob_tol
def calc_fail_prob(self, input_mv, constraints, const_args, verbose: int = 0):
""" Calculate failure probability using the worker chain
Parameters
----------
input_mv : MultiVar instance
Definition of the multivariate input
constraints : list
constraint functions to initialize the integrator
const_args : None or list
arguments to pass to the constraints
Returns:
--------
pof : float
probability of failure
feasible : bool
pof <= target_pf
"""
if not self.workers:
raise ValueError("No estimators defined")
for worker in self.workers:
estimator = worker(input_mv, constraints, const_args)
try:
pof = estimator.calc_fail_prob(**self.call_args)[0]
except ValueError:
if worker == self.workers[-1]:
print("Fatal error while calculating probability of failure with", worker)
print(input_mv)
print("Setting it to 100%.")
pof = 1.
continue
if verbose > 1:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
if pof > self._tar_fp:
prob_ratio = self._tar_fp / pof
else:
prob_ratio = pof / self._tar_fp
if prob_ratio <= self._tar_tol:
break
if verbose > 0:
try:
name = read_integrator_name(worker)
print(f"{name} estimated the failure probability as {pof:.2e}.")
except NameError:
pass
return pof, pof <= self._tar_fp
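    # Hedged usage sketch (illustrative only; `input_mv` and the constraint
    # function `g` are hypothetical placeholders, see calc_fail_prob for the
    # expected types):
    #   proba = CondProba(target_fail_prob=1e-4, num_inputs=3)
    #   pof, feasible = proba.calc_fail_prob(input_mv, [g], None, verbose=1)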
| 35.920078
| 115
| 0.61475
| 14,279
| 0.774896
| 0
| 0
| 3,354
| 0.182016
| 0
| 0
| 8,433
| 0.457644
|
580d37ef443f31d16e61142142999c038e7fd18f
| 5,352
|
py
|
Python
|
mymodule/twitter_json_parsing.py
|
sobkovych/TwitterFriendsMap
|
7fb1a844264334fba443feba3830cca2c86b55c9
|
[
"MIT"
] | null | null | null |
mymodule/twitter_json_parsing.py
|
sobkovych/TwitterFriendsMap
|
7fb1a844264334fba443feba3830cca2c86b55c9
|
[
"MIT"
] | null | null | null |
mymodule/twitter_json_parsing.py
|
sobkovych/TwitterFriendsMap
|
7fb1a844264334fba443feba3830cca2c86b55c9
|
[
"MIT"
] | 1
|
2020-02-26T09:20:17.000Z
|
2020-02-26T09:20:17.000Z
|
"""Parse json files."""
import json
import re
def search_for_key(final_key: str, tree: dict, space: list = None):
    """Search all data for a key.
    :param final_key: the key
    :param tree: the data
    :param space: list collecting found values (created if omitted)
    :return: all found values
    """
    # A fresh list per top-level call avoids the mutable-default-argument
    # pitfall, where results would accumulate across separate calls.
    if space is None:
        space = []
    if isinstance(tree, dict) and final_key in tree.keys():
        space.append(tree[final_key])
        tree.pop(final_key)
    if isinstance(tree, dict):
        for key in tree:
            search_for_key(final_key, tree[key], space)
    elif isinstance(tree, list):
        for item in tree:
            search_for_key(final_key, item, space)
    return space
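# Hedged example (illustrative only; note that the search mutates the input
# tree by popping every matched key):
#   data = {"a": {"id": 1}, "b": [{"id": 2}, {"x": 3}]}
#   search_for_key("id", data)  # -> [1, 2]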
def check_response(prompt: str, to_return: bool = False,
field: (tuple, None) = ({"yes", "y", "true", "t", "1"},
{"no", "n", "false", "f", "0"}),
expression: str = None,
max_len: int = None,
min_len: int = None) -> (bool, str):
"""Check responce by params.
:param prompt: input message
:param to_return: whether to return responce
:param field: values to avoid/look for
:param expression: regular expr check
:param max_len: max len check
:param min_len: min len check
:return: bool or value
"""
if field:
affirm = field[0] if field[0] else None
negat = field[1] if field[1] else None
else:
affirm = negat = None
while True:
resp = input(prompt).lower()
ret_value = resp if to_return else True
if affirm and resp in affirm:
return ret_value
if negat and resp in negat:
return False
if expression and re.fullmatch(expression, resp):
return ret_value
if min_len and len(resp) >= min_len:
return ret_value
if max_len and len(resp) <= max_len:
return ret_value
else:
print("The response is incorrect, try again!")
def get_step_by_step(obj):
"""Parse obj step by step.
:param obj: list, dict or other
:return: found value or None
"""
space = [(obj, "JSON")]
unsure = check_response("Ask to come back at every step?\n")
while True:
if isinstance(obj, dict):
print("This obj is a dict. These are the available keys:")
fill_len = len(max(obj.keys(), key=len)) + 10
for i, key in enumerate(obj):
if i % 2 == 0:
row = "{}.){}".format(i+1, key)
row = row.ljust(fill_len, " ")
else:
row = "{}.){}\n".format(i+1, key)
print(row, end='')
key = check_response("\nChose your key by name: ",
True, field=(obj, None))
obj = obj[key]
elif isinstance(obj, list):
print("This obj is a list.")
last_key = len(obj)-1
key = check_response(
"Choose an index from 0 to {}: ".format(last_key),
to_return=True,
field=({str(i) for i in range(last_key+1)}, None)
)
obj = obj[int(key)]
else:
print("Your final obj is: {}.".format(obj))
if check_response("Return: {} (y/n)?\n".format(obj)):
return obj
elif check_response("Come back to any step?\n"):
for i, step in enumerate(space):
print("Step {}: {}".format(i+1, step[1]))
l_space = len(space)
step = check_response("Which step to come back to "
"within range "
"[1, {}]?\n".format(l_space),
to_return=True,
field=(
{str(i+1) for i in range(l_space)},
None
))
step = int(step)
obj = space[step-1][0]
del space[step:]
continue
else:
print("Returning None...")
return None
space.append((obj, key))
if unsure:
while (len(space) > 1 and
check_response("Come back to previous step(y/n)?\n")):
space.pop()
obj = space[-1][0]
print("Now at step {}: {}".format(len(space), space[-1][1]))
def main(get: str, store: str = None, mode: str = "step"):
"""Find the leaf(user input) in the tree(method - user input).
(from 'kved.json' file)
:param store: where to store the result tree.
"""
with open(get, encoding="utf-8") as f:
tree = json.load(f)
if check_response("Analyse step by step(y/n)?\n"):
print(get_step_by_step(tree))
if check_response("Search for key(y/n)?\n"):
user_key = input("Enter your key: ")
print(search_for_key(user_key, tree=tree))
if store:
with open(store, mode="w+", encoding="utf-8") as outfile:
json.dump(tree, outfile, indent=4, ensure_ascii=False)
if __name__ == "__main__":
main("form.json")
| 32.047904
| 77
| 0.496076
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,340
| 0.250374
|
580d445ca9f82fbb66ddc5c165290139ca728a53
| 2,795
|
py
|
Python
|
meet/migrations/0001_initial.py
|
bjones-tech/speedy-meety
|
a7d557788a544b69fd6ad454d921d9cf02cfa636
|
[
"MIT"
] | null | null | null |
meet/migrations/0001_initial.py
|
bjones-tech/speedy-meety
|
a7d557788a544b69fd6ad454d921d9cf02cfa636
|
[
"MIT"
] | null | null | null |
meet/migrations/0001_initial.py
|
bjones-tech/speedy-meety
|
a7d557788a544b69fd6ad454d921d9cf02cfa636
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-17 02:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import meet.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Caller',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='none', max_length=200)),
('session_id', models.CharField(default='none', max_length=200)),
],
),
migrations.CreateModel(
name='Meeting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room_name', models.CharField(default='none', max_length=200)),
('room_id', models.CharField(default='none', max_length=200)),
('voice_id', models.CharField(default=meet.models.get_voice_id, max_length=200)),
('voice_used', models.BooleanField(default=False)),
('state', models.IntegerField(choices=[(0, 'Staged'), (1, 'In Progress'), (2, 'Completed')], default=0)),
('length', models.IntegerField(default=0)),
('topic_time_limit', models.IntegerField(default=0)),
('queue_next_topic', models.BooleanField(default=False)),
('complete_id', models.CharField(default='none', max_length=200)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='none', max_length=200)),
('message_id', models.CharField(default='none', max_length=200)),
('time_left', models.IntegerField(default=0)),
('recording', models.BooleanField(default=False)),
('transcription', models.TextField(blank=True, null=True)),
('meeting', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meet.Meeting')),
],
),
migrations.AddField(
model_name='meeting',
name='current_topic',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='meet.Topic'),
),
migrations.AddField(
model_name='caller',
name='meeting',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meet.Meeting'),
),
]
| 43.671875
| 131
| 0.586047
| 2,586
| 0.925224
| 0
| 0
| 0
| 0
| 0
| 0
| 464
| 0.166011
|
580de9ae168cc442b87908dac6e8235e1d9361f3
| 284
|
py
|
Python
|
setup.py
|
jrspruitt/pyfa_gpio
|
d0f189724b34a2a888dd01b33d237b79ace5becf
|
[
"MIT"
] | null | null | null |
setup.py
|
jrspruitt/pyfa_gpio
|
d0f189724b34a2a888dd01b33d237b79ace5becf
|
[
"MIT"
] | null | null | null |
setup.py
|
jrspruitt/pyfa_gpio
|
d0f189724b34a2a888dd01b33d237b79ace5becf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
version = '0.1'
setup(
name='pyfa_gpio',
version=version,
description='',
author='Jason Pruitt',
url='https://github.com/jrspruitt/pyfa_gpio',
license='MIT',
packages = find_packages(),
)
| 17.75
| 49
| 0.661972
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.34507
|
580ec4cbc90960d845dfc3bbcd5951593510c1c2
| 4,093
|
py
|
Python
|
dps/env/basic/path_discovery.py
|
alcinos/dps
|
5467db1216e9f9089376d2c71f524ced2382e4f6
|
[
"Apache-2.0"
] | null | null | null |
dps/env/basic/path_discovery.py
|
alcinos/dps
|
5467db1216e9f9089376d2c71f524ced2382e4f6
|
[
"Apache-2.0"
] | null | null | null |
dps/env/basic/path_discovery.py
|
alcinos/dps
|
5467db1216e9f9089376d2c71f524ced2382e4f6
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import numpy as np
from dps.register import RegisterBank
from dps.env import TensorFlowEnv
from dps.utils import Param, Config
def build_env():
return PathDiscovery()
config = Config(
build_env=build_env,
curriculum=[
dict(shape=(2, 2), threshold=6),
dict(shape=(3, 3), threshold=4),
dict(shape=(4, 4), threshold=2)
],
env_name='path_discovery',
shape=(3, 3),
T=10,
stopping_criteria="reward_per_ep,max",
)
class PathDiscovery(TensorFlowEnv):
""" The top-left cell stored an integers which says which of the other 3 corners is the rewarding corner.
Agents use the "look" to see which integer is present at the current cell.
"""
T = Param()
shape = Param()
n_val = Param()
require_discovery = Param(True)
def __init__(self, **kwargs):
self.action_names = '^ > v < look'.split()
self.action_shape = (len(self.action_names),)
self.rb = RegisterBank('PathDiscoveryRB', 'x y vision action', 'discovered',
[0.0, 0.0, -1.0, 0.0, 0.0], 'x y')
self.val_input = self._make_input(self.n_val)
self.test_input = self._make_input(self.n_val)
super(PathDiscovery, self).__init__()
def _make_input(self, batch_size):
start_x = np.random.randint(self.shape[0], size=(batch_size, 1))
start_y = np.random.randint(self.shape[1], size=(batch_size, 1))
grid = np.random.randint(3, size=(batch_size, np.product(self.shape)))
return np.concatenate([start_x, start_y, grid], axis=1).astype('f')
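    # Hedged note (added for clarity, not in the original): each input row is
    # [start_x, start_y, g_0, ..., g_{H*W-1}], where g_0 (the top-left cell)
    # encodes the rewarding corner: 0 -> top-right, 1 -> bottom-left,
    # 2 -> bottom-right (cf. the reward terms in build_step).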
def _build_placeholders(self):
self.input = tf.placeholder(tf.float32, (None, 2+np.product(self.shape)))
def _make_feed_dict(self, n_rollouts, T, mode):
if mode == 'train':
inp = self._make_input(n_rollouts)
elif mode == 'val':
inp = self.val_input
elif mode == 'test':
inp = self.test_input
else:
raise Exception("Unknown mode: {}.".format(mode))
if n_rollouts is not None:
inp = inp[:n_rollouts, :]
return {self.input: inp}
def build_init(self, r):
return self.rb.wrap(x=self.input[:, 0:1], y=self.input[:, 1:2],
vision=r[:, 2:3], action=r[:, 3:4], discovered=r[:, 4:5])
def build_step(self, t, r, actions):
x, y, vision, action, discovered = self.rb.as_tuple(r)
up, right, down, left, look = tf.split(actions, 5, axis=1)
new_y = (1 - down - up) * y + down * (y+1) + up * (y-1)
new_x = (1 - right - left) * x + right * (x+1) + left * (x-1)
new_y = tf.clip_by_value(new_y, 0.0, self.shape[0]-1)
new_x = tf.clip_by_value(new_x, 0.0, self.shape[1]-1)
idx = tf.cast(y * self.shape[1] + x, tf.int32)
new_vision = tf.reduce_sum(
tf.one_hot(tf.reshape(idx, (-1,)), np.product(self.shape)) * self.input[:, 2:],
axis=1, keepdims=True)
vision = (1 - look) * vision + look * new_vision
action = tf.cast(tf.reshape(tf.argmax(actions, axis=1), (-1, 1)), tf.float32)
top_left = tf.cast(tf.equal(idx, 0), tf.float32)
discovered = discovered + look * top_left
discovered = tf.minimum(discovered, 1.0)
new_registers = self.rb.wrap(x=new_x, y=new_y, vision=vision, action=action, discovered=discovered)
top_right = tf.cast(tf.equal(idx, self.shape[1]-1), tf.float32)
bottom_left = tf.cast(tf.equal(idx, (self.shape[0]-1) * self.shape[1]), tf.float32)
bottom_right = tf.cast(tf.equal(idx, self.shape[0] * self.shape[1] - 1), tf.float32)
reward = (
top_right * tf.cast(tf.equal(self.input[:, 2:3], 0), tf.float32) +
bottom_left * tf.cast(tf.equal(self.input[:, 2:3], 1), tf.float32) +
bottom_right * tf.cast(tf.equal(self.input[:, 2:3], 2), tf.float32)
)
if self.require_discovery:
reward = reward * discovered
return tf.fill((tf.shape(r)[0], 1), 0.0), reward, new_registers
| 36.221239
| 109
| 0.590765
| 3,598
| 0.879062
| 0
| 0
| 0
| 0
| 0
| 0
| 339
| 0.082824
|
5810e3bb40adfc4d345436082de3af836eeff704
| 14,812
|
py
|
Python
|
utils/github/query.py
|
malkfilipp/ClickHouse
|
79a206b092cd465731020f331bc41f6951dbe751
|
[
"Apache-2.0"
] | 1
|
2019-09-16T11:07:32.000Z
|
2019-09-16T11:07:32.000Z
|
utils/github/query.py
|
malkfilipp/ClickHouse
|
79a206b092cd465731020f331bc41f6951dbe751
|
[
"Apache-2.0"
] | null | null | null |
utils/github/query.py
|
malkfilipp/ClickHouse
|
79a206b092cd465731020f331bc41f6951dbe751
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import requests
class Query:
'''Implements queries to the Github API using GraphQL
'''
def __init__(self, token, max_page_size=100, min_page_size=5):
self._token = token
self._max_page_size = max_page_size
self._min_page_size = min_page_size
self.api_costs = {}
_MEMBERS = '''
organization(login: "{organization}") {{
team(slug: "{team}") {{
members(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
login
}}
}}
}}
}}
'''
def get_members(self, organization, team):
'''Get all team members for organization
Returns:
logins: a list of members' logins
'''
logins = []
not_end = True
query = Query._MEMBERS.format(organization=organization,
team=team,
max_page_size=self._max_page_size,
next='')
while not_end:
result = self._run(query)['organization']['team']
if result is None:
break
result = result['members']
not_end = result['pageInfo']['hasNextPage']
query = Query._MEMBERS.format(organization=organization,
team=team,
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
logins += [node['login'] for node in result['nodes']]
return logins
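    # Hedged usage sketch (the token and team slug below are hypothetical
    # placeholders; a valid GitHub personal access token is required):
    #   q = Query(token='<personal-access-token>')
    #   logins = q.get_members('yandex', 'clickhouse-core')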
_LABELS = '''
repository(owner: "yandex" name: "ClickHouse") {{
pullRequest(number: {number}) {{
labels(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
}}
}}
'''
def get_labels(self, pull_request):
        '''Fetches all labels for a given pull-request
Args:
pull_request: JSON object returned by `get_pull_requests()`
Returns:
labels: a list of JSON nodes with the name and color fields
'''
labels = [label for label in pull_request['labels']['nodes']]
not_end = pull_request['labels']['pageInfo']['hasNextPage']
query = Query._LABELS.format(number = pull_request['number'],
max_page_size = self._max_page_size,
next=f'after: "{pull_request["labels"]["pageInfo"]["endCursor"]}"')
while not_end:
result = self._run(query)['repository']['pullRequest']['labels']
not_end = result['pageInfo']['hasNextPage']
query = Query._LABELS.format(number=pull_request['number'],
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
labels += [label for label in result['nodes']]
return labels
_TIMELINE = '''
repository(owner: "yandex" name: "ClickHouse") {{
pullRequest(number: {number}) {{
timeline(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
... on CrossReferencedEvent {{
isCrossRepository
source {{
... on PullRequest {{
number
baseRefName
merged
labels(first: {max_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
}}
}}
target {{
... on PullRequest {{
number
}}
}}
}}
}}
}}
}}
}}
'''
def get_timeline(self, pull_request):
        '''Fetches all cross-reference events from the pull-request's timeline
Args:
pull_request: JSON object returned by `get_pull_requests()`
Returns:
events: a list of JSON nodes for CrossReferenceEvent
'''
events = [event for event in pull_request['timeline']['nodes'] if event and event['source']]
not_end = pull_request['timeline']['pageInfo']['hasNextPage']
query = Query._TIMELINE.format(number = pull_request['number'],
max_page_size = self._max_page_size,
next=f'after: "{pull_request["timeline"]["pageInfo"]["endCursor"]}"')
while not_end:
result = self._run(query)['repository']['pullRequest']['timeline']
not_end = result['pageInfo']['hasNextPage']
query = Query._TIMELINE.format(number=pull_request['number'],
max_page_size=self._max_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
events += [event for event in result['nodes'] if event and event['source']]
return events
_PULL_REQUESTS = '''
repository(owner: "yandex" name: "ClickHouse") {{
defaultBranchRef {{
name
target {{
... on Commit {{
history(first: {max_page_size} {next}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
oid
associatedPullRequests(first: {min_page_size}) {{
totalCount
nodes {{
... on PullRequest {{
number
author {{
login
}}
mergedBy {{
login
}}
url
baseRefName
baseRepository {{
nameWithOwner
}}
mergeCommit {{
oid
}}
labels(first: {min_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
name
color
}}
}}
timeline(first: {min_page_size}) {{
pageInfo {{
hasNextPage
endCursor
}}
nodes {{
... on CrossReferencedEvent {{
isCrossRepository
source {{
... on PullRequest {{
number
baseRefName
merged
labels(first: 0) {{
nodes {{
name
}}
}}
}}
}}
target {{
... on PullRequest {{
number
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
'''
def get_pull_requests(self, before_commit, login):
'''Get all merged pull-requests from the HEAD of default branch to the last commit (excluding)
Args:
before_commit (string-convertable): commit sha of the last commit (excluding)
login (string): filter pull-requests by user login
Returns:
pull_requests: a list of JSON nodes with pull-requests' details
'''
pull_requests = []
not_end = True
query = Query._PULL_REQUESTS.format(max_page_size=self._max_page_size,
min_page_size=self._min_page_size,
next='')
while not_end:
result = self._run(query)['repository']['defaultBranchRef']
default_branch_name = result['name']
result = result['target']['history']
not_end = result['pageInfo']['hasNextPage']
query = Query._PULL_REQUESTS.format(max_page_size=self._max_page_size,
min_page_size=self._min_page_size,
next=f'after: "{result["pageInfo"]["endCursor"]}"')
for commit in result['nodes']:
if str(commit['oid']) == str(before_commit):
not_end = False
break
# TODO: fetch all pull-requests that were merged in a single commit.
assert commit['associatedPullRequests']['totalCount'] <= self._min_page_size, \
f'there are {commit["associatedPullRequests"]["totalCount"]} pull-requests merged in commit {commit["oid"]}'
for pull_request in commit['associatedPullRequests']['nodes']:
if(pull_request['baseRepository']['nameWithOwner'] == 'yandex/ClickHouse' and
pull_request['baseRefName'] == default_branch_name and
pull_request['mergeCommit']['oid'] == commit['oid'] and
(not login or pull_request['author']['login'] == login)):
pull_requests.append(pull_request)
return pull_requests
_DEFAULT = '''
repository(owner: "yandex", name: "ClickHouse") {
defaultBranchRef {
name
}
}
'''
def get_default_branch(self):
'''Get short name of the default branch
Returns:
name (string): branch name
'''
return self._run(Query._DEFAULT)['repository']['defaultBranchRef']['name']
def _run(self, query):
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
headers = {'Authorization': f'bearer {self._token}'}
query = f'''
{{
{query}
rateLimit {{
cost
remaining
}}
}}
'''
request = requests_retry_session().post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
result = request.json()
if 'errors' in result:
                raise Exception(f'Errors occurred: {result["errors"]}')
import inspect
caller = inspect.getouterframes(inspect.currentframe(), 2)[1][3]
if caller not in self.api_costs.keys():
self.api_costs[caller] = 0
self.api_costs[caller] += result['data']['rateLimit']['cost']
return result['data']
else:
import json
raise Exception(f'Query failed with code {request.status_code}:\n{json.dumps(request.json(), indent=4)}')
| 41.96034
| 128
| 0.369498
| 14,768
| 0.997029
| 0
| 0
| 0
| 0
| 0
| 0
| 9,426
| 0.636376
|
5811d6d7e749badbaa3acffda48486b057d48a0e
| 4,404
|
py
|
Python
|
mortgage/mortgage.py
|
roelbertens/mortgages
|
b5fe415024933c772e6c7c57f041bf065ac86176
|
[
"MIT"
] | 1
|
2019-08-19T07:09:58.000Z
|
2019-08-19T07:09:58.000Z
|
mortgage/mortgage.py
|
roelbertens/mortgages
|
b5fe415024933c772e6c7c57f041bf065ac86176
|
[
"MIT"
] | null | null | null |
mortgage/mortgage.py
|
roelbertens/mortgages
|
b5fe415024933c772e6c7c57f041bf065ac86176
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
import matplotlib.pyplot as plt
class Mortgage:
"""
A mortgage overview of the total burden (incl. interest) and the monthly fees per fixed period
"""
def __init__(self, mortgage_amount, burden, periods, monthly_fees, name):
self.mortgage_amount = int(mortgage_amount)
self.burden = int(burden)
self.periods = periods.copy()
self.monthly_fees = [int(fee) for fee in monthly_fees]
self.name = name
def __add__(self, other):
if not other:
return self
mortgage_amount = self.mortgage_amount + other.mortgage_amount
burden = self.burden + other.burden
periods, monthly_fees = _align_mortgages(periods_a=self.periods,
periods_b=other.periods,
fees_a=self.monthly_fees,
fees_b=other.monthly_fees)
name = self.name
if other.name != self.name:
name += ' & ' + other.name
return Mortgage(mortgage_amount=mortgage_amount,
burden=burden,
periods=periods,
monthly_fees=monthly_fees,
name=name)
def __radd__(self, other):
return self + other
def __repr__(self):
text = (f'{self.name}: {format(self.mortgage_amount, ",d")} euro\n'
f'Total burden: {format(self.burden, ",d")} euro\n'
'Monthly fees:\n')
for period, fee in zip(self.periods, self.monthly_fees):
text += f'- {period} months: {fee} euro\'s\n'
return text
def plot(self, axes=None) -> plt.axes:
if axes is None:
fig, axes = plt.subplots(2, 1, figsize=(5, 8))
nr_periods = len(self.periods)
axes[0].bar(x=range(nr_periods), height=self.monthly_fees, tick_label=self.periods,
color='darkblue')
axes[0].set_xlabel('Period (months)')
axes[0].set_ylabel('Monthly fee\n', color='darkblue')
axes[0].set_title(f'Subsequent monthly fees\nover the specified periods\n\n{self}\n')
axes[1].bar(x=[0, 1], height=[self.mortgage_amount, self.burden], color='purple')
axes[1].set_ylabel('\nAmount (euro)', color='purple')
axes[1].set_xlabel('')
axes[1].set_xticks([0, 1])
axes[1].set_xticklabels([f'Mortgage\n{format(self.mortgage_amount, ",d")}',
f'Total burden\n{format(self.burden, ",d")}'])
plt.tight_layout()
return axes
def compare(self, others: list) -> plt.axes:
mortgages = [self] + others
nr_mortgages = len(mortgages)
fig, axes = plt.subplots(2, nr_mortgages, figsize=(nr_mortgages * 3, 8), sharey='row')
for col_axes, mortgage in zip(axes.T, mortgages):
mortgage.plot(axes=col_axes)
plt.tight_layout()
return axes
def _align_mortgages(periods_a: List[int],
periods_b: List[int],
fees_a: List[int],
                     fees_b: List[int]) -> Tuple[List[int], List[int]]:
""" Align periods and fees of two mortgages and compute the exact fee for each period.
:param periods_a: periods for Mortgage a
:param periods_b: periods for Mortgage b
:param fees_a: monthly fees for Mortgage a
:param fees_b: monthly fees for Mortgage b
:return: tuple of aligned periods and fees for the combined Mortgages a and b
"""
periods_a, periods_b, fees_a, fees_b = \
periods_a.copy(), periods_b.copy(), fees_a.copy(), fees_b.copy()
if not periods_a:
if not periods_b:
return [], []
else:
return periods_b, fees_b
elif not periods_b:
return periods_a, fees_a
if periods_b[0] < periods_a[0]:
periods_a, periods_b = periods_b, periods_a
fees_a, fees_b = fees_b, fees_a
first_period_fee = ([periods_a[0]], [fees_a[0] + fees_b[0]])
if periods_a[0] == periods_b[0]:
recursive_result = _align_mortgages(periods_a[1:], periods_b[1:], fees_a[1:], fees_b[1:])
else:
periods_b[0] -= periods_a[0]
recursive_result = _align_mortgages(periods_a[1:], periods_b, fees_a[1:], fees_b)
return tuple(a + b for a, b in zip(first_period_fee, recursive_result))
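# Hedged worked example (not part of the original module; the amounts and
# periods below are made up to illustrate how __add__ aligns two mortgages):
if __name__ == "__main__":
    a = Mortgage(mortgage_amount=200000, burden=260000,
                 periods=[12, 24], monthly_fees=[700, 650], name="Loan A")
    b = Mortgage(mortgage_amount=50000, burden=61000,
                 periods=[36], monthly_fees=[300], name="Loan B")
    # _align_mortgages([12, 24], [36], [700, 650], [300]) yields
    # periods [12, 24] with combined fees [1000, 950]
    print(a + b)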
| 38.631579
| 98
| 0.584923
| 2,937
| 0.666894
| 0
| 0
| 0
| 0
| 0
| 0
| 891
| 0.202316
|
58127a028ca7d4bb09bc84dec02f9d31b1e190c3
| 32,827
|
py
|
Python
|
training/wml_train.py
|
corvy/MAX-Object-Detector
|
2a21183e6bb9d0c35bac297ee3cf1fc67f4c048f
|
[
"Apache-2.0"
] | 1
|
2019-10-25T11:36:46.000Z
|
2019-10-25T11:36:46.000Z
|
training/wml_train.py
|
karankrish/MAX-Image-Segmenter
|
2d5d080f4a3d7db1aa4cf320ab35b3e157a6f485
|
[
"Apache-2.0"
] | 1
|
2019-07-08T17:58:45.000Z
|
2019-09-05T18:07:45.000Z
|
training/wml_train.py
|
karankrish/MAX-Image-Segmenter
|
2d5d080f4a3d7db1aa4cf320ab35b3e157a6f485
|
[
"Apache-2.0"
] | 1
|
2019-10-30T20:42:46.000Z
|
2019-10-30T20:42:46.000Z
|
#!/usr/bin/env python
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import re
import shutil
import sys
import tarfile
import time
from enum import Enum
from zipfile import ZipFile
from utils.debug import debug
from utils.os_util import copy_dir
from utils.config import YAMLReader, ConfigParseError, ConfigurationError
from utils.wml import WMLWrapper, WMLWrapperError
from utils.cos import COSWrapper, COSWrapperError, BucketNotFoundError
class ExitCode(Enum):
"""
Defines the exit codes for this utility
"""
SUCCESS = 0
INCORRECT_INVOCATION = 1
ENV_ERROR = 2
CONFIGURATION_ERROR = 3
PRE_PROCESSING_FAILED = 4
TRAINING_FAILED = 5
DOWNLOAD_FAILED = 6
EXTRACTION_FAILED = 7
COPY_FAILED = 8
TRAINING_LOG_NAME = 'training-log.txt' # fixed; do not change
TRAINING_OUTPUT_ARCHIVE_NAME = 'model_training_output.tar.gz' # do not change
def print_banner(message):
print('# --------------------------------------------------------')
print('# {}'.format(message))
print('# --------------------------------------------------------')
# --------------------------------------------------------
# Process command line parameters
# --------------------------------------------------------
def process_cmd_parameters():
"""
Process command line parameters. This function terminates the
application if an invocation error was detected.
:returns: dict, containing two properties: 'config_file' and
'command'
:rtype: dict
"""
def display_usage():
print('--------------------------------------------------------'
'--------------------------------------------')
print('Train a MAX model using Watson Machine Learning. ')
print('\nUsage: {} <training_config_file> <command> \n'
.format(sys.argv[0]))
print('Valid commands:')
print(' clean '
'removes local model training artifacts')
print(' prepare '
'generates model training artifacts but skips model training')
print(' train '
'generates model training artifacts and trains the model')
print(' package '
'generates model training artifacts, trains the model, and '
'performs post processing')
print(' package <training_id> '
'monitors the training status and performs post processing')
print('--------------------------------------------------------'
'--------------------------------------------')
if len(sys.argv) <= 1:
# no arguments were provided; display usage information
display_usage()
sys.exit(ExitCode.SUCCESS.value)
if os.path.isfile(sys.argv[1]) is False:
print('Invocation error. "{}" is not a file.'.format(sys.argv[1]))
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
if len(sys.argv) < 3:
print('Invocation error. You must specify a command.')
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
cmd_parameters = {
'config_file': sys.argv[1],
'command': sys.argv[2].strip().lower(),
'training_id': None
}
if cmd_parameters['command'] not in ['clean',
'prepare',
'train',
'package']:
print('Invocation error. "{}" is not a valid command.'
.format(sys.argv[2]))
display_usage()
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
if cmd_parameters['command'] == 'package':
# package accepts as optional parameter an existing training id
if len(sys.argv) == 4:
cmd_parameters['training_id'] = sys.argv[3]
return cmd_parameters
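# Hedged invocation examples (the config file name and training id below are
# placeholders):
#   python wml_train.py train-config.yaml prepare
#   python wml_train.py train-config.yaml package training-BA8P0BgZg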
cmd_parameters = process_cmd_parameters()
# --------------------------------------------------------
# Verify that the required environment variables are set
# --------------------------------------------------------
def verify_env_settings():
print_banner('Checking environment variables ...')
var_missing = False
# WML environment variables
for var_name in ['ML_ENV', 'ML_APIKEY', 'ML_INSTANCE']:
if os.environ.get(var_name) is None:
print(' Error. Environment variable {} is not defined.'
.format(var_name))
var_missing = True
# Cloud Object Storage environment variables
for var_name in ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']:
if os.environ.get(var_name) is None:
print(' Error. Environment variable {} is not defined.'
.format(var_name))
var_missing = True
if var_missing:
sys.exit(ExitCode.ENV_ERROR.value)
verify_env_settings()
# --------------------------------------------------------
# Process configuration file
# --------------------------------------------------------
print_banner('Validating configuration file "{}" ...'
.format(cmd_parameters['config_file']))
config = None
try:
r = YAMLReader(cmd_parameters['config_file'])
config = r.read()
except ConfigurationError as ce:
for missing_setting in ce.get_missing_settings():
print('Error. Configuration file "{}" does not'
' define setting "{}".'
.format(cmd_parameters['config_file'],
missing_setting.get('yaml_path')))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except ConfigParseError as cpe:
print('Error. Configuration file "{}" is invalid: {}'
.format(cmd_parameters['config_file'],
str(cpe)))
sys.exit(ExitCode.CONFIGURATION_ERROR.value)
except FileNotFoundError:
print('Error. Configuration file "{}" was not found.'
.format(cmd_parameters['config_file']))
    sys.exit(ExitCode.INCORRECT_INVOCATION.value)
debug('Using the following configuration settings: ', config)
cw = None # COS wrapper handle
w = None # WML wrapper handle
training_guid = cmd_parameters.get('training_id', None)
if cmd_parameters['command'] == 'package' and training_guid is not None:
# monitor status of an existing training run; skip preparation steps
try:
# instantiate Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Verifying that "{}" is a valid training id ...'
.format(training_guid))
try:
# instantiate Watson Machine Learning wrapper
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
# verify that the provided training id is valid
if not w.is_known_training_id(training_guid):
print('Error. "{}" is an unknown training id.'
.format(training_guid))
sys.exit(ExitCode.INCORRECT_INVOCATION.value)
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
print(' Exception type: {}'.format(type(ex)))
print(' Exception: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
# --------------------------------------------------------
# Remove existing model training artifacts
# --------------------------------------------------------
print_banner('Removing temporary work files ...')
for file in [config['model_code_archive']]:
if os.path.isfile(file):
os.remove(file)
# terminate if the "clean" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'clean':
print('Skipping model training.')
sys.exit(ExitCode.SUCCESS.value)
# --------------------------------------------------------
# Verify the Cloud Object Storage configuration:
# - the results bucket must exist
# --------------------------------------------------------
print_banner('Verifying Cloud Object Storage setup ...')
try:
# instantiate the Cloud Object Storage wrapper
cw = COSWrapper(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
print(' Verifying that training results bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['results_bucket']))
# make sure the training results bucket exists;
# it can be empty, but doesn't have to be
cw.create_bucket(config['results_bucket'],
exist_ok=True)
print(' Verifying that training data bucket "{}" exists. '
' It will be created if necessary ...'
.format(config['training_bucket']))
# make sure the training data bucket exists;
cw.create_bucket(config['training_bucket'],
exist_ok=True)
        # if there are any initial_model artifacts in the training bucket
# remove them
im_object_list = cw.get_object_list(config['training_bucket'],
key_name_prefix='initial_model')
if len(im_object_list) > 0:
print(' Removing model artifacts from training bucket "{}" ... '
.format(config['training_bucket']))
cw.delete_objects(config['training_bucket'], im_object_list)
# is there training data in the bucket?
no_training_data = cw.is_bucket_empty(config['training_bucket'])
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
config['local_data_dir'] = \
os.path.abspath(config['local_data_dir'])
# add initial_model artifacts to bucket
if config.get('local_data_dir') and \
os.path.isdir(config['local_data_dir']):
initial_model_path = os.path.join(config['local_data_dir'],
'initial_model')
print(' Looking for model artifacts in "{}" ... '
.format(initial_model_path))
for file in glob.iglob(initial_model_path + '/**/*',
recursive=True):
if os.path.isfile(file):
print(' Uploading model artifact "{}" to '
'training data bucket "{}" ...'
.format(file[len(initial_model_path):].lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
'initial_model',
file[len(initial_model_path):]
.lstrip('/'))
print(' Looking for training data in bucket "{}" ... '
.format(config['training_bucket']))
# if there's no training data in the training data bucket
# upload whatever is found locally
if no_training_data:
print(' No training data was found.')
if config.get('local_data_dir', None) is None:
# error. there is no local training data either;
# abort processing
print('Error. No local training data was found. '
'Please check your configuration settings.')
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# verify that local_data_dir is a directory
if not os.path.isdir(config['local_data_dir']):
print('Error. "{}" is not a directory or cannot be accessed.'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# upload training data from the local data directory
print(' Looking for training data in "{}" ... '
.format(config['local_data_dir']))
file_count = 0
ignore_list = []
ignore_list.append(os.path.join(config['local_data_dir'],
'README.md'))
for file in glob.iglob(config['local_data_dir'] + '/**/*',
recursive=True):
if file in ignore_list or file.startswith(initial_model_path):
continue
if os.path.isfile(file):
print(' Uploading "{}" to training data bucket "{}" ...'
.format(file[len(config['local_data_dir']):]
.lstrip('/'),
config['training_bucket']))
cw.upload_file(file,
config['training_bucket'],
config.get('training_data_key_prefix'),
file[len(config['local_data_dir']):]
.lstrip('/'))
file_count += 1
if file_count == 0:
print('Error. No local training data was found in "{}".'
.format(config['local_data_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
else:
print('Uploaded {} data files to training data bucket "{}".'
.format(file_count, config['training_bucket']))
else:
print(' Found data in training data bucket "{}". Skipping upload.'
.format(config['training_bucket']))
except ValueError as ve:
print('Error. {}'.format(ve))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except FileNotFoundError as fnfe:
print('Error. {}'.format(fnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. Cloud Object Storage preparation failed: {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# --------------------------------------------------------
# Create model building ZIP
# --------------------------------------------------------
print_banner('Locating model building files ...')
#
# 1. Assure that the model building directory
# config['model_building_code_dir'] exists
# 2. If there are no files in config['model_building_code_dir']:
# - determine whether model-building code is stored in a COS bucket
# - download model-building code to config['model_building_code_dir']
# 3. ZIP files in config['model_building_code_dir']
try:
# task 1: make sure the specified model building code directory exists
os.makedirs(config['model_building_code_dir'], exist_ok=True)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. Model building code preparation failed: {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if len(os.listdir(config['model_building_code_dir'])) == 0:
# Task 2: try to download model building code from Cloud Object Storage
# bucket
#
print('No model building code was found in "{}".'
.format(config['model_building_code_dir']))
try:
if config.get('model_bucket') is None or \
cw.is_bucket_empty(config['model_bucket'],
config.get('model_key_prefix')):
print('Error. Model building code preparation failed: '
'No source code was found locally in "{}" or '
' in Cloud Object Storage.'
.format(config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print('Found model building code in bucket "{}".'
.format(config['model_bucket']))
for object_key in cw.get_object_list(config['model_bucket'],
config.get(
'model_key_prefix')):
cw.download_file(config['model_bucket'],
object_key,
config['model_building_code_dir'])
except BucketNotFoundError as bnfe:
print('Error. {}'.format(bnfe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except COSWrapperError as cwe:
print('Error. {}'.format(cwe))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
except Exception as ex:
debug(' Exception type: {}'.format(type(ex)))
print('Error. {}'.format(ex))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
print_banner('Packaging model building files in "{}" ...'
.format(config['model_building_code_dir']))
try:
shutil.make_archive(re.sub('.zip$', '', config['model_code_archive']),
'zip',
config['model_building_code_dir'])
except Exception as ex:
print('Error. Packaging failed: {}'.format(str(ex)))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
if os.path.isfile(config['model_code_archive']):
# display archive content
print('Model building package "{}" contains the following entries:'
.format(config['model_code_archive']))
with ZipFile(config['model_code_archive'], 'r') as archive:
for entry in sorted(archive.namelist()):
print(' {}'.format(entry))
# check archive size; WML limits size to 4MB
archive_size = os.path.getsize(config['model_code_archive'])
archive_size_limit = 1024 * 1024 * 4
if archive_size > archive_size_limit:
print('Error. Your model building code archive "{}" is too large '
              '({:.2f} MB). WML rejects archives larger than {} MB. '
'Please remove unnecessary files from the "{}" directory '
'and try again.'
.format(config['model_code_archive'],
archive_size / (1024 * 1024),
archive_size_limit / (1024 * 1024),
config['model_building_code_dir']))
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# Status:
# - The model training job can now be started.
if cmd_parameters['command'] == 'prepare':
print('Skipping model training and post processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# ---------------------------------------------------------
# Start model training
# --------------------------------------------------------
print_banner('Starting model training ...')
try:
# instantiate the WML client
w = WMLWrapper(os.environ['ML_ENV'],
os.environ['ML_APIKEY'],
os.environ['ML_INSTANCE'])
except WMLWrapperError as wmle:
print(wmle)
sys.exit(ExitCode.PRE_PROCESSING_FAILED.value)
# define training metadata
model_definition_metadata = {
w.get_client().repository.DefinitionMetaNames.NAME:
config['training_run_name'],
w.get_client().repository.DefinitionMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().repository.DefinitionMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_NAME:
config['framework_name'],
w.get_client().repository.DefinitionMetaNames.FRAMEWORK_VERSION:
config['framework_version'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_NAME:
config['runtime_name'],
w.get_client().repository.DefinitionMetaNames.RUNTIME_VERSION:
config['runtime_version'],
w.get_client().repository.DefinitionMetaNames.EXECUTION_COMMAND:
config['training_run_execution_command']
}
training_configuration_metadata = {
w.get_client().training.ConfigurationMetaNames.NAME:
config['training_run_name'],
w.get_client().training.ConfigurationMetaNames.AUTHOR_NAME:
config['author_name'],
w.get_client().training.ConfigurationMetaNames.DESCRIPTION:
config['training_run_description'],
w.get_client().training.ConfigurationMetaNames.COMPUTE_CONFIGURATION:
{'name': config['training_run_compute_configuration_name']},
w.get_client().training.ConfigurationMetaNames
.TRAINING_DATA_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'source': {
'bucket': config['training_bucket'],
},
'type': 's3'
},
w.get_client().training.ConfigurationMetaNames
.TRAINING_RESULTS_REFERENCE: {
'connection': {
'endpoint_url': config['cos_endpoint_url'],
'access_key_id': os.environ['AWS_ACCESS_KEY_ID'],
'secret_access_key': os.environ['AWS_SECRET_ACCESS_KEY']
},
'target': {
'bucket': config['results_bucket'],
},
'type': 's3'
}
}
print('Training configuration summary:')
print(' Training run name : {}'.format(config['training_run_name']))
print(' Training data bucket : {}'.format(config['training_bucket']))
print(' Results bucket : {}'.format(config['results_bucket']))
print(' Model-building archive: {}'.format(config['model_code_archive']))
try:
training_guid = w.start_training(config['model_code_archive'],
model_definition_metadata,
training_configuration_metadata)
except Exception as ex:
print('Error. Model training could not be started: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
print('Model training was started. Training id: {}'.format(training_guid))
# --------------------------------------------------------
# Monitor the training run until it completes
# successfully or throws an error
# --------------------------------------------------------
#
print('Checking model training status every {} seconds.'
' Press Ctrl+C once to stop monitoring or '
' press Ctrl+C twice to cancel training.'
.format(config['training_progress_monitoring_interval']))
print('Status - (p)ending (r)unning (e)rror (c)ompleted or canceled:')
try:
training_in_progress = True
while training_in_progress:
try:
# poll training status; ignore server errors (e.g. caused
# by temporary issues not specific to our training run)
status = w.get_training_status(training_guid,
ignore_server_error=True)
if status:
training_status = status.get('state') or '?'
else:
# unknown status; continue and leave it up to the user
# to terminate monitoring
training_status = '?'
# display training status indicator
# [p]ending
# [r]unning
# [c]ompleted
# [e]rror
# [?]
print(training_status[0:1], end='', flush=True)
if training_status == 'completed':
# training completed successfully
print('\nTraining completed.')
training_in_progress = False
elif training_status == 'error':
print('\nTraining failed.')
# training ended with error
training_in_progress = False
elif training_status == 'canceled':
print('\nTraining canceled.')
# training ended with error
training_in_progress = False
else:
time.sleep(
int(config['training_progress_monitoring_interval']))
except KeyboardInterrupt:
print('\nTraining monitoring was stopped.')
try:
input('Press Ctrl+C again to cancel model training or '
'any other key to continue training.')
print('To resume monitoring, run "python {} {} {} {}"'
.format(sys.argv[0],
sys.argv[1],
'package',
training_guid))
sys.exit(ExitCode.SUCCESS.value)
except KeyboardInterrupt:
try:
w.cancel_training(training_guid)
print('\nModel training was canceled.')
except Exception as ex:
print('Model training could not be canceled: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Model training monitoring failed with an exception: {}'
.format(ex))
debug(' Exception type: {}'.format(type(ex)))
debug(' Exception: {}'.format(ex))
sys.exit(ExitCode.TRAINING_FAILED.value)
# Status:
# - The model training job completed.
# - The training log file TRAINING_LOG_NAME can now be downloaded from COS.
results_references = None
try:
# --------------------------------------------------------
# Identify where the training artifacts are stored on COS
# {
# 'bucket': 'ademoout3',
# 'model_location': 'training-BA8P0BgZg'
# }
# Re-try to fetch information multiple times in case the WML service
# encounters a temporary issue
max_tries = 5
ise = True
for count in range(max_tries):
results_references = \
w.get_training_results_references(training_guid,
ignore_server_error=ise)
if results_references:
# got a response; move on
break
if count + 1 == max_tries:
# last attempt; if it fails stop trying
ise = False
time.sleep(3)
# --------------------------------------------------------
# Download the training log file from the results
# bucket on COS to config['local_download_directory']
# --------------------------------------------------------
print_banner('Downloading training log file "{}" ...'
.format(TRAINING_LOG_NAME))
training_log = cw.download_file(results_references['bucket'],
TRAINING_LOG_NAME,
config['local_download_directory'],
results_references['model_location'])
if training_status in ['error', 'canceled']:
# Training ended with an error or was canceled.
# Notify the user where the training log file was stored and exit.
print('The training log file "{}" was saved in "{}".'
.format(TRAINING_LOG_NAME,
config['local_download_directory']))
sys.exit(ExitCode.TRAINING_FAILED.value)
except Exception as ex:
print('Error. Download of training log file "{}" failed: {}'
.format(TRAINING_LOG_NAME, ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# terminate if the "train" command was specified
# when the utility was invoked
if cmd_parameters['command'] == 'train':
print('Skipping post-processing steps.')
sys.exit(ExitCode.SUCCESS.value)
# - If training completed successfully, the trained model archive
# TRAINING_OUTPUT_ARCHIVE_NAME can now be downloaded from COS.
try:
# --------------------------------------------------------
# Download the trained model archive from the results
# bucket on COS to LOCAL_DOWNLOAD_DIRECTORY
# --------------------------------------------------------
print_banner('Downloading trained model archive "{}" ...'
.format(TRAINING_OUTPUT_ARCHIVE_NAME))
training_output = cw.download_file(results_references['bucket'],
TRAINING_OUTPUT_ARCHIVE_NAME,
config['local_download_directory'],
results_references['model_location'])
except Exception as ex:
print('Error. Trained model archive "{}" could not be '
'downloaded from Cloud Object Storage bucket "{}": {}'
.format(TRAINING_OUTPUT_ARCHIVE_NAME,
results_references['bucket'],
ex))
sys.exit(ExitCode.DOWNLOAD_FAILED.value)
# Status:
# - The trained model archive and training log file were
# downloaded to the directory identified by
# config['local_download_directory'].
# --------------------------------------------------------
# Extract the downloaded model archive
# --------------------------------------------------------
archive = os.path.join(config['local_download_directory'],
TRAINING_OUTPUT_ARCHIVE_NAME)
print_banner('Extracting trained model artifacts from "{}" ...'
.format(archive))
extraction_ok = False
try:
if tarfile.is_tarfile(archive):
tf = tarfile.open(archive,
mode='r:gz')
for file in tf.getnames():
print(file)
tf.extractall(config['local_download_directory'])
print('Trained model artifacts are located in the "{}" directory.'
.format(config['local_download_directory']))
extraction_ok = True
else:
print('Error. The downloaded file "{}" is not a valid tar file.'
.format(archive))
except FileNotFoundError:
print('Error. "{}" was not found.'.format(archive))
except tarfile.TarError as te:
print(te)
if extraction_ok is False:
sys.exit(ExitCode.EXTRACTION_FAILED.value)
# Status:
# - The trained model archive was downloaded to LOCAL_DOWNLOAD_DIRECTORY.
    #   The directory structure should look as follows:
# /trained_model/<framework-name-1>/<format>/<file-1>
# /trained_model/<framework-name-1>/<format>/<file-2>
# /trained_model/<framework-name-1>/<format-2>/<subdirectory>/<file-3>
# /trained_model/<framework-name-2>/<file-4>
# -------------------------------------------------------------------
# Copy the appropriate framework and format specific artifacts
# to the final destination, where the Docker build will pick them up
# -------------------------------------------------------------------
trained_model_path = config['trained_model_path']
trained_assets_dir = os.path.join(config['local_download_directory'],
trained_model_path)
print_banner('Copying trained model artifacts from "{}" to "{}" ...'
.format(trained_assets_dir,
config['docker_model_asset_directory']))
try:
copy_dir(trained_assets_dir,
config['docker_model_asset_directory'])
except Exception as ex:
print('Error. Trained model files could not be copied: {}'.format(str(ex)))
sys.exit(ExitCode.COPY_FAILED.value)
# Status:
# - The trained model artifacts were copied to the Docker image's asset
# directory, where the model-serving microservice will load them from.
print('Done')
sys.exit(ExitCode.SUCCESS.value)
| 41.03375
| 79
| 0.559113
| 296
| 0.009017
| 0
| 0
| 0
| 0
| 0
| 0
| 14,373
| 0.437841
|
58146fc12bca47d19303bba6584622a1dcef7fcd
| 57
|
py
|
Python
|
tests/unit/sim_client/__init__.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 8
|
2019-01-29T15:19:39.000Z
|
2020-07-16T03:55:36.000Z
|
tests/unit/sim_client/__init__.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 46
|
2019-02-08T14:23:11.000Z
|
2021-04-06T13:45:10.000Z
|
tests/unit/sim_client/__init__.py
|
rkm/bluebird
|
2325ebb151724d4444c092c095a040d7365dda79
|
[
"MIT"
] | 3
|
2019-05-06T14:18:07.000Z
|
2021-06-17T10:39:59.000Z
|
"""
Module contains tests for the sim_client package
"""
| 14.25
| 48
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.982456
|
581495876b03363b5fef74a09d461c434b90c0d7
| 8,344
|
py
|
Python
|
glog.py
|
leoll2/python-glog
|
c809d16352bf061d0ee38e590c6f28d553d740e7
|
[
"BSD-2-Clause"
] | null | null | null |
glog.py
|
leoll2/python-glog
|
c809d16352bf061d0ee38e590c6f28d553d740e7
|
[
"BSD-2-Clause"
] | null | null | null |
glog.py
|
leoll2/python-glog
|
c809d16352bf061d0ee38e590c6f28d553d740e7
|
[
"BSD-2-Clause"
] | null | null | null |
"""A simple Google-style logging wrapper."""
import logging
import time
import traceback
import os
import sys
import gflags as flags
FLAGS = flags.FLAGS
def format_message(record):
try:
record_message = "%s" % (record.msg % record.args)
except TypeError:
record_message = record.msg
return record_message
class GlogFormatter(logging.Formatter):
LEVEL_MAP = {
logging.FATAL: "F", # FATAL is alias of CRITICAL
logging.ERROR: "E",
logging.WARN: "W",
logging.INFO: "I",
logging.DEBUG: "D",
}
def __init__(self):
logging.Formatter.__init__(self)
def format(self, record):
try:
level = GlogFormatter.LEVEL_MAP[record.levelno]
except KeyError:
level = "?"
date = time.localtime(record.created)
date_usec = (record.created - int(record.created)) * 1e6
record_message = "%c%02d%02d %02d:%02d:%02d.%06d %s %s:%d] %s" % (
level,
date.tm_mon,
date.tm_mday,
date.tm_hour,
date.tm_min,
date.tm_sec,
date_usec,
record.process if record.process is not None else "?????",
record.filename,
record.lineno,
format_message(record),
)
record.getMessage = lambda: record_message
return logging.Formatter.format(self, record)
class Logger(object):
def __init__(self, name, filename=None):
self.logger = logging.getLogger(name)
init(self.logger, filename)
self.debug = self.logger.debug
self.info = self.logger.info
self.warning = self.logger.warning
self.warn = self.logger.warning
self.error = self.logger.error
self.exception = self.logger.exception
self.fatal = self.logger.fatal
self.log = self.logger.log
def setLevel(self, newlevel):
setLevel(newlevel, self.logger)
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warning
error = logging.error
exception = logging.exception
fatal = logging.fatal
log = logging.log
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
WARN = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
_level_names = {
DEBUG: "DEBUG",
INFO: "INFO",
WARN: "WARN",
ERROR: "ERROR",
FATAL: "FATAL",
}
_level_letters = [name[0] for name in _level_names.values()]
GLOG_PREFIX_REGEX = (
    (
        # Note: (?x) must be the very first token of the pattern so the
        # verbose flag applies globally (newer Python versions reject
        # global flags placed elsewhere in the expression).
        r"""(?x) ^
    (?P<severity>[%s])
    (?P<month>\d\d)(?P<day>\d\d)\s
    (?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)
    \.(?P<microsecond>\d{6})\s+
    (?P<process_id>-?\d+)\s
    (?P<filename>[a-zA-Z<_][\w._<>-]+):(?P<line>\d+)
    \]\s
    """
    )
    % "".join(_level_letters)
)
"""Regex you can use to parse glog line prefixes."""
global_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stderr_handler = logging.StreamHandler(sys.stderr)
file_handlers = dict()
def setLevel(newlevel, logger=global_logger):
logger.setLevel(newlevel)
logger.debug("Log level set to %s", newlevel)
def init(logger=None, filename=None):
if logger is None:
logger = global_logger
logger.propagate = False
if filename is None:
handler = stderr_handler
elif filename == "stderr":
handler = stderr_handler
elif filename == "stdout":
handler = stdout_handler
elif filename in file_handlers:
handler = file_handlers[filename]
else:
handler = logging.FileHandler(filename)
file_handlers[filename] = handler
handler.setFormatter(GlogFormatter())
logger.addHandler(handler)
class CaptureWarningsFlag(flags.BooleanFlag):
def __init__(self):
flags.BooleanFlag.__init__(
self,
"glog_capture_warnings",
True,
"Redirect warnings to log.warn messages",
)
def Parse(self, arg):
flags.BooleanFlag.Parse(self, arg)
logging.captureWarnings(self.value)
flags.DEFINE_flag(CaptureWarningsFlag())
class VerbosityParser(flags.ArgumentParser):
"""Sneakily use gflags parsing to get a simple callback."""
def Parse(self, arg):
try:
intarg = int(arg)
# Look up the name for this level (DEBUG, INFO, etc) if it exists
try:
level = logging._levelNames.get(intarg, intarg)
except AttributeError: # This was renamed somewhere b/w 2.7 and 3.4
level = logging._levelToName.get(intarg, intarg)
except ValueError:
level = arg
setLevel(level)
return level
flags.DEFINE(
parser=VerbosityParser(),
serializer=flags.ArgumentSerializer(),
name="verbosity",
default=logging.INFO,
help="Logging verbosity",
)
init(global_logger)
# Define functions emulating C++ glog check-macros
# https://htmlpreview.github.io/?https://github.com/google/glog/master/doc/glog.html#check
def format_stacktrace(stack):
"""Print a stack trace that is easier to read.
* Reduce paths to basename component
* Truncates the part of the stack after the check failure
"""
lines = []
for _, f in enumerate(stack):
fname = os.path.basename(f[0])
line = "\t%s:%d\t%s" % (fname + "::" + f[2], f[1], f[3])
lines.append(line)
return lines
class FailedCheckException(AssertionError):
"""Exception with message indicating check-failure location and values."""
def check_failed(message):
stack = traceback.extract_stack()
stack = stack[0:-2]
stacktrace_lines = format_stacktrace(stack)
filename, line_num, _, _ = stack[-1]
try:
raise FailedCheckException(message)
except FailedCheckException:
log_record = global_logger.makeRecord(
"CRITICAL", 50, filename, line_num, message, None, None
)
stderr_handler.handle(log_record)
log_record = global_logger.makeRecord(
"DEBUG", 10, filename, line_num, "Check failed here:", None, None
)
stderr_handler.handle(log_record)
for line in stacktrace_lines:
log_record = global_logger.makeRecord(
"DEBUG", 10, filename, line_num, line, None, None
)
stderr_handler.handle(log_record)
raise
return
def check(condition, message=None):
"""Raise exception with message if condition is False."""
if not condition:
if message is None:
message = "Check failed."
check_failed(message)
def check_eq(obj1, obj2, message=None):
"""Raise exception with message if obj1 != obj2."""
if obj1 != obj2:
if message is None:
message = "Check failed: %s != %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ne(obj1, obj2, message=None):
"""Raise exception with message if obj1 == obj2."""
if obj1 == obj2:
if message is None:
message = "Check failed: %s == %s" % (str(obj1), str(obj2))
check_failed(message)
def check_le(obj1, obj2, message=None):
"""Raise exception with message if not (obj1 <= obj2)."""
if obj1 > obj2:
if message is None:
message = "Check failed: %s > %s" % (str(obj1), str(obj2))
check_failed(message)
def check_ge(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 >= obj2)."""
if obj1 < obj2:
if message is None:
message = "Check failed: %s < %s" % (str(obj1), str(obj2))
check_failed(message)
def check_lt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 < obj2)."""
if obj1 >= obj2:
if message is None:
message = "Check failed: %s >= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_gt(obj1, obj2, message=None):
"""Raise exception with message unless (obj1 > obj2)."""
if obj1 <= obj2:
if message is None:
message = "Check failed: %s <= %s" % (str(obj1), str(obj2))
check_failed(message)
def check_notnone(obj, message=None):
"""Raise exception with message if obj is None."""
if obj is None:
if message is None:
message = "Check failed: Object is None."
check_failed(message)
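# A short usage sketch of the check helpers above (values are illustrative);
# a failing check raises FailedCheckException and logs the stack trace:
def _example_checks():
    check_eq(1 + 1, 2)
    check_lt(3, 5, "3 should be less than 5")
    check_notnone("a value")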
| 27.447368
| 90
| 0.615532
| 2,683
| 0.321548
| 0
| 0
| 0
| 0
| 0
| 0
| 1,843
| 0.220877
|
581495ab37cf4df801b88c86040220d6464bbc32
| 4,141
|
py
|
Python
|
ref_rna.py
|
entn-at/warp-rna
|
f6bf19634564068f23f9906373754e04f9b653a3
|
[
"MIT"
] | 39
|
2019-08-11T09:06:55.000Z
|
2022-03-30T03:24:34.000Z
|
ref_rna.py
|
entn-at/warp-rna
|
f6bf19634564068f23f9906373754e04f9b653a3
|
[
"MIT"
] | null | null | null |
ref_rna.py
|
entn-at/warp-rna
|
f6bf19634564068f23f9906373754e04f9b653a3
|
[
"MIT"
] | 6
|
2019-12-11T03:02:48.000Z
|
2021-11-29T09:01:51.000Z
|
"""
Python reference implementation of the Recurrent Neural Aligner.
Author: Ivan Sorokin
Based on the papers:
- "Recurrent Neural Aligner: An Encoder-Decoder Neural Network Model for Sequence to Sequence Mapping"
Hasim Sak, et al., 2017
- "Extending Recurrent Neural Aligner for Streaming End-to-End Speech Recognition in Mandarin"
Linhao Dong, et al., 2018
"""
import numpy as np
NEG_INF = -float("inf")
def logsumexp(*args):
"""
Stable log sum exp.
"""
if all(a == NEG_INF for a in args):
return NEG_INF
a_max = max(args)
lsp = np.log(sum(np.exp(a - a_max) for a in args))
return a_max + lsp
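# Quick sanity sketch (illustrative values): combining two equal
# log-probabilities log(0.5) and log(0.5) under logsumexp gives log(1.0) == 0.
def _logsumexp_example():
    half = np.log(0.5)
    return abs(logsumexp(half, half)) < 1e-12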
def log_softmax(acts, axis):
"""
Log softmax over the last axis of the 3D array.
"""
acts = acts - np.max(acts, axis=axis, keepdims=True)
probs = np.sum(np.exp(acts), axis=axis, keepdims=True)
log_probs = acts - np.log(probs)
return log_probs
def forward_pass(log_probs, labels, blank):
T, U, _ = log_probs.shape
S = T-U+2
alphas = np.zeros((S, U))
for u in range(1, U):
alphas[0, u] = alphas[0, u-1] + log_probs[u-1, u-1, labels[u-1]]
for t in range(1, S):
alphas[t, 0] = alphas[t-1, 0] + log_probs[t-1, 0, blank]
for t in range(1, S):
for u in range(1, U):
skip = alphas[t-1, u] + log_probs[t+u-1, u, blank]
emit = alphas[t, u-1] + log_probs[t+u-1, u-1, labels[u-1]]
alphas[t, u] = logsumexp(emit, skip)
return alphas, alphas[S-1, U-1]
def backward_pass(log_probs, labels, blank):
T, U, _ = log_probs.shape
S = T-U+2
S1 = S-1
U1 = U-1
betas = np.zeros((S, U))
for i in range(1, U):
u = U1-i
betas[S1, u] = betas[S1, u+1] + log_probs[T-i, u, labels[u]]
for i in range(1, S):
t = S1-i
betas[t, U1] = betas[t+1, U1] + log_probs[T-i, U1, blank]
for i in range(1, S):
t = S1-i
for j in range(1, U):
u = U1-j
skip = betas[t+1, u] + log_probs[T-i-j, u, blank]
emit = betas[t, u+1] + log_probs[T-i-j, u, labels[u]]
betas[t, u] = logsumexp(emit, skip)
return betas, betas[0, 0]
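# Note on the two recursions above: the total log-likelihood can be read off
# either corner of the lattice; alphas[S-1, U-1] from forward_pass equals
# betas[0, 0] from backward_pass, and test() below asserts this numerically.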
def analytical_gradient(log_probs, alphas, betas, labels, blank):
T, U, _ = log_probs.shape
S = T-U+2
log_like = betas[0, 0]
grads = np.full(log_probs.shape, NEG_INF)
for t in range(S-1):
for u in range(U):
grads[t+u, u, blank] = alphas[t, u] + betas[t+1, u] + log_probs[t+u, u, blank] - log_like
for t in range(S):
for u, l in enumerate(labels):
grads[t+u, u, l] = alphas[t, u] + betas[t, u+1] + log_probs[t+u, u, l] - log_like
return -np.exp(grads)
def numerical_gradient(log_probs, labels, neg_loglike, blank):
epsilon = 1e-5
T, U, V = log_probs.shape
grads = np.zeros_like(log_probs)
for t in range(T):
for u in range(U):
for v in range(V):
log_probs[t, u, v] += epsilon
alphas, ll_forward = forward_pass(log_probs, labels, blank)
grads[t, u, v] = (-ll_forward - neg_loglike) / epsilon
log_probs[t, u, v] -= epsilon
return grads
def test():
np.random.seed(0)
blank = 0
vocab_size = 4
input_len = 5
output_len = 3
inputs = np.random.rand(input_len, output_len + 1, vocab_size)
labels = np.random.randint(1, vocab_size, output_len)
log_probs = log_softmax(inputs, axis=2)
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
assert np.allclose(ll_forward, ll_backward, atol=1e-12, rtol=1e-12), \
"Log-likelihood from forward and backward pass mismatch."
neg_loglike = -ll_forward
analytical_grads = analytical_gradient(log_probs, alphas, betas, labels, blank)
numerical_grads = numerical_gradient(log_probs, labels, neg_loglike, blank)
assert np.allclose(analytical_grads, numerical_grads, atol=1e-6, rtol=1e-6), \
"Analytical and numerical computation of gradient mismatch."
if __name__ == "__main__":
test()
| 26.544872
| 103
| 0.59744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 606
| 0.146341
|
581517f5427032699dff194265e55b485b52ab39
| 2,994
|
py
|
Python
|
tests/coretests.py
|
thomasms/coiny
|
1f51eac2542e46b03abd9f66fd3b58fbd80cb177
|
[
"MIT"
] | null | null | null |
tests/coretests.py
|
thomasms/coiny
|
1f51eac2542e46b03abd9f66fd3b58fbd80cb177
|
[
"MIT"
] | null | null | null |
tests/coretests.py
|
thomasms/coiny
|
1f51eac2542e46b03abd9f66fd3b58fbd80cb177
|
[
"MIT"
] | null | null | null |
import unittest
from typing import Any
from coiny.core import CoinPrice, CoinyQueue, CoinySession, price_now_url, price_task
from coiny.utils import NullCoinPrice
class HasJson:
def __init__(self, data) -> None:
self.data = data
async def __aenter__(self):
return self
async def __aexit__(self, *args, **kwargs):
pass
async def json(self):
return self.data
class PriceTaskTests(unittest.IsolatedAsyncioTestCase):
async def test_price_task_empty_queue(self):
queue = CoinyQueue()
session = CoinySession()
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_queue(self):
class NoGetSession(CoinySession):
"""HACK: Not a good idea to inherit from CoinySession"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.mock_url = ""
def get(
self, url: str, *, allow_redirects: bool = True, **kwargs: Any
) -> HasJson:
self.mock_url = f"called:{url}"
return HasJson({"mycoin": {"XYZ": 3.4}})
queue = CoinyQueue()
await queue.put(("mycoin", "XYZ", "https://myurl"))
async with NoGetSession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="XYZ", coin="mycoin", rate=3.4)
self.assertEqual(expected, result)
self.assertEqual("called:https://myurl", session.mock_url)
async def test_price_task_mock_eth(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
# returns -> {"ethereum": {"eur": 3295.23}}
queue = CoinyQueue()
await queue.put(("ethereum", "eur", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
expected = CoinPrice(fiat="eur", coin="ethereum", rate=3295.23)
self.assertEqual(expected, result)
async def test_price_task_mock_eth_invalid(self):
mock_url = "https://run.mocky.io/v3/09750cfe-39a5-4d31-9651-2292765a8fe3"
queue = CoinyQueue()
await queue.put(("bitcoin", "gbp", mock_url))
async with CoinySession() as session:
result = await price_task(queue, session)
self.assertEqual(NullCoinPrice, result)
async def test_price_task_real_eth(self):
queue = CoinyQueue()
await queue.put(("ethereum", "eur", price_now_url("ethereum", "eur")))
async with CoinySession() as session:
result = await price_task(queue, session)
# no way to test the live price of course
half_expected = CoinPrice(fiat="eur", coin="ethereum", rate=0.0)
self.assertEqual(half_expected.fiat, result.fiat)
self.assertEqual(half_expected.coin, result.coin)
__all__ = ["PriceTaskTests"]
| 34.022727
| 85
| 0.62024
| 2,793
| 0.932866
| 0
| 0
| 0
| 0
| 2,614
| 0.873079
| 462
| 0.154309
|
5816e949ba4a9d3600362e45768d66548fbd4d4b
| 969
|
py
|
Python
|
legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | 2
|
2020-04-09T13:04:25.000Z
|
2021-09-24T14:17:26.000Z
|
legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | null | null | null |
legacy/dx/simulator/simulator_diagnoser/test/graph/traversal/forward_test.py
|
GaloisInc/adapt
|
2ccff778d3e77505899266572f8f7caacb5b630f
|
[
"BSD-3-Clause"
] | 3
|
2019-09-20T20:49:54.000Z
|
2021-09-02T17:33:47.000Z
|
import unittest
from simulator_diagnoser.graph import InmemoryGraph
from simulator_diagnoser.graph.traversal import ForwardAnalysis
class ForwardAnalysisTest(unittest.TestCase):
def setUp(self):
# Graph =
# 9
# / | \
# 6 7 8
# \ / \ /
# 4 5
# / \ / \
# 1 2 3
self.g1 = InmemoryGraph()
edges = [(1, 4), (2, 4), (2, 5), (3, 5),
(4, 6), (4, 7), (5, 7), (5, 8),
(6, 9), (7, 9), (8, 9)]
for edge in edges:
self.g1.add_edge(*edge)
def test_none(self):
fa = ForwardAnalysis(None)
for x in fa:
fail()
def test_graph(self):
fa = ForwardAnalysis(self.g1)
for i, (node, parents) in enumerate(fa, start=1):
self.assertEqual(i, node)
self.assertEqual(parents, self.g1.get_node_parents(i)[0])
if __name__ == '__main__':
unittest.main()
| 24.846154
| 69
| 0.49742
| 786
| 0.811146
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 0.089783
|
581774fbaaecfebcc97c105cd9ba5717bc57c3de
| 5,396
|
py
|
Python
|
SONOS/sonos-fadein-alarm.py
|
tksunw/IoT
|
2148c49e9a90822400f195be7b1de3f8e8b8ba2a
|
[
"MIT"
] | 1
|
2018-01-30T23:30:27.000Z
|
2018-01-30T23:30:27.000Z
|
SONOS/sonos-fadein-alarm.py
|
tksunw/IoT
|
2148c49e9a90822400f195be7b1de3f8e8b8ba2a
|
[
"MIT"
] | 1
|
2018-02-14T19:58:56.000Z
|
2018-02-14T19:58:56.000Z
|
SONOS/sonos-fadein-alarm.py
|
tksunw/IoT
|
2148c49e9a90822400f195be7b1de3f8e8b8ba2a
|
[
"MIT"
] | 2
|
2018-02-13T18:52:09.000Z
|
2021-09-29T14:27:49.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
sonos-fadein-alarm.py - a gentle alarm using Sonos Favorites.
This module allows a user to choose a SONOS favorite channel to
play for a gentle alarm. Select the maximum desired volume, the
number of minutes over which to ramp volume from 0 to the chosen
maxium, and choose a favorite to use (by title), and the script
will do the rest.
2017-01-21 my new alarm clock.
2017-09-15 added ability to group a second speaker to the main speaker
also aded the ability to specify 'all' to group all
available speakers to the main speaker.
'''
import argparse
import datetime
import time
import os.path
import soco
# Set some default values. These are mine. The channel is listed
# by name, and comes from the Sonos players 'favorites'. Volume
# on the player(s) specified will ramp up from 0 to MAXVOL over
# the number of minutes specified. For me, I like a 30 minute
# ramp from 0 to 12. So the volume will increase by 1 every 2.5
# minutes.
# Set _WEEKEND days to skip certain days of the week, if you want
# to skip your days off work.
_SPEAKER = 'master bedroom'
_CHANNEL = 'Everybody Talks Radio'
_MINUTES = 30
_MAXVOL = 12
_WEEKEND = ('Saturday', 'Sunday')
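# Worked example of the ramp math above (values are the defaults, shown for
# illustration): 30 minutes to reach volume 12 means one +1 step every
# (30 * 60) / 12 = 150 seconds, i.e. every 2.5 minutes.
def _ramp_interval_example(minutes=_MINUTES, maxvol=_MAXVOL):
    return (minutes * 60) / maxvol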
def get_sonos_favorites(from_speaker):
''' get_sonos_favorites: gets the saved "favorites" from a Sonos speaker.
Args:
from_speaker (soco.core.Soco object): the speaker to pull favorites from.
Returns:
favs (list): a list of Sonos Favorites (title, meta, uri)
'''
favs = from_speaker.get_sonos_favorites()['favorites']
return favs
def main():
''' main function:
Args:
None
Returns:
None
Process command line arguments, and turn a Sonos speaker into an alarm
clock, with the flexibility to ramp the volume slowly over a defined
time period, to a "max vol" limit.
'''
parser = argparse.ArgumentParser(description='Sonos/Favorites ramping alarm.')
parser.add_argument('-S', '--speaker', type=str,
help='The Sonos speaker to use for the alarm',
default=_SPEAKER)
parser.add_argument('-s', '--slave', type=str,
help='The Sonos speaker(s) to join to a group for the alarm. Use the word "all" to join all available players.')
parser.add_argument('-c', '--channel', type=str,
help='The Sonos Favorite Channel to use for the alarm',
default=_CHANNEL)
parser.add_argument('-m', '--minutes', type=int,
help='The number of minutes the alarm will ramp up over',
default=_MINUTES)
parser.add_argument('-v', '--volume', type=int,
help='Set the maximum volume for the alarm',
default=_MAXVOL)
parser.add_argument('-p', '--pause',
help='Pause a speaker that is playing.',
action='store_true')
parser.epilog = "The channel you select must be a Sonos Favorite. Because\n"
parser.epilog += "I'm lazy and didn't feel like figuring out SoCo to get\n"
parser.epilog += "it working directly with Pandora, which SoCo doesn't seem\n"
parser.epilog += "to work with yet."
args = parser.parse_args()
speakers = soco.discover()
player = [x for x in speakers if x.player_name.lower() == args.speaker.lower()][0]
if args.slave:
if args.slave.lower() == 'all':
[x.join(player) for x in speakers if x.player_name.lower() != player.player_name.lower()]
else:
slave = [x for x in speakers if x.player_name.lower() == args.slave.lower()][0]
slave.join(player)
    if args.pause:
        # This will stop the indicated Sonos speaker, even if the alarm is
        # still running.
        player.stop()
else:
        favorites = get_sonos_favorites(player)
        my_choice = None
        for favorite in favorites:
            if args.channel.lower() in favorite['title'].lower():
                my_choice = favorite
                break
        if my_choice is None:
            print "Channel '{}' not found in Sonos favorites.".format(args.channel)
            return
print "Playing {} on {}".format(my_choice['title'], player.player_name)
player.play_uri(uri=my_choice['uri'], meta=my_choice['meta'], start=True)
if args.minutes == 0:
player.volume = args.volume
else:
player.volume = 0
seconds = args.minutes * 60
ramp_interval = seconds / args.volume
for _ in xrange(args.volume):
player.volume += 1
time.sleep(ramp_interval)
if __name__ == "__main__":
today = datetime.datetime.today().strftime('%A')
date = datetime.datetime.today().strftime('%Y-%m-%d')
holidays = set(line.strip() for line in open('holidays.txt'))
if today in _WEEKEND:
print today, 'is a scheduled weekend day. Not running.'
elif date in holidays:
print date, 'is a scheduled holiday. Not running.'
    elif os.path.isfile('/tmp/holiday'):
        # /tmp/holiday lets us mark that we don't want the alarm to run
        # tomorrow, which is handy when using cron. Just touch the file.
print "Today is marked as a holiday via /tmp/holiday, not running the alarm"
else:
main()
else:
print "This file is not intended to be included by other scripts."
| 38.542857
| 137
| 0.623981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,861
| 0.530208
|
58183b1abecb86537c0a52b35966e7d8ef3e9a5f
| 5,775
|
py
|
Python
|
Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
Agent5_a_0_5_knots_512_d_0_02/step_node_Agent6_rewards.py
|
schigeru/Bachelorarbeit_Code
|
261b2552221f768e7022abc60a4e5a7d2fedbbae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import math
import os
import numpy as np
import time
import sys
import copy
import rospy
import moveit_msgs.msg
import geometry_msgs.msg
import random
import csv
from sensor_msgs.msg import JointState
from gazebo_msgs.msg import LinkStates
from gazebo_msgs.msg import LinkState
from std_msgs.msg import Float64
from std_msgs.msg import String
from sensor_msgs.msg import Joy
import moveit_commander
from panda_rl.srv import StepAction, StepActionResponse
group_name = "panda_arm_hand"
move_group = moveit_commander.MoveGroupCommander(group_name)
quat_goal = np.array([1, 0, 0.0075, 0])
def vector2points(v, u):
v = np.array(v)
u = np.array(u)
vector = u - v
vector = np.round(vector, 5)
return vector
def get_hand_position():
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_positionx = (msg.pose[9].position.x + msg.pose[10].position.x) / 2
hand_positiony = (msg.pose[9].position.y + msg.pose[10].position.y) / 2
hand_positionz = (msg.pose[9].position.z + msg.pose[10].position.z) / 2
hand_position = [hand_positionx, hand_positiony, hand_positionz]
hand_position = np.round(hand_position, 5)
return hand_position
def get_hand_orientation():
msg = rospy.wait_for_message('/gazebo/link_states', LinkStates)
hand_orientation_x = (msg.pose[9].orientation.x + msg.pose[10].orientation.x) / 2
hand_orientation_y = (msg.pose[9].orientation.y + msg.pose[10].orientation.y) / 2
hand_orientation_z = (msg.pose[9].orientation.z + msg.pose[10].orientation.z) / 2
hand_orientation_w = (msg.pose[9].orientation.w + msg.pose[10].orientation.w) / 2
hand_orientation = [hand_orientation_x, hand_orientation_y, hand_orientation_z, hand_orientation_w]
hand_orientation = np.round(hand_orientation, 5)
return hand_orientation
def goal_distance(x, y):
x = np.array(x)
y = np.array(y)
distance = np.linalg.norm(x-y)
distance = np.round(distance, 5)
return distance
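# Quick sanity sketch (illustrative values): the Euclidean distance between
# [0, 0, 0] and [3, 4, 0] is 5.0.
def _goal_distance_example():
    return goal_distance([0.0, 0.0, 0.0], [3.0, 4.0, 0.0])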
def take_action(msg):
done = False
goal = msg.goal
joint_state = move_group.get_current_joint_values()
joint_state[0] = joint_state[0] + (msg.action[0] / 20)
joint_state[1] = joint_state[1] + (msg.action[1] / 20)
joint_state[2] = joint_state[2] + (msg.action[2] / 20)
joint_state[3] = joint_state[3] + (msg.action[3] / 20)
joint_state[4] = joint_state[4] + (msg.action[4] / 20)
joint_state[5] = joint_state[5] + (msg.action[5] / 20)
joint_state[7] = 0.04
joint_state[8] = 0.04
if joint_state[0] < joint1_threshold_min or joint_state[0] > joint1_threshold_max \
or joint_state[1] < joint2_threshold_min or joint_state[1] > joint2_threshold_max \
or joint_state[2] < joint3_threshold_min or joint_state[2] > joint3_threshold_max \
or joint_state[3] < joint4_threshold_min or joint_state[3] > joint4_threshold_max \
or joint_state[4] < joint5_threshold_min or joint_state[4] > joint5_threshold_max \
or joint_state[5] < joint6_threshold_min or joint_state[5] > joint6_threshold_max:
hand_position = get_hand_position()
vector = vector2points(hand_position, goal)
obs = joint_state[0:7]
obs = np.round(obs, 5)
obs = np.append(obs, vector)
done = True
reward = -50
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
move_group.go(joint_state, wait=True)
move_group.stop()
joint_state = move_group.get_current_joint_values()
obs = joint_state[0:7]
obs = np.round(obs, 5)
hand_position = get_hand_position()
quat = get_hand_orientation()
quat_reward = np.linalg.norm(quat_goal - quat)
d = goal_distance(hand_position, goal)
vector = vector2points(hand_position, goal)
z = hand_position[2] - goal[2]
obs = np.append(obs, vector)
if d < 0.02 and z > 0:
reward = 0
print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
print("Observation ", obs)
print("reward target reached: ", reward)
done = True
group_name_gripper = "hand"
move_group_gripper = moveit_commander.MoveGroupCommander(group_name_gripper)
joint_values = move_group_gripper.get_current_joint_values()
joint_values[0] = 0.02
joint_values[1] = 0.02
move_group_gripper.go(joint_values, wait=True)
move_group_gripper.stop()
return StepActionResponse(obs=obs, reward=reward, done=done)
        elif d > 0.08 and z < 0.05 or z < 0:  # encourages approaching from above: penalize states where the end effector is far away but already near the target's height, or below it
reward = 5 * (-d - quat_reward)
return StepActionResponse(obs=obs, reward=reward, done=done)
else:
reward = (-d - quat_reward)
#print("Action: ", msg.action)
print("Handpos: ", hand_position)
print("Goal: ", goal)
#print("Observation ", obs)
print("reward: ", reward)
print("Distance", d)
return StepActionResponse(obs=obs, reward=reward, done=done)
joint1_threshold_min = -2.8973
joint2_threshold_min = -1.7628
joint3_threshold_min = -2.8973
joint4_threshold_min = -3.0718
joint5_threshold_min = -2.8973
joint6_threshold_min = -0.0175
joint1_threshold_max = 2.8973
joint2_threshold_max = 1.7628
joint3_threshold_max = 2.8973
joint4_threshold_max = -0.0698
joint5_threshold_max = 2.8973
joint6_threshold_max = 3.7525
rospy.init_node('step_service', anonymous=False)
print("step_nodeaktiv")
s = rospy.Service('step_env', StepAction, take_action)
rospy.spin()
| 35.429448
| 151
| 0.675152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 398
| 0.068882
|
5818909f1789bffb946f4dcc647ac54b08e00f22
| 10,043
|
py
|
Python
|
pwnlib/elf/corefile.py
|
jdsecurity/binjitsu
|
999ad632004bfc3e623eead20eb11de98fc1f4dd
|
[
"MIT"
] | 5
|
2018-05-15T13:00:56.000Z
|
2020-02-09T14:29:00.000Z
|
pwnlib/elf/corefile.py
|
FDlucifer/binjitsu
|
999ad632004bfc3e623eead20eb11de98fc1f4dd
|
[
"MIT"
] | null | null | null |
pwnlib/elf/corefile.py
|
FDlucifer/binjitsu
|
999ad632004bfc3e623eead20eb11de98fc1f4dd
|
[
"MIT"
] | 6
|
2017-09-07T02:31:11.000Z
|
2021-07-05T16:59:18.000Z
|
import collections
import ctypes
import elftools
from elftools.common.utils import roundup, struct_parse
from elftools.common.py3compat import bytes2str
from elftools.construct import CString
from ..context import context
from ..log import getLogger
from .datatypes import *
from .elf import ELF
from ..tubes.tube import tube
log = getLogger(__name__)
types = {
'i386': elf_prstatus_i386,
'amd64': elf_prstatus_amd64,
}
# Slightly modified copy of the pyelftools version of the same function,
# until they fix this issue:
# https://github.com/eliben/pyelftools/issues/93
def iter_notes(self):
""" Iterates the list of notes in the segment.
"""
offset = self['p_offset']
end = self['p_offset'] + self['p_filesz']
while offset < end:
note = struct_parse(
self._elfstructs.Elf_Nhdr,
self.stream,
stream_pos=offset)
note['n_offset'] = offset
offset += self._elfstructs.Elf_Nhdr.sizeof()
self.stream.seek(offset)
# n_namesz is 4-byte aligned.
disk_namesz = roundup(note['n_namesz'], 2)
note['n_name'] = bytes2str(
CString('').parse(self.stream.read(disk_namesz)))
offset += disk_namesz
desc_data = bytes2str(self.stream.read(note['n_descsz']))
note['n_desc'] = desc_data
offset += roundup(note['n_descsz'], 2)
note['n_size'] = offset - note['n_offset']
yield note
class Mapping(object):
def __init__(self, name, start, stop, flags):
self.name=name
self.start=start
self.stop=stop
self.size=stop-start
self.flags=flags
@property
def permstr(self):
flags = self.flags
return ''.join(['r' if flags & 4 else '-',
'w' if flags & 2 else '-',
'x' if flags & 1 else '-',
'p'])
def __str__(self):
return '%x-%x %s %x %s' % (self.start,self.stop,self.permstr,self.size,self.name)
def __repr__(self):
return '%s(%r, %#x, %#x, %#x, %#x)' % (self.__class__.__name__,
self.name,
self.start,
self.stop,
self.size,
self.flags)
def __int__(self):
return self.start
class Core(ELF):
"""Core(*a, **kw) -> Core
    Enhances the information available about a corefile (which is an extension
of the ELF format) by permitting extraction of information about the mapped
data segments, and register state.
Registers can be accessed directly, e.g. via ``core_obj.eax``.
Mappings can be iterated in order via ``core_obj.mappings``.
"""
def __init__(self, *a, **kw):
self.prstatus = None
self.files = {}
self.mappings = []
self.stack = None
self.env = {}
try:
super(Core, self).__init__(*a, **kw)
except IOError:
log.warning("No corefile. Have you set /proc/sys/kernel/core_pattern?")
raise
self.load_addr = 0
self._address = 0
if not self.elftype == 'CORE':
log.error("%s is not a valid corefile" % e.file.name)
if not self.arch in ('i386','amd64'):
log.error("%s does not use a supported corefile architecture" % e.file.name)
prstatus_type = types[self.arch]
with log.waitfor("Parsing corefile...") as w:
self._load_mappings()
for segment in self.segments:
if not isinstance(segment, elftools.elf.segments.NoteSegment):
continue
for note in iter_notes(segment):
# Try to find NT_PRSTATUS. Note that pyelftools currently
# mis-identifies the enum name as 'NT_GNU_ABI_TAG'.
if note.n_descsz == ctypes.sizeof(prstatus_type) and \
note.n_type == 'NT_GNU_ABI_TAG':
self.NT_PRSTATUS = note
self.prstatus = prstatus_type.from_buffer_copy(note.n_desc)
# Try to find the list of mapped files
if note.n_type == constants.NT_FILE:
with context.local(bytes=self.bytes):
self._parse_nt_file(note)
# Try to find the auxiliary vector, which will tell us
# where the top of the stack is.
if note.n_type == constants.NT_AUXV:
with context.local(bytes=self.bytes):
self._parse_auxv(note)
if self.stack and self.mappings:
for mapping in self.mappings:
if mapping.stop == self.stack:
mapping.name = '[stack]'
self.stack = mapping
with context.local(bytes=self.bytes, log_level='error'):
try:
self._parse_stack()
except ValueError:
# If there are no environment variables, we die by running
# off the end of the stack.
pass
def _parse_nt_file(self, note):
t = tube()
t.unrecv(note.n_desc)
count = t.unpack()
page_size = t.unpack()
starts = []
addresses = {}
for i in range(count):
start = t.unpack()
end = t.unpack()
ofs = t.unpack()
starts.append(start)
for i in range(count):
filename = t.recvuntil('\x00', drop=True)
start = starts[i]
for mapping in self.mappings:
if mapping.start == start:
mapping.name = filename
self.mappings = sorted(self.mappings, key=lambda m: m.start)
def _load_mappings(self):
for s in self.segments:
if s.header.p_type != 'PT_LOAD':
continue
mapping = Mapping(None,
s.header.p_vaddr,
s.header.p_vaddr + s.header.p_memsz,
s.header.p_flags)
self.mappings.append(mapping)
def _parse_auxv(self, note):
t = tube()
t.unrecv(note.n_desc)
for i in range(0, note.n_descsz, context.bytes * 2):
key = t.unpack()
value = t.unpack()
# The AT_EXECFN entry is a pointer to the executable's filename
# at the very top of the stack, followed by a word's with of
# NULL bytes. For example, on a 64-bit system...
#
# 0x7fffffffefe8 53 3d 31 34 33 00 2f 62 69 6e 2f 62 61 73 68 00 |S=14|3./b|in/b|ash.|
# 0x7fffffffeff8 00 00 00 00 00 00 00 00 |....|....| | |
if key == constants.AT_EXECFN:
self.at_execfn = value
value = value & ~0xfff
value += 0x1000
self.stack = value
def _parse_stack(self):
# AT_EXECFN is the start of the filename, e.g. '/bin/sh'
# Immediately preceding is a NULL-terminated environment variable string.
# We want to find the beginning of it
address = self.at_execfn-1
# Sanity check!
try:
assert self.u8(address) == 0
except AssertionError:
# Something weird is happening. Just don't touch it.
return
except ValueError:
# If the stack is not actually present in the coredump, we can't
# read from the stack. This will fail as:
# ValueError: 'seek out of range'
return
# Find the next NULL, which is 1 byte past the environment variable.
while self.u8(address-1) != 0:
address -= 1
# We've found the beginning of the last environment variable.
# We should be able to search up the stack for the envp[] array to
# find a pointer to this address, followed by a NULL.
last_env_addr = address
address &= ~(context.bytes-1)
while self.unpack(address) != last_env_addr:
address -= context.bytes
assert self.unpack(address+context.bytes) == 0
# We've successfully located the end of the envp[] array.
# It comes immediately after the argv[] array, which itself
# is NULL-terminated.
end_of_envp = address+context.bytes
while self.unpack(address - context.bytes) != 0:
address -= context.bytes
start_of_envp = address
# Now we can fill in the environment easier.
for env in range(start_of_envp, end_of_envp, context.bytes):
envaddr = self.unpack(env)
value = self.string(envaddr)
name, value = value.split('=', 1)
self.env[name] = envaddr + len(name) + 1
@property
def maps(self):
"""A printable string which is similar to /proc/xx/maps."""
return '\n'.join(map(str, self.mappings))
def getenv(self, name):
"""getenv(name) -> int
Read an environment variable off the stack, and return its address.
Arguments:
name(str): Name of the environment variable to read.
Returns:
The address of the environment variable.
"""
if name not in self.env:
log.error("Environment variable %r not set" % name)
return self.string(self.env[name]).split('=',1)[-1]
def __getattr__(self, attribute):
if self.prstatus:
if hasattr(self.prstatus, attribute):
return getattr(self.prstatus, attribute)
if hasattr(self.prstatus.pr_reg, attribute):
return getattr(self.prstatus.pr_reg, attribute)
return super(Core, self).__getattribute__(attribute)
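# Minimal usage sketch (assumes a corefile named 'core' exists on disk; the
# register name shown applies to i386 dumps):
#   core = Core('./core')
#   print(core.maps)            # /proc/<pid>/maps-style listing
#   print(hex(core.eip))        # register access via __getattr__
#   print(core.getenv('PATH'))  # value of an environment variable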
| 34.631034
| 103
| 0.54416
| 8,594
| 0.85572
| 860
| 0.085632
| 389
| 0.038733
| 0
| 0
| 2,851
| 0.283879
|
5819716bac9c4b729336569c993ab6648380ee01
| 2,875
|
py
|
Python
|
kNN.py
|
skywind3000/ml
|
d3ac3d6070b66d84e25537915ee634723ddb8c51
|
[
"MIT"
] | 9
|
2019-03-25T02:14:23.000Z
|
2020-05-19T20:46:10.000Z
|
kNN.py
|
skywind3000/ml
|
d3ac3d6070b66d84e25537915ee634723ddb8c51
|
[
"MIT"
] | null | null | null |
kNN.py
|
skywind3000/ml
|
d3ac3d6070b66d84e25537915ee634723ddb8c51
|
[
"MIT"
] | 2
|
2020-07-06T04:44:02.000Z
|
2022-02-17T01:27:55.000Z
|
from __future__ import print_function
import numpy as np
import operator
import os
import sys
if sys.version_info[0] >= 3:
xrange = range
def createDataSet():
group = np.array([[1.0,1.1], [1.0,1.0], [0,0], [0,0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
# kNN classifier
def classify0(inX, dataSet, labels, k):
# calculate distance
dataSetSize = dataSet.shape[0]
diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
sqDiffMat = diffMat ** 2
sqDistances = sqDiffMat.sum(axis = 1)
distances = sqDistances ** 0.5
sortedDistIndicies = distances.argsort()
classCount = {}
# calculate minimal distance
for i in range(k):
voteIlabel = labels[sortedDistIndicies[i]]
classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
sortedClassCount = sorted(classCount.items(),
key = operator.itemgetter(1), reverse = True)
return sortedClassCount[0][0]
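# Minimal usage sketch with the toy dataset above (illustrative): a point near
# (1.0, 1.0) falls among two 'A' neighbors and one 'B' neighbor with k=3.
#   group, labels = createDataSet()
#   classify0([0.9, 1.0], group, labels, 3)  # -> 'A'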
# load image
def img2vector(filename):
returnVect = np.zeros((1, 1024))
fp = open(filename)
for i in xrange(32):
lineStr = fp.readline()
for j in xrange(32):
returnVect[0, 32 * i + j] = int(lineStr[j])
return returnVect
# hand writing classifier
def handwritingClassTest():
hwLabels = []
trainingFileList = os.listdir('data/digits/trainingDigits')
m = len(trainingFileList)
trainingMat = np.zeros((m, 1024))
for i in range(m):
fileNameStr = trainingFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
hwLabels.append(classNumStr)
trainingMat[i,:] = img2vector('data/digits/trainingDigits/%s' % fileNameStr)
testFileList = os.listdir('data/digits/testDigits')
errorCount = 0.0
mTest = len(testFileList)
for i in range(mTest):
fileNameStr = testFileList[i]
fileStr = fileNameStr.split('.')[0]
classNumStr = int(fileStr.split('_')[0])
vectorUnderTest = img2vector('data/digits/testDigits/%s' % fileNameStr)
classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
print("the classifier came back with: %d, the real answer is %d\n"%(
classifierResult, classNumStr))
if classifierResult != classNumStr: errorCount += 1.0
    print('the total number of errors is: %d' % errorCount)
print('the total error rate is: %f'%(errorCount / float(mTest)))
return 0
# testing case
if __name__ == '__main__':
def test1():
group, labels = createDataSet()
print(classify0([0,0], group, labels, 3))
return 0
def test2():
testVector = img2vector('data/digits/testDigits/0_13.txt')
print(testVector[0,0:31])
def test3():
handwritingClassTest()
test3()
| 33.823529
| 85
| 0.619478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 421
| 0.146435
|
5819a9286725e2bb1d31cefd9b8edf4e2e05b208
| 642
|
py
|
Python
|
simfin/revenue/personal_taxes.py
|
CREEi-models/simfin
|
a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb
|
[
"MIT"
] | 1
|
2021-06-11T15:16:13.000Z
|
2021-06-11T15:16:13.000Z
|
simfin/revenue/personal_taxes.py
|
CREEi-models/simfin
|
a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb
|
[
"MIT"
] | 1
|
2021-06-07T14:39:27.000Z
|
2021-06-07T14:39:27.000Z
|
simfin/revenue/personal_taxes.py
|
CREEi-models/simfin
|
a7c632ac8bc8f795cd46028c1a49e65a1c1b44eb
|
[
"MIT"
] | 1
|
2021-03-17T03:52:21.000Z
|
2021-03-17T03:52:21.000Z
|
from simfin.tools import account
class personal_taxes(account):
    '''
    Class for incorporating personal income taxes.
    '''
def set_align(self,pop,eco):
earnings = pop.multiply(eco['emp']*eco['earn_c']+eco['taxinc'],fill_value=0.0)
value = earnings.multiply(eco['personal_taxes'],fill_value=0.0).sum()
self.align = self.value/value
return
def grow(self,macro,pop,eco,others):
earnings = pop.multiply(eco['emp']*eco['earn_c']+eco['taxinc'],fill_value=0.0)
self.value = (earnings.multiply(eco['personal_taxes'],fill_value=0.0).sum())*self.align
return
pass
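# Usage sketch (illustrative): `align` is a calibration factor chosen so the
# simulated tax base reproduces the observed revenue in the base year:
#   align = observed_value / simulated_value
# Subsequent calls to grow() then scale the simulated aggregate by `align`.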
| 33.789474
| 95
| 0.65109
| 609
| 0.945652
| 0
| 0
| 0
| 0
| 0
| 0
| 146
| 0.226708
|
5819cc4c01f213155dbdad2c086e2c95b1ccd432
| 16,094
|
py
|
Python
|
pandaserver/brokerage/PandaSiteIDs.py
|
rybkine/panda-server
|
30fdeaa658a38fe2049849446c300c1e1f5b5231
|
[
"Apache-2.0"
] | 1
|
2019-08-30T13:47:51.000Z
|
2019-08-30T13:47:51.000Z
|
pandaserver/brokerage/PandaSiteIDs.py
|
mkycanopus/panda-server
|
0f7c36800c033fada8bbde53dceaab98770b6df2
|
[
"Apache-2.0"
] | null | null | null |
pandaserver/brokerage/PandaSiteIDs.py
|
mkycanopus/panda-server
|
0f7c36800c033fada8bbde53dceaab98770b6df2
|
[
"Apache-2.0"
] | null | null | null |
# !!!!!!! This file is OBSOLETE. Its content has been absorbed into pilotController.py in the autopilot repository.
# !!!!!!! Questions to Torre Wenaus.
PandaSiteIDs = {
'AGLT2' : {'nickname':'AGLT2-condor','status':'OK'},
'ALBERTA-LCG2' : {'nickname':'ALBERTA-LCG2-lcgce01-atlas-lcgpbs','status':'OK'},
'ANALY_AGLT2' : {'nickname':'ANALY_AGLT2-condor','status':'OK'},
'ANALY_ALBERTA' : {'nickname':'ALBERTA-LCG2-lcgce01-atlas-lcgpbs','status':'OK'},
'ANALY_BEIJING' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'ANALY_BNL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_ATLAS_1' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_ATLAS_2' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
#'ANALY_BNL_LOCAL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test2' : {'nickname':'ANALY_BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BNL_test3' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_BRUNEL' : {'nickname':'UKI-LT2-Brunel-dgc-grid-44-atlas-lcgpbs','status':'notOK'},
'ANALY_CERN' : {'nickname':'CERN-PROD-ce123-grid_atlas-lcglsf','status':'notOK'},
'ANALY_CNAF' : {'nickname':'INFN-CNAF-gridit-ce-001-lcg-lcgpbs','status':'notOK'},
'ANALY_CPPM' : {'nickname':'IN2P3-CPPM-marce01-atlas-pbs','status':'OK'},
'ANALY_FZK' : {'nickname':'FZK-LCG2-ce-5-fzk-atlasXS-pbspro','status':'OK'},
'ANALY_GLASGOW' : {'nickname':'UKI-SCOTGRID-GLASGOW-svr021-q3d-lcgpbs','status':'OK'},
'ANALY_GLOW-ATLAS' : {'nickname':'GLOW-ATLAS-condor','status':'OK'},
'ANALY_GRIF-IRFU' : {'nickname':'GRIF-IRFU-node07-atlas-lcgpbs','status':'OK'},
'ANALY_GRIF-LAL' : {'nickname':'GRIF-LAL-grid10-atlasana-pbs','status':'notOK'},
'ANALY_GRIF-LPNHE' : {'nickname':'GRIF-LPNHE-lpnce-atlas-pbs','status':'notOK'},
'ANALY_HU_ATLAS_Tier2' : {'nickname':'ANALY_HU_ATLAS_Tier2-lsf','status':'OK'},
'ANALY_LANCS' : {'nickname':'UKI-NORTHGRID-LANCS-HEP-fal-pygrid-18-atlas-lcgpbs','status':'notOK'},
'ANALY_LAPP' : {'nickname':'IN2P3-LAPP-lapp-ce01-atlas-pbs','status':'notOK'},
'ANALY_LIV' : {'nickname':'UKI-NORTHGRID-LIV-HEP-hepgrid2-atlas-lcgpbs','status':'notOK'},
'ANALY_LONG_BNL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_LONG_BNL_ATLAS' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
'ANALY_LONG_BNL_LOCAL' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'ANALY_LONG_LYON' : {'nickname':'IN2P3-CC-T2-cclcgceli05-long-bqs','status':'OK'},
'ANALY_LPC' : {'nickname':'IN2P3-LPC-clrlcgce03-atlas-lcgpbs','status':'notOK'},
'ANALY_LPSC' : {'nickname':'IN2P3-LPSC-lpsc-ce-atlas-pbs','status':'OK'},
'ANALY_LYON' : {'nickname':'IN2P3-CC-T2-cclcgceli05-medium-bqs','status':'OK'},
'ANALY_MANC' : {'nickname':'UKI-NORTHGRID-MAN-HEP-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_MCGILL' : {'nickname':'MCGILL-LCG2-atlas-ce-atlas-lcgpbs','status':'OK'},
'ANALY_MWT2' : {'nickname':'ANALY_MWT2-condor','status':'notOK'},
'ANALY_MWT2_SHORT' : {'nickname':'ANALY_MWT2_SHORT-pbs','status':'notOK'},
'ANALY_NET2' : {'nickname':'ANALY_NET2-pbs','status':'OK'},
'ANALY_OU_OCHEP_SWT2' : {'nickname':'ANALY_OU_OCHEP_SWT2-condor','status':'notOK'},
'ANALY_PIC' : {'nickname':'pic-ce07-gshort-lcgpbs','status':'OK'},
'ANALY_RAL' : {'nickname':'RAL-LCG2-lcgce01-atlasL-lcgpbs','status':'OK'},
'ANALY_ROMANIA02' : {'nickname':'RO-02-NIPNE-tbat01-atlas-lcgpbs','status':'notOK'},
'ANALY_ROMANIA07' : {'nickname':'RO-07-NIPNE-tbit01-atlas-lcgpbs','status':'notOK'},
'ANALY_SARA' : {'nickname':'SARA-MATRIX-mu6-short-pbs','status':'notOK'},
'ANALY_SFU' : {'nickname':'SFU-LCG2-snowpatch-hep-atlas-lcgpbs','status':'notOK'},
'ANALY_SHEF' : {'nickname':'UKI-NORTHGRID-SHEF-HEP-lcgce0-atlas-lcgpbs','status':'OK'},
'ANALY_SLAC' : {'nickname':'ANALY_SLAC-lsf','status':'OK'},
'ANALY_SWT2_CPB' : {'nickname':'ANALY_SWT2_CPB-pbs','status':'OK'},
'ANALY_TAIWAN' : {'nickname':'Taiwan-LCG2-w-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_TEST' : {'nickname':'ANALY_TEST','status':'OK'},
'ANALY_TORONTO' : {'nickname':'TORONTO-LCG2-bigmac-lcg-ce2-atlas-pbs','status':'OK'},
'ANALY_TOKYO' : {'nickname':'TOKYO-LCG2-lcg-ce01-atlas-lcgpbs','status':'OK'},
'ANALY_TRIUMF' : {'nickname':'TRIUMF-LCG2-ce1-atlas-lcgpbs','status':'OK'},
'ANALY_UBC' : {'nickname':'UBC-pbs','status':'OK'},
'ANALY_UIUC-HEP' : {'nickname':'ANALY_UIUC-HEP-condor','status':'OK'},
'ANALY_UTA' : {'nickname':'UTA-DPCC-pbs','status':'OK'},
'ANALY_UTA-DPCC' : {'nickname':'UTA-DPCC-test-pbs','status':'notOK'},
'ANALY_VICTORIA' : {'nickname':'VICTORIA-LCG2-lcg-ce-general-lcgpbs','status':'OK'},
'AUVERGRID' : {'nickname':'AUVERGRID-iut15auvergridce01-atlas-lcgpbs','status':'notOK'},
'ASGC' : {'nickname':'Taiwan-LCG2-w-ce01-atlas-lcgpbs','status':'OK'},
'ASGC_REPRO' : {'nickname':'ASGC_REPRO','status':'notOK'},
'Australia-ATLAS' : {'nickname':'Australia-ATLAS-agh2-atlas-lcgpbs','status':'OK'},
'BARNETT_TEST' : {'nickname':'BARNETT_TEST','status':'notOK'},
'BEIJING' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'BNLPROD' : {'nickname':'BNL_ATLAS_1-condor','status':'notOK'},
'BNL_ATLAS_1' : {'nickname':'BNL_ATLAS_1-condor','status':'OK'},
'BNL_ATLAS_2' : {'nickname':'BNL_ATLAS_2-condor','status':'OK'},
'BNL_ATLAS_DDM' : {'nickname':'BNL_DDM-condor','status':'notOK'},
'BNL_ATLAS_test' : {'nickname':'BNL_ATLAS_2-condor','status':'notOK'},
'BU_ATLAS_Tier2' : {'nickname':'BU_ATLAS_Tier2-pbs','status':'OK'},
'BU_ATLAS_Tier2o' : {'nickname':'BU_ATLAS_Tier2o-pbs','status':'OK'},
'BU_ATLAS_test' : {'nickname':'BU_ATLAS_Tier2-pbs','status':'NOTOK'},
'HU_ATLAS_Tier2' : {'nickname':'HU_ATLAS_Tier2-lsf','status':'OK'},
'CERN-BUILDS' : {'nickname':'CERN-BUILDS','status':'notOK'},
'CERN-RELEASE' : {'nickname':'CERN-RELEASE','status':'notOK'},
'CERN-UNVALID' : {'nickname':'CERN-UNVALID','status':'notOK'},
'CGG' : {'nickname':'CGG-LCG2-ce1-atlas-lcgpbs','status':'notOK'},
'CHARMM' : {'nickname':'CHARMM','status':'notOK'},
'CNR-ILC-PISA' : {'nickname':'CNR-ILC-PISA-gridce-atlas-lcgpbs','status':'notOK'},
'CPPM' : {'nickname':'IN2P3-CPPM-marce01-atlas-pbs','status':'OK'},
'CSCS-LCG2' : {'nickname':'CSCS-LCG2-ce01-egee48h-lcgpbs','status':'OK'},
'csTCDie' : {'nickname':'csTCDie-gridgate-himem-pbs','status':'OK'},
'CYF' : {'nickname':'CYFRONET-LCG2-ce-atlas-pbs','status':'OK'},
'DESY-HH' : {'nickname':'DESY-HH-grid-ce3-default-lcgpbs','status':'OK'},
'DESY-ZN' : {'nickname':'DESY-ZN-lcg-ce0-atlas-lcgpbs','status':'OK'},
'EFDA-JET' : {'nickname':'EFDA-JET-grid002-atlas-lcgpbs','status':'notok'},
'FZK-LCG2' : {'nickname':'FZK-LCG2-ce-1-fzk-atlasXL-pbspro','status':'OK'},
'FZK_REPRO' : {'nickname':'FZK_REPRO','status':'notOK'},
'FZU' : {'nickname':'praguelcg2-golias25-lcgatlas-lcgpbs','status':'OK'},
'GLOW' : {'nickname':'GLOW-CMS-cmsgrid02-atlas-condor','status':'notOK'},
'GLOW-ATLAS' : {'nickname':'GLOW-ATLAS-condor','status':'OK'},
'GoeGrid' : {'nickname':'GoeGrid-ce-goegrid-atlas-lcgpbs','status':'OK'},
'GRIF-IRFU' : {'nickname':'GRIF-IRFU-node07-atlas-lcgpbs','status':'OK'},
'GRIF-LAL' : {'nickname':'GRIF-LAL-grid10-atlas-pbs','status':'OK'},
'GRIF-LPNHE' : {'nickname':'GRIF-LPNHE-lpnce-atlas-pbs','status':'OK'},
'HEPHY-UIBK' : {'nickname':'HEPHY-UIBK-hepx4-atlas-lcgpbs','status':'OK'},
'IFAE' : {'nickname':'ifae-ifaece01-ifae-lcgpbs','status':'OK'},
'IFIC' : {'nickname':'IFIC-LCG2-ce01-atlas-pbs','status':'OK'},
'IHEP' : {'nickname':'BEIJING-LCG2-lcg002-atlas-lcgpbs','status':'OK'},
'ITEP' : {'nickname':'ITEP-ceglite-atlas-lcgpbs','status':'OK'},
'IN2P3-LPSC' : {'nickname':'IN2P3-LPSC-lpsc-ce-atlas-pbs','status':'OK'},
'JINR-LCG2' : {'nickname':'JINR-LCG2-lcgce01-atlas-lcgpbs', 'status':'OK'},
'LAPP' : {'nickname':'IN2P3-LAPP-lapp-ce01-atlas-pbs','status':'OK'},
'LIP-COIMBRA' : {'nickname':'LIP-Coimbra-grid006-atlas-lcgpbs','status':'OK'},
'LIP-LISBON' : {'nickname':'LIP-Lisbon-ce02-atlasgrid-lcgsge','status':'OK'},
'LLR' : {'nickname':'GRIF-LLR-polgrid1-atlas-pbs','status':'notOK'},
'LPC' : {'nickname':'IN2P3-LPC-clrlcgce03-atlas-lcgpbs','status':'OK'},
'LRZ' : {'nickname':'LRZ-LMU-lcg-lrz-ce-atlas-sge','status':'OK'},
'LYON' : {'nickname':'IN2P3-CC-cclcgceli02-long-bqs','status':'OK'},
'LYON_REPRO' : {'nickname':'LYON_REPRO','status':'notOK'},
'Lyon-T2' : {'nickname':'IN2P3-CC-T2-cclcgceli05-long-bqs','status':'OK'},
'LTU_CCT' : {'nickname':'LTU_CCT-pbs','status':'OK'},
'MANC' : {'nickname':'UKI-NORTHGRID-MAN-HEP-ce02-atlas-lcgpbs','status':'OK'},
'MCGILL-LCG2' : {'nickname':'MCGILL-LCG2-atlas-ce-atlas-pbs','status':'OK'},
'MONTREAL' : {'nickname':'Umontreal-LCG2-lcg-ce-atlas-lcgpbs','status':'notOK'},
'MPP' : {'nickname':'MPPMU-grid-ce-long-sge','status':'OK'},
'MWT2_IU' : {'nickname':'MWT2_IU-pbs','status':'OK'},
'MWT2_UC' : {'nickname':'MWT2_UC-pbs','status':'OK'},
'NDGF' : {'nickname':'NDGF-condor','status':'OK'},
'NIKHEF-ELPROD' : {'nickname':'NIKHEF-ELPROD-gazon-atlas-pbs','status':'OK'},
'NIKHEF_REPRO' : {'nickname':'NIKHEF_REPRO','status':'notOK'},
'OUHEP_ITB' : {'nickname':'OUHEP_ITB-condor','status':'notOK'},
'OU_PAUL_TEST' : {'nickname':'OU_OCHEP_SWT2-condor','status':'notOK'},
'OU_OCHEP_SWT2' : {'nickname':'OU_OCHEP_SWT2-condor','status':'OK'},
'OU_OSCER_ATLAS' : {'nickname':'OU_OSCER_ATLAS-lsf','status':'OK'},
'OU_OSCER_ATLASdeb' : {'nickname':'OU_OSCER_ATLASdeb-lsf','status':'notOK'},
'PSNC' : {'nickname':'PSNC-ce-atlas-pbs','status':'OK'},
'PIC' : {'nickname':'pic-ce05-glong-lcgpbs','status':'OK'},
'PIC_REPRO' : {'nickname':'PIC_REPRO','status':'notOK'},
'prague_cesnet_lcg2' : {'nickname':'prague_cesnet_lcg2-skurut17-egee_atlas-lcgpbs','status':'notOK'},
'RAL' : {'nickname':'RAL-LCG2-lcgce02-grid1000M-lcgpbs','status':'OK'},
'RAL_REPRO' : {'nickname':'RAL_REPRO','status':'notOK'},
'ru-Moscow-SINP-LCG2' : {'nickname':'ru-Moscow-SINP-LCG2-lcg02-atlas-lcgpbs','status':'OK'},
'ru-PNPI' : {'nickname':'ru-PNPI-cluster-atlas-pbs','status':'OK'},
'RDIGTEST' : {'nickname':'RDIGTEST','status':'notOK'},
'ROMANIA02' : {'nickname':'RO-02-NIPNE-tbat01-atlas-lcgpbs','status':'OK'},
'ROMANIA07' : {'nickname':'RO-07-NIPNE-tbit01-atlas-lcgpbs','status':'OK'},
'RRC-KI' : {'nickname':'RRC-KI-gate-atlas-lcgpbs','status':'OK'},
'RU-Protvino-IHEP' : {'nickname':'RU-Protvino-IHEP-ce0003-atlas-lcgpbs','status':'OK'},
'SARA_REPRO' : {'nickname':'SARA_REPRO','status':'notOK'},
'SFU-LCG2' : {'nickname':'SFU-LCG2-snowpatch-atlas-lcgpbs','status':'OK'},
'SLACXRD' : {'nickname':'SLACXRD-lsf','status':'OK'},
'SLAC_PAUL_TEST' : {'nickname':'SLACXRD-lsf','status':'notOK'},
'SNS-PISA' : {'nickname':'SNS-PISA-gridce-atlas-lcgpbs','status':'notOK'},
'SPACI-CS-IA64' : {'nickname':'SPACI-CS-IA64-square-atlas-lsf','status':'notOK'},
'SWT2_CPB' : {'nickname':'SWT2_CPB-pbs','status':'OK'},
'Taiwan-IPAS-LCG2' : {'nickname':'Taiwan-IPAS-LCG2-atlasce-atlas-lcgcondor','status':'notOK'},
'TEST1' : {'nickname':'TEST1','status':'notOK'},
'TEST2' : {'nickname':'TEST2','status':'notOK'},
'TEST3' : {'nickname':'TEST3','status':'notOK'},
'TEST4' : {'nickname':'TEST4','status':'notOK'},
'TESTCHARMM' : {'nickname':'TESTCHARMM','status':'notOK'},
'TESTGLIDE' : {'nickname':'TESTGLIDE','status':'notOK'},
'TOKYO' : {'nickname':'TOKYO-LCG2-lcg-ce01-atlas-lcgpbs','status':'OK'},
'TORONTO-LCG2' : {'nickname':'TORONTO-LCG2-bigmac-lcg-ce2-atlas-pbs','status':'OK'},
'TPATHENA' : {'nickname':'TPATHENA','status':'notOK'},
'TPPROD' : {'nickname':'TPPROD','status':'notOK'},
'TRIUMF' : {'nickname':'TRIUMF-LCG2-ce1-atlas-lcgpbs','status':'OK'},
'TRIUMF_DDM' : {'nickname':'TRIUMF_DDM','status':'notOK'},
'TRIUMF_REPRO' : {'nickname':'TRIUMF_REPRO','status':'notOK'},
'TW-FTT' : {'nickname':'TW-FTT-f-ce01-atlas-lcgpbs','status':'OK'},
'TWTEST' : {'nickname':'TWTEST','status':'notOK'},
'TestPilot' : {'nickname':'TestPilot','status':'notOK'},
'UAM-LCG2' : {'nickname':'UAM-LCG2-grid003-atlas-lcgpbs','status':'OK'},
'UBC' : {'nickname':'UBC-pbs','status':'OK'},
'UBC_PAUL_TEST' : {'nickname':'UBC-pbs','status':'notOK'},
'UIUC-HEP' : {'nickname':'UIUC-HEP-condor','status':'OK'},
'UCITB_EDGE7' : {'nickname':'UCITB_EDGE7-pbs','status':'OK'},
'UC_ATLAS_MWT2' : {'nickname':'UC_ATLAS_MWT2-condor','status':'OK'},
'UC_ATLAS_test' : {'nickname':'UC_ATLAS_MWT2-condor','status':'OK'},
'UC_Teraport' : {'nickname':'UC_Teraport-pbs','status':'notOK'},
'UMESHTEST' : {'nickname':'UMESHTEST','status':'notOK'},
'UNI-FREIBURG' : {'nickname':'UNI-FREIBURG-ce-atlas-pbs','status':'OK'},
'UTA-DPCC' : {'nickname':'UTA-DPCC-pbs','status':'OK'},
'UTA-DPCC-test' : {'nickname':'UTA-DPCC-test-pbs','status':'OK'},
'UTA_PAUL_TEST' : {'nickname':'UTA-SWT2-pbs','status':'notOK'},
'UTA_SWT2' : {'nickname':'UTA-SWT2-pbs','status':'OK'},
'UTD-HEP' : {'nickname':'UTD-HEP-pbs','status':'OK'},
'VICTORIA-LCG2' : {'nickname':'VICTORIA-LCG2-lcg-ce-general-lcgpbs','status':'OK'},
'Wuppertal' : {'nickname':'wuppertalprod-grid-ce-dg_long-lcgpbs','status':'OK'},
}
# cloud-MoverID mapping
PandaMoverIDs = {
'US' : 'BNL_ATLAS_DDM',
'CA' : 'TRIUMF_DDM',
'FR' : 'TRIUMF_DDM',
'IT' : 'TRIUMF_DDM',
'NL' : 'TRIUMF_DDM',
'DE' : 'TRIUMF_DDM',
'TW' : 'TRIUMF_DDM',
'UK' : 'TRIUMF_DDM',
'ES' : 'TRIUMF_DDM',
}
| 80.874372
| 115
| 0.541258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,140
| 0.692183
|
581c0ca0e2bb4ab7335e22da97be7ac35a4e0e71
| 513
|
py
|
Python
|
tools/scenario-player/scenario_player/exceptions.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | 8
|
2019-06-12T14:50:06.000Z
|
2022-02-15T16:20:07.000Z
|
tools/scenario-player/scenario_player/exceptions.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | 141
|
2019-06-18T13:04:08.000Z
|
2021-11-23T22:00:32.000Z
|
tools/scenario-player/scenario_player/exceptions.py
|
karlb/raiden
|
61ade0559add1a97588ae6bdedd5e0b99ed41de3
|
[
"MIT"
] | 17
|
2019-05-21T18:09:05.000Z
|
2020-10-29T13:01:01.000Z
|
class ScenarioError(Exception):
pass
class ScenarioTxError(ScenarioError):
pass
class TokenRegistrationError(ScenarioTxError):
pass
class ChannelError(ScenarioError):
pass
class TransferFailed(ScenarioError):
pass
class NodesUnreachableError(ScenarioError):
pass
class RESTAPIError(ScenarioError):
pass
class RESTAPIStatusMismatchError(ScenarioError):
pass
class UnknownTaskTypeError(ScenarioError):
pass
class ScenarioAssertionError(ScenarioError):
pass
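# Minimal usage sketch (illustrative): raise the most specific subclass so
# callers can still catch ScenarioError to handle any scenario failure.
#   raise RESTAPIStatusMismatchError("expected HTTP 200, got 409")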
| 13.153846
| 48
| 0.769981
| 485
| 0.945419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
581d47d6e3101d07297475a1a84d27b2898647b8
| 1,002
|
py
|
Python
|
explain.py
|
jcsalterego/gh-contest
|
033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b
|
[
"BSD-3-Clause"
] | 1
|
2015-11-05T02:50:57.000Z
|
2015-11-05T02:50:57.000Z
|
explain.py
|
jcsalterego/gh-contest
|
033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b
|
[
"BSD-3-Clause"
] | null | null | null |
explain.py
|
jcsalterego/gh-contest
|
033f87c5338e3066ee4c80df2ee8e1ae4d6f1c7b
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from pprint import pprint
from matchmaker.database import *
import sys
def main(argv):
if len(argv) == 1:
return
line = argv[1]
if line[0] in '+-':
line = line[1:]
user, repos = line.split(":")
user = int(user)
repos = [int(r) for r in repos.split(",")]
print("Loading database...")
db = Database("data")
print("original watchlist")
watching = sorted(db.u_watching[user])
for r in watching:
print "%6d" % r,
if r in db.r_info:
print("%18s - %20s - %10s"
% tuple([x[:20] for x in db.r_info[r]]))
else:
print("")
print("")
print("new additions")
watching = sorted(repos)
for r in watching:
print "%6d" % r,
if r in db.r_info:
print("%18s - %20s - %10s"
% tuple([x[:20] for x in db.r_info[r]]))
else:
print("")
if __name__ == '__main__':
sys.exit(main(sys.argv))
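# Illustrative invocation (an addition; the ids below are made up):
#   ./explain.py "+1234:42,99,7"
# An optional leading '+'/'-' is stripped, '1234' is parsed as the user id and
# '42,99,7' as the repo ids; the script then prints the user's stored watchlist
# and the proposed additions, annotated with repo info where available.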
| 22.266667
| 58
| 0.505988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.158683
|
581e242497be1d7d21237861371ea688ae66e1e5
| 3,862
|
py
|
Python
|
qiskit/pulse/commands/command.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 2
|
2019-06-28T19:58:42.000Z
|
2019-07-26T05:04:02.000Z
|
qiskit/pulse/commands/command.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 3
|
2018-11-13T17:33:37.000Z
|
2018-12-03T09:35:00.000Z
|
qiskit/pulse/commands/command.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 2
|
2017-12-03T15:48:14.000Z
|
2018-03-11T13:08:03.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Base command.
"""
import re
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Union
import numpy as np
from qiskit.pulse.exceptions import PulseError
from qiskit.pulse.channels import Channel
class MetaCount(ABCMeta):
"""Meta class to count class instances."""
def __new__(mcs, name, bases, namespace, **_):
new_cls = super(MetaCount, mcs).__new__(mcs, name, bases, namespace)
new_cls.instances_counter = 0
return new_cls
class Command(metaclass=MetaCount):
"""Abstract command class."""
# Counter for the number of instances in this class
prefix = 'c'
@abstractmethod
def __init__(self, duration: Union[int, np.integer] = None):
"""Create a new command.
Args:
duration: Duration of this command.
Raises:
            PulseError: when duration is not an integer number of points
"""
if isinstance(duration, (int, np.integer)):
self._duration = int(duration)
else:
raise PulseError('Pulse duration should be integer.')
self._name = Command.create_name()
@classmethod
def create_name(cls, name: str = None) -> str:
"""Autogenerate names for pulse commands."""
if name is None:
try:
name = '%s%i' % (cls.prefix, cls.instances_counter) # pylint: disable=E1101
except TypeError:
raise PulseError("prefix and counter must be non-None when name is None.")
else:
try:
name = str(name)
except Exception:
raise PulseError("The pulse command name should be castable to a string "
"(or None for autogenerate a name).")
name_format = re.compile('[a-zA-Z][a-zA-Z0-9_]*')
if name_format.match(name) is None:
raise PulseError("%s is an invalid OpenPulse command name." % name)
cls.instances_counter += 1 # pylint: disable=E1101
return name
@property
def duration(self) -> int:
"""Duration of this command."""
return self._duration
@property
def name(self) -> str:
"""Name of this command."""
return self._name
@abstractmethod
def to_instruction(self, command, *channels: List[Channel],
name: Optional[str] = None):
"""Create an instruction from command.
Returns:
Instruction
"""
pass
def __call__(self, *args, **kwargs):
"""Creates an Instruction obtained from call to `to_instruction` wrapped in a Schedule."""
return self.to_instruction(*args, **kwargs)
def __eq__(self, other: 'Command'):
"""Two Commands are the same if they are of the same type
and have the same duration and name.
Args:
other: other Command
Returns:
bool: are self and other equal
"""
return (type(self) is type(other)) and (self.duration == other.duration)
def __hash__(self):
return hash((type(self), self.duration, self.name))
def __repr__(self):
return '%s(duration=%d, name="%s")' % (self.__class__.__name__,
self.duration,
self.name)
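# --- Illustrative sketch (an addition; not part of qiskit-terra) ---
# A minimal concrete subclass only needs __init__ and to_instruction();
# name autogeneration, equality and hashing are inherited from Command.
class _DemoCommand(Command):
    prefix = 'demo'
    def __init__(self, duration: int):
        super().__init__(duration=duration)
    def to_instruction(self, command, *channels, name=None):
        raise NotImplementedError("sketch only; a real command builds an Instruction")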
| 31.398374
| 98
| 0.599689
| 3,124
| 0.808907
| 0
| 0
| 1,841
| 0.476696
| 0
| 0
| 1,662
| 0.430347
|
581f418d3f23d0acfebe881f3102cd64dfbdffef
| 6,654
|
py
|
Python
|
data_loader/data_loaders.py
|
brendanwallison/birds
|
b70c01b8c953dfa172c65a51f7bdf100e47853d3
|
[
"MIT"
] | null | null | null |
data_loader/data_loaders.py
|
brendanwallison/birds
|
b70c01b8c953dfa172c65a51f7bdf100e47853d3
|
[
"MIT"
] | null | null | null |
data_loader/data_loaders.py
|
brendanwallison/birds
|
b70c01b8c953dfa172c65a51f7bdf100e47853d3
|
[
"MIT"
] | null | null | null |
from torchvision import datasets, transforms
from torchvision.transforms import functional as TF
from base import BaseDataLoader
from six.moves import urllib
from parse_config import ConfigParser
# downloads
import requests
import json
from collections import Counter
import os
import errno
import csv
import numpy as np
import pandas as pd
import splitfolders
import pathlib
import torchaudio
import torch
# Note: horizontal dimension = 2 * time_window * sample_rate // n_fft + 1
# vertical crop = n_fft // 2 + 1
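# Worked example of the two formulas above (the sample values are assumptions):
# with time_window = 5 s, sample_rate = 16_000 Hz and n_fft = 1024,
# horizontal dimension = 2 * 5 * 16_000 // 1024 + 1 = 157 frames and
# vertical crop = 1024 // 2 + 1 = 513 frequency bins.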
class SpectrogramLoader(BaseDataLoader):
def __init__(self, dataset=None, batch_size=128, shuffle=False, validation_split=0.0, weighted_sample = False, num_workers=1, data_dir="data/processed", training=True):
self.dataset = dataset
self.data_dir = data_dir
if dataset is not None:
self.vertical_crop = dataset.vertical_crop
self.horizontal_crop = dataset.horizontal_crop
if dataset.mode == 'xeno':
# Stack of numpy melspecs -> one torch melspec
#self.horizontal_crop=dataset.horizontal_crop - 1
trsfm = transforms.Compose([
RandomImage(dataset.split_files, self.horizontal_crop),
#Superimpose(self.dataset, dataset.split_files, self.horizontal_crop),
NormalizeLabels(),
ThreeChannel(),
NumpyStackToTensors()
#transforms.RandomCrop(size = (self.vertical_crop, self.horizontal_crop), pad_if_needed=True, padding_mode = 'constant')
])
else:
trsfm = transforms.Compose([
# RandomImage(),
ThreeChannel(),
AxisOrderChange(),
NumpyStackToTensors(),
Crop()
#transforms.ToTensor(),
#transforms.RandomCrop(size = (self.vertical_crop, self.horizontal_crop), pad_if_needed=True, padding_mode = 'constant')
])
dataset.set_transform(trsfm)
else:
self.vertical_crop = 128
self.horizontal_crop = 281
            # fixes: extensions must be a tuple, and the result must be stored on
            # self.dataset (this fallback still assumes a `trsfm` transform is defined)
            self.dataset = datasets.DatasetFolder(root=self.data_dir, loader=self.default_loader,
                                                  transform=trsfm, extensions=('.pickle',))
super().__init__(self.dataset, batch_size, shuffle, validation_split, weighted_sample, num_workers)
# assumes we have used torch.save() or another pickle saver
# on tensor-based spectrogram
def default_loader(self, path):
mel_specgram = torch.load(path)
return mel_specgram.numpy()
class AddChannel(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, t):
sample = t[0]
label = t[1]
new_sample = sample[:, :, None]
return (new_sample, label)
class RandomImage(object):
# Pick a random image from a stack
def __init__(self, split_files, horizontal_crop = None):
self.split_files = split_files
self.horizontal_crop = horizontal_crop
def __call__(self, t):
sample = t[0]
label = t[1]
if self.split_files:
choices = range(sample.shape[0])
choice = np.random.choice(choices)
new_sample = sample[choice]
else:
low = 0
high = sample.shape[-1] - self.horizontal_crop - 1
while high < 0:
sample = np.hstack((sample, sample))
high = sample.shape[-1] - self.horizontal_crop - 1
offset = int(np.random.uniform(low=low, high=high, size = 1))
new_sample = sample[..., offset: offset+self.horizontal_crop]
return (new_sample, label)
class ThreeChannel(object):
# Converts a stack of images to color
def __call__(self, t):
sample = t[0]
label = t[1]
sample = np.stack([sample, sample, sample])
return (sample, label)
class NumpyStackToTensors(object):
def __call__(self, t):
sample = t[0]
label = t[1]
sample = [transforms.ToTensor()(sample[i]) for i in range(sample.shape[0])]
sample = torch.stack(sample)
return (torch.squeeze(sample), label)
class AxisOrderChange(object):
# Torch tensor transform expects:
# HxWxC, from 0 to 255
# Returns CxHxW
def __call__(self, t):
sample = t[0]
label = t[1]
sample = np.moveaxis(sample, 0, -1)
return (sample, label)
class Crop(object):
def __call__(self, t):
sample = t[0]
label = t[1]
return (TF.crop(sample, top = 0, left = 0, height = 128, width = 201), label)
# Assumes one image file
class Superimpose(object):
def __init__(self, dataset, split_files, horizontal_crop = None):
self.dataset = dataset
self.split_files = split_files
self.horizontal_crop = horizontal_crop
def __call__(self, t):
sample = t[0]
label = t[1]
mix_idx = np.random.choice(len(self.dataset))
mixer, mix_label = self.dataset.__getitem__(mix_idx, no_transform = True)
mixer, mix_label = RandomImage(self.split_files, self.horizontal_crop)((sample, mix_label))
w = self.weight()
sample = sample + mixer*w
label = label + mix_label*w
return (sample, label)
def weight(self):
w = np.random.beta(1, 3)
return w
# Assumes one image file
class NormalizeLabels(object):
def __call__(self, t):
sample = t[0]
label = t[1]
sum_of_rows = torch.sum(label)
normalized_labels = label / sum_of_rows
return (sample, normalized_labels)
class MnistDataLoader(BaseDataLoader):
"""
MNIST data loading demo using BaseDataLoader
"""
#def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, weighted_sample = False, num_workers=1, training=True):
trsfm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# headers required for valid request to cloudflare-protected dataset
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
self.data_dir = data_dir
self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=trsfm)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, weighted_sample, num_workers)
| 37.382022
| 172
| 0.622483
| 6,048
| 0.908927
| 0
| 0
| 0
| 0
| 0
| 0
| 1,173
| 0.176285
|
5820628189dcbe4c683064fd6478349ee7f02524
| 5,855
|
py
|
Python
|
stockscanner/model/portfolio/portfolio.py
|
adityazagade/StockScanner
|
4aecf886a8858757e4720b68d0af5ed94f4d371a
|
[
"Apache-2.0"
] | null | null | null |
stockscanner/model/portfolio/portfolio.py
|
adityazagade/StockScanner
|
4aecf886a8858757e4720b68d0af5ed94f4d371a
|
[
"Apache-2.0"
] | null | null | null |
stockscanner/model/portfolio/portfolio.py
|
adityazagade/StockScanner
|
4aecf886a8858757e4720b68d0af5ed94f4d371a
|
[
"Apache-2.0"
] | null | null | null |
from datetime import date
from typing import List
from stockscanner.model.asset.asset_type import AssetType
from stockscanner.model.exceptions.exceptions import AssetNotFoundException
from stockscanner.model.asset.asset import Asset
from stockscanner.model.asset.cash import Cash
from stockscanner.model.asset.debt import Debt
from stockscanner.model.asset.equity import Equity
from stockscanner.model.strategies.strategy import Strategy
class Portfolio:
def __init__(self, name) -> None:
self.name = name
self.description = ""
self.__assets: List[Asset] = list()
self.__change_logs = list()
self.__strategy = None
def get_change_logs(self):
return self.__change_logs
def add_asset(self, a: Asset):
self.__assets.append(a)
def set_description(self, description):
self.description = description
def get_returns(self):
pass
def get_xirr(self):
pass
def total_invested(self):
total_invested = 0
for a in self.__assets:
total_invested += a.get_invested_amount()
return total_invested
def apply_strategy(self, s: Strategy):
self.__strategy = s
def rebalance_by_weights(self, **kwargs):
curr_date: date = kwargs.get("curr_date")
curr_eq_weight = self.get_asset_weight(AssetType.EQUITY, curr_date)
curr_debt_weight = self.get_asset_weight(AssetType.DEBT, curr_date)
curr_gold_weight = self.get_asset_weight(AssetType.GOLD, curr_date)
curr_cash_weight = self.get_asset_weight(AssetType.CASH, curr_date)
val_as_of_today = self.get_value_as_of_date(curr_date)
# change in weight
amount = (kwargs.get("eq_weight", 0) - curr_eq_weight) * val_as_of_today
if amount > 0:
self.get_asset(AssetType.EQUITY).add_by_amount(abs(amount), curr_date)
elif amount < 0:
self.get_asset(AssetType.EQUITY).reduce_by_amount(abs(amount), curr_date)
amount = (kwargs.get("debt_weight", 0) - curr_debt_weight) * val_as_of_today
if amount > 0:
self.get_asset(AssetType.DEBT).add_by_amount(abs(amount), curr_date)
elif amount < 0:
self.get_asset(AssetType.DEBT).reduce_by_amount(abs(amount), curr_date)
amount = (kwargs.get("gold_weight", 0) - curr_gold_weight) * val_as_of_today
if amount > 0:
self.get_asset(AssetType.GOLD).add_by_amount(abs(amount), curr_date)
elif amount < 0:
self.get_asset(AssetType.GOLD).reduce_by_amount(abs(amount), curr_date)
amount = (kwargs.get("cash_weight", 0) - curr_cash_weight) * val_as_of_today
if amount > 0:
self.get_asset(AssetType.CASH).add_by_amount(abs(amount), curr_date)
elif amount < 0:
self.get_asset(AssetType.CASH).reduce_by_amount(abs(amount), curr_date)
message = f"Total Invested: ${self.total_invested()}, " \
f"Current Value: ${self.get_value_as_of_date(curr_date)} \r\n " \
f"eq: {self.get_asset_weight(AssetType.EQUITY, curr_date)} " \
f"debt: {self.get_asset_weight(AssetType.DEBT, curr_date)} " \
f"gold: {self.get_asset_weight(AssetType.GOLD, curr_date)} " \
f"cash: {self.get_asset_weight(AssetType.CASH, curr_date)}"
self.add_rebalance_logs(f"Portfolio rebalanced on {curr_date} \n + ${message}")
def get_asset_weight(self, asset: AssetType, curr_date=None):
for a in self.__assets:
if a.type == asset:
if curr_date:
return a.get_value_as_of_date(curr_date) / self.get_value_as_of_date(curr_date)
else:
return a.get_current_value() / self.get_current_value()
return 0
def get_current_value(self):
sum = 0
for a in self.__assets:
sum += a.get_current_value()
return sum
def add_rebalance_logs(self, message):
self.__change_logs.append(message)
def get_value_as_of_date(self, d: date):
val = 0
for a in self.__assets:
val += a.get_value_as_of_date(d)
return val
def get_asset(self, asset_type: AssetType):
for a in self.__assets:
if a.type == asset_type:
return a
raise AssetNotFoundException()
def __str__(self) -> str:
current_details = f"Total Invested: ${self.total_invested()}, Current Value: ${self.get_current_value()}"
change_logs = '\r\n'.join(map(str, self.get_change_logs()))
trade_book = '\r\n'.join(map(str, self.get_trade_book()))
return f"{current_details} \r\n + {change_logs} \r\n {trade_book}"
def get_trade_book(self) -> list:
return self.get_asset(AssetType.EQUITY).get_trade_book()
def get_strategy(self) -> Strategy:
return self.__strategy
def add_stock(self, **kwargs):
try:
eq = self.get_asset(AssetType.EQUITY)
except AssetNotFoundException:
eq = Equity()
self.__assets.append(eq)
eq.add(**kwargs)
def add_debt(self, **kwargs):
try:
dt = self.get_asset(AssetType.DEBT)
except AssetNotFoundException:
dt = Debt()
self.__assets.append(dt)
dt.add(**kwargs)
def add_cash(self, cash_value):
if cash_value <= 0:
return
try:
cash_asset = self.get_asset(AssetType.CASH)
cash_asset.add_by_amount(cash_value)
except AssetNotFoundException:
cash_asset = Cash(cash_value)
self.__assets.append(cash_asset)
def add_equities_by_amount(self, amount: int, d: date):
eq = self.get_asset(AssetType.EQUITY)
eq.add_by_amount(amount=amount, d=d)
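# --- Illustrative rebalancing sketch (an addition, kept as comments because it
# needs pre-populated assets to run) ---
# rebalance_by_weights() takes target weights as keyword arguments and buys or
# sells each asset class until the portfolio matches them on the given date:
#
#   pf = Portfolio("demo")
#   ... add equity/debt/gold/cash holdings first ...
#   pf.rebalance_by_weights(curr_date=date.today(),
#                           eq_weight=0.6, debt_weight=0.3,
#                           gold_weight=0.05, cash_weight=0.05)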
| 37.056962
| 113
| 0.640649
| 5,413
| 0.924509
| 0
| 0
| 0
| 0
| 0
| 0
| 640
| 0.109308
|
5820f326461279dab8c970a64d716534511d2f87
| 2,478
|
py
|
Python
|
python/zdl/error_logger/error_logger/url_rules/report.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
python/zdl/error_logger/error_logger/url_rules/report.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | 1
|
2016-09-15T07:06:15.000Z
|
2016-09-15T07:06:15.000Z
|
python/zdl/error_logger/error_logger/url_rules/report.py
|
wjiec/packages
|
4ccaf8f717265a1f8a9af533f9a998b935efb32a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2017
import json
import time
from error_logger.url_rules import _base_url_rule
# from error_logger.net import sms_notification, email_notification
from error_logger.utils import generic
class Report(_base_url_rule.BaseUrlRule):
__url__ = '/report'
__methods__ = ['POST']
def __init__(self, config, *args, **kwargs):
super(Report, self).__init__(config)
def callback(self):
adapter = self.get_adapter()
source = self.get_url_parameter('source') # type: str
json_data = self.get_body_dict() # type: dict
for error in json_data.get('errors', []):
_level = int(error.pop('level'))
_time = int(error.pop('time', self.get_current_timestamp()))
_module = generic.to_string(error.pop('module'))
_type = generic.to_string(error.pop('type'))
_msg = error.pop('msg')
_other_data = json.dumps(error)
_ip = generic.to_string(self.get_remote_ip())
# TODO. modify this
self._notification(source, _level)
if not _level or not _time or not _module or not _type or not _msg:
return self.jsonify(1, 'report data format invalid, '
'may be loss some fields')
        # escape single quotes in the table name (the original call discarded its result)
        source = source.replace('\'', '\'\'')
with adapter.cursor() as cursor:
sql = cursor.mogrify(
'''
INSERT INTO "{source}"
("level", "time", "module", "type", "msg", "ip", "other_data")
VALUES
(%s, %s, %s, %s, %s, %s, %s)
'''.format(source=source), (_level,
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(_time)),
_module,
_type,
_msg,
_ip,
_other_data
)
)
try:
adapter.execute(sql)
except Exception as e:
            print(e)
return self.jsonify(2, 'insert error data error occurs, may be'
' error data invalid or server error')
else:
return self.jsonify(0, 'success')
def _notification(self, source, error_level):
pass
| 36.441176
| 84
| 0.506053
| 2,249
| 0.907587
| 0
| 0
| 0
| 0
| 0
| 0
| 642
| 0.25908
|
58230301eafe03e15cb587a17b91ac8b8de815f2
| 246
|
py
|
Python
|
cli/commands/update.py
|
gamesbrainiac/cli
|
bba7285607a8644911f720d1ceb1404ab502bf00
|
[
"Apache-2.0"
] | null | null | null |
cli/commands/update.py
|
gamesbrainiac/cli
|
bba7285607a8644911f720d1ceb1404ab502bf00
|
[
"Apache-2.0"
] | null | null | null |
cli/commands/update.py
|
gamesbrainiac/cli
|
bba7285607a8644911f720d1ceb1404ab502bf00
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import click
from .. import cli
@cli.cli.command(hidden=True)
def update():
"""
Look for new version updates to CLI
"""
# TODO create update command
click.echo('Sorry, command not programmed yet.')
| 16.4
| 52
| 0.630081
| 0
| 0
| 0
| 0
| 185
| 0.752033
| 0
| 0
| 138
| 0.560976
|
5823914afc52a344ae37dba70fad832cd069531a
| 2,397
|
py
|
Python
|
test/test_sl/test_model/test_data.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
test/test_sl/test_model/test_data.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
test/test_sl/test_model/test_data.py
|
jlaumonier/mlsurvey
|
373598d067c7f0930ba13fe8da9756ce26eecbaf
|
[
"MIT"
] | null | null | null |
import unittest
import numpy as np
import pandas as pd
import mlsurvey as mls
class TestData(unittest.TestCase):
def test_to_dict_dict_should_be_set(self):
"""
:test : mlsurvey.model.Data.to_dict()
:condition : x,y, y_pred data are filled.
:main_result : the dictionary generated is the same as expected
"""
x = np.array([[1, 2, 3], [4, 5, 6]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = mls.sl.models.DataPandas(df, df_contains='xyypred')
expected = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
result = data.to_dict()
self.assertDictEqual(expected, result)
def test_from_dict_df_empty(self):
"""
:test : mlsurvey.model.DataPandas.from_dict()
:condition : the input dict is set and an empty dataframe is given.
:main_result : a ModelError occurs
"""
df = pd.DataFrame(data=np.array([]))
d = None
input_dict = {'df_contains': 'xyypred',
'y_col_name': 'target',
'y_pred_col_name': 'target_pred'}
try:
d = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(d)
self.assertTrue(True)
def test_from_dict_dict_empty(self):
"""
:test : mlsurvey.model.Data.from_dict()
        :condition : the input dict does not contain all keys and a full dataframe is given
:main_result : a ModelError occurs
"""
x = np.array([[1, 2], [3, 4]])
y = np.array([0, 1])
y_pred = np.array([1, 0])
data_array = np.concatenate((x, np.array([y]).T, np.array([y_pred]).T), axis=1)
df = pd.DataFrame(data=data_array)
data = None
input_dict = {'df_contains': 'xyypred',
'y_pred_col_name': 'target_pred'}
try:
data = mls.sl.models.DataPandas.from_dict(input_dict, df)
self.assertTrue(False)
except mls.exceptions.ModelError:
self.assertIsNone(data)
self.assertTrue(True)
| 35.776119
| 93
| 0.570296
| 2,314
| 0.965373
| 0
| 0
| 0
| 0
| 0
| 0
| 776
| 0.323738
|
582469a40acf21b2f0921b0060688c700c098a03
| 1,126
|
py
|
Python
|
baidu_verify_response.py
|
CodingDogzxg/Verifycode_ocr
|
6f1bdac2137993695cb4591afd1b47931680b204
|
[
"MIT"
] | null | null | null |
baidu_verify_response.py
|
CodingDogzxg/Verifycode_ocr
|
6f1bdac2137993695cb4591afd1b47931680b204
|
[
"MIT"
] | null | null | null |
baidu_verify_response.py
|
CodingDogzxg/Verifycode_ocr
|
6f1bdac2137993695cb4591afd1b47931680b204
|
[
"MIT"
] | null | null | null |
# encoding:utf-8
import requests
import base64
import time
'''
General-purpose text recognition (Baidu OCR)
'''
request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic"
access_token = ''  # Baidu AI access token; see the Baidu AI docs for how to obtain one
request_url = request_url + "?access_token=" + access_token
headers = {'content-type': 'application/x-www-form-urlencoded'}
for file_index in range(10000):
file_name = 'vcode_imgs/' + str(file_index) + '.png'
f_obj = open(file_name, 'rb')
img = base64.b64encode(f_obj.read())
f_obj.close()
params = {"image": img}
response = requests.post(request_url, data=params, headers=headers)
if response:
answer = response.content.decode().split(",")[-1].split("\"")[-2].replace(' ', '').lower()
if len(answer) < 5:
with open('baidu_ocr_verify_response.json', 'a') as f:
f.write('{}:{}\n'.format(str(file_index) + '.png', answer))
else:
with open('baidu_ocr_verify_response.json', 'a') as f:
                f.write('{}:{}\n'.format(str(file_index) + '.png', 'recognition failed'))
            print('Recognition failed for file {}.png; please check it manually'.format(file_index))
time.sleep(0.2)
| 35.1875
| 98
| 0.619005
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 416
| 0.348993
|
5824ba4bea2f64074dbcd56d9e462c95a3407e0f
| 11,478
|
py
|
Python
|
nets/efficientdet_training.py
|
BikesSaver/efficientdet-pytorch
|
c1e02484733cf2080ecb2ee57c184038a77a09e8
|
[
"MIT"
] | 1
|
2020-09-17T00:51:38.000Z
|
2020-09-17T00:51:38.000Z
|
nets/efficientdet_training.py
|
BikesSaver/efficientdet-pytorch
|
c1e02484733cf2080ecb2ee57c184038a77a09e8
|
[
"MIT"
] | null | null | null |
nets/efficientdet_training.py
|
BikesSaver/efficientdet-pytorch
|
c1e02484733cf2080ecb2ee57c184038a77a09e8
|
[
"MIT"
] | null | null | null |
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
import cv2
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
from .RepulsionLoss.my_repulsion_loss import repulsion
def preprocess_input(image):
image /= 255
mean=(0.406, 0.456, 0.485)
std=(0.225, 0.224, 0.229)
image -= mean
image /= std
return image
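# Note (added comment): the mean/std above are the ImageNet statistics with the
# channel order reversed relative to the usual (0.485, 0.456, 0.406) and
# (0.229, 0.224, 0.225) ordering.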
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 0])
ih = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 1])
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih
ua = torch.clamp(ua, min=1e-8)
intersection = iw * ih
IoU = intersection / ua
return IoU
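# Shape note (added comment): for a of shape (N, 4) in [y1, x1, y2, x2] order
# (anchors) and b of shape (M, 4) in [x1, y1, x2, y2] order (annotations),
# calc_iou returns an (N, M) matrix of pairwise IoU values.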
def get_target(anchor, bbox_annotation, classification, cuda):
IoU = calc_iou(anchor[:, :], bbox_annotation[:, :4])
IoU_max, IoU_argmax = torch.max(IoU, dim=1)
# compute the loss for classification
targets = torch.ones_like(classification) * -1
if cuda:
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[positive_indices, assigned_annotations[positive_indices, 4].long()] = 1
return targets, num_positive_anchors, positive_indices, assigned_annotations
def encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y):
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# efficientdet style
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dy, targets_dx, targets_dh, targets_dw))
targets = targets.t()
return targets
class FocalLoss(nn.Module):
def __init__(self):
super(FocalLoss, self).__init__()
def forward(self, classifications, regressions, anchors, annotations, alpha=0.25, gamma=2.0, cuda=True):
        # setup
dtype = regressions.dtype
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
repulsion_losses = []
        # get the anchor boxes and convert them to center/width-height form
anchor = anchors[0, :, :].to(dtype)
        # convert to center, width/height form
anchor_widths = anchor[:, 3] - anchor[:, 1]
anchor_heights = anchor[:, 2] - anchor[:, 0]
anchor_ctr_x = anchor[:, 1] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 0] + 0.5 * anchor_heights
rep_target = []
rep_regres = []
for j in range(batch_size):
            # extract the ground-truth boxes
bbox_annotation = annotations[j]
            # get each image's classification and regression predictions
classification = classifications[j, :, :]
regression = regressions[j, :, :]
            # label smoothing: clamp predictions away from 0 and 1
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if len(bbox_annotation) == 0:
alpha_factor = torch.ones_like(classification) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = 1. - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
cls_loss = focal_weight * bce
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
classification_losses.append(cls_loss.sum())
continue
            # get the target assignments for this image
targets, num_positive_anchors, positive_indices, assigned_annotations = get_target(anchor, bbox_annotation, classification, cuda)
rep_target.append(bbox_annotation[:, 0:4])
rep_regres.append(anchor[positive_indices,:])
alpha_factor = torch.ones_like(targets) * alpha
if cuda:
alpha_factor = alpha_factor.cuda()
alpha_factor = torch.where(torch.eq(targets, 1.), alpha_factor, 1. - alpha_factor)
focal_weight = torch.where(torch.eq(targets, 1.), 1. - classification, classification)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(targets * torch.log(classification) + (1.0 - targets) * torch.log(1.0 - classification))
cls_loss = focal_weight * bce
zeros = torch.zeros_like(cls_loss)
if cuda:
zeros = zeros.cuda()
cls_loss = torch.where(torch.ne(targets, -1.0), cls_loss, zeros)
classification_losses.append(cls_loss.sum() / torch.clamp(num_positive_anchors.to(dtype), min=1.0)) # cross_entropy ??
# smoooth_l1 & repulsion_loss
if positive_indices.sum() > 0:
targets = encode_bbox(assigned_annotations, positive_indices, anchor_widths, anchor_heights, anchor_ctr_x, anchor_ctr_y)
# print("Targets:", targets)n * 4
regression_diff = torch.abs(targets - regression[positive_indices, :]) # -?
# smoooth_l1
L1delta = 1.0 #0.5
regression_loss = torch.where(
torch.le(regression_diff, L1delta),
0.5 * torch.pow(regression_diff, 2),
L1delta * regression_diff - 0.5 * L1delta ** 2
)
regression_losses.append(regression_loss.sum())
else:
if cuda:
regression_losses.append(torch.tensor(0).to(dtype).cuda())
repulsion_losses.append(torch.tensor(0).to(dtype).cuda())
else:
regression_losses.append(torch.tensor(0).to(dtype))
repulsion_losses.append(torch.tensor(0).to(dtype))
c_loss = torch.stack(classification_losses).mean()
r_loss = torch.stack(regression_losses).mean()
# Repulsion
# rep_target = torch.tensor(rep_target, dtype=torch.float16)
# rep_regres = torch.tensor(rep_regres, dtype=torch.float16)
loss_RepGT = repulsion(rep_target, rep_regres) # anchor
repu_loss = loss_RepGT.mean() # nan problem
loss = c_loss + r_loss #+ repu_loss
return loss, c_loss, r_loss, repu_loss
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
class Generator(object):
def __init__(self,batch_size,
train_lines, image_size,
):
self.batch_size = batch_size
self.train_lines = train_lines
self.train_batches = len(train_lines)
self.image_size = image_size
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5):
        '''Random preprocessing for real-time data augmentation'''
line = annotation_line.split()
image = Image.open(line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
# correct boxes
box_data = np.zeros((len(box),5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
if len(box) == 0:
return image_data, []
if (box_data[:,:4]>0).any():
return image_data, box_data
else:
return image_data, []
def generate(self):
while True:
shuffle(self.train_lines)
lines = self.train_lines
inputs = []
targets = []
n = len(lines)
for i in range(len(lines)):
img,y = self.get_random_data(lines[i], self.image_size[0:2])
i = (i+1) % n
if len(y)!=0:
boxes = np.array(y[:,:4],dtype=np.float32)
y = np.concatenate([boxes,y[:,-1:]],axis=-1)
img = np.array(img,dtype = np.float32)
y = np.array(y,dtype = np.float32)
inputs.append(np.transpose(preprocess_input(img),(2,0,1)))
targets.append(y)
if len(targets) == self.batch_size:
tmp_inp = np.array(inputs)
tmp_targets = np.array(targets)
inputs = []
targets = []
yield tmp_inp, tmp_targets
| 37.756579
| 141
| 0.560202
| 8,712
| 0.748454
| 1,010
| 0.08677
| 0
| 0
| 0
| 0
| 690
| 0.059278
|
58252e686b16a8b93824251a6782b7d24afd2761
| 267
|
py
|
Python
|
project/wsgi.py
|
devluci/django-rest-base-boilerplate
|
0cf512e00aca66ebf9908351527d701cd421ccd4
|
[
"MIT"
] | null | null | null |
project/wsgi.py
|
devluci/django-rest-base-boilerplate
|
0cf512e00aca66ebf9908351527d701cd421ccd4
|
[
"MIT"
] | null | null | null |
project/wsgi.py
|
devluci/django-rest-base-boilerplate
|
0cf512e00aca66ebf9908351527d701cd421ccd4
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
from rest_base.utils import dotenv
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'project.settings')
dotenv.load(os.path.join(os.path.dirname(__file__), '../.env'))
application = get_wsgi_application()
| 26.7
| 67
| 0.797753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.191011
|
5825efbd85281c5ef1426be58d4c0871b10dcdf9
| 3,445
|
py
|
Python
|
tests/test_coco_dataset.py
|
petersiemen/CVND---Image-Captioning-Project
|
53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06
|
[
"MIT"
] | null | null | null |
tests/test_coco_dataset.py
|
petersiemen/CVND---Image-Captioning-Project
|
53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06
|
[
"MIT"
] | null | null | null |
tests/test_coco_dataset.py
|
petersiemen/CVND---Image-Captioning-Project
|
53d15c5f2b9d5e04b007f4f8b1e4f9dd17425c06
|
[
"MIT"
] | null | null | null |
from .context import CoCoDataset
import os
from torchvision import transforms
import torch.utils.data as data
from src.data_loader import get_loader
from context import COCO_SMALL
from context import clean_sentence
def test_coco_dataset():
transform_train = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
mode = "train"
batch_size = 3
vocab_threshold = 5
vocab_file = '../vocab.pkl'
start_word = "<start>"
end_word = "<end>"
unk_word = "<unk>"
vocab_from_file = False
cocoapi_loc = COCO_SMALL
img_folder = os.path.join(cocoapi_loc, 'cocoapi/images/val2014/')
annotations_file = os.path.join(cocoapi_loc, 'cocoapi/annotations/captions_val2014.json')
dataset = CoCoDataset(transform=transform_train,
mode=mode,
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_file=vocab_file,
start_word=start_word,
end_word=end_word,
unk_word=unk_word,
annotations_file=annotations_file,
vocab_from_file=vocab_from_file,
img_folder=img_folder)
# data loader for COCO dataset.
data_loader = data.DataLoader(dataset=dataset,
num_workers=4
)
images, captions = next(iter(data_loader))
print(images.shape)
print(captions.shape)
def test_data_loader():
# Define a transform to pre-process the training images.
transform_train = transforms.Compose([
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225))])
vocab_threshold = 5
# Specify the batch size.
batch_size = 10
# Obtain the data loader.
data_loader = get_loader(transform=transform_train,
mode='train',
batch_size=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=False,
cocoapi_loc=COCO_SMALL # uncomment for running on local
)
print('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))
images, captions = next(iter(data_loader))
print('images.shape:', images.shape)
print('captions.shape:', captions.shape)
print(captions)
print(data_loader.dataset.vocab.idx2word)
for caption in captions:
sentence = clean_sentence(caption, data_loader)
print(caption)
print(sentence)
| 39.147727
| 93
| 0.608128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 753
| 0.218578
|
5828ffc478a57b5d3a54d1d5409d86dcb72100d1
| 5,019
|
py
|
Python
|
test/retro-fuse-test.py
|
jaylogue/retro-fuse
|
b300865c1aa4c38930adea66de364f182c73b3b5
|
[
"Apache-2.0"
] | 28
|
2021-02-23T06:00:16.000Z
|
2022-02-28T13:38:48.000Z
|
test/retro-fuse-test.py
|
jaylogue/retro-fuse
|
b300865c1aa4c38930adea66de364f182c73b3b5
|
[
"Apache-2.0"
] | 3
|
2021-09-22T12:37:59.000Z
|
2022-02-01T00:33:25.000Z
|
test/retro-fuse-test.py
|
jaylogue/retro-fuse
|
b300865c1aa4c38930adea66de364f182c73b3b5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2021 Jay Logue
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file A test driver for testing retro-fuse filesystem handlers.
#
import os
import sys
import unittest
import argparse
scriptName = os.path.basename(__file__)
scriptDirName = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
class TestResult(unittest.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(TestResult, self).__init__(stream, descriptions, verbosity)
self.stream = stream
def getDescription(self, test):
return test.shortDescription()
def startTest(self, test):
super(TestResult, self).startTest(test)
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TestResult, self).addSuccess(test)
self.stream.writeln("PASS")
def addError(self, test, err):
super(TestResult, self).addError(test, err)
self.stream.writeln("ERROR")
def addFailure(self, test, err):
super(TestResult, self).addFailure(test, err)
self.stream.writeln("FAIL")
def addSkip(self, test, reason):
super(TestResult, self).addSkip(test, reason)
self.stream.writeln("skipped {0!r}".format(reason))
def addExpectedFailure(self, test, err):
super(TestResult, self).addExpectedFailure(test, err)
self.stream.writeln("expected failure")
def addUnexpectedSuccess(self, test):
super(TestResult, self).addUnexpectedSuccess(test)
self.stream.writeln("unexpected success")
def printErrors(self):
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln("%s" % err)
# Parse command line arguments
argParser = argparse.ArgumentParser()
argParser.add_argument('-s', '--simh', dest='simhCmd', default='pdp11',
help='Path to pdp11 simh executable')
argParser.add_argument('-v', '--verbose', dest='verbosity', action='store_const', const=2, default=1,
help='Verbose output')
argParser.add_argument('-q', '--quiet', dest='verbosity', action='store_const', const=0,
help='Quiet output')
argParser.add_argument('-f', '--failfast', dest='failfast', action='store_true', default=False,
help='Stop on first test failure')
argParser.add_argument('-k', '--keep', dest='keepFS', action='store_true', default=False,
help='Retain the test filesystem on exit')
argParser.add_argument('-i', '--fs-image', dest='fsImage',
help='Use specified file/device as backing store for test filesystem (implies -k)')
argParser.add_argument('fsHandler', help='Filesystem handler executable to be tested')
testOpts = argParser.parse_args()
if testOpts.fsImage is not None:
testOpts.keepFS = True
# Verify access to filesystem handler executable
if not os.access(testOpts.fsHandler, os.F_OK):
print(f'{scriptName}: File not found: {testOpts.fsHandler}', file=sys.stderr)
sys.exit(1)
if not os.access(testOpts.fsHandler, os.X_OK):
print(f'{scriptName}: Unable to execute filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
sys.exit(1)
# Load the appropriate test cases
fsHandlerBaseName = os.path.basename(testOpts.fsHandler)
if fsHandlerBaseName == 'bsd29fs':
import BSD29Tests
testSuite = unittest.TestLoader().loadTestsFromModule(BSD29Tests)
elif fsHandlerBaseName == 'v7fs':
import V7Tests
testSuite = unittest.TestLoader().loadTestsFromModule(V7Tests)
elif fsHandlerBaseName == 'v6fs':
import V6Tests
testSuite = unittest.TestLoader().loadTestsFromModule(V6Tests)
else:
print(f'{scriptName}: Unknown filesystem handler: {testOpts.fsHandler}', file=sys.stderr)
print('Expected a file named v6fs, v7fs or bsd29fs', file=sys.stderr)
sys.exit(1)
# Run the tests
if testOpts.verbosity > 0:
resultStream = sys.stderr
else:
resultStream = open(os.devnull, 'a')
testRunner = unittest.TextTestRunner(stream=resultStream, resultclass=TestResult, verbosity=testOpts.verbosity, failfast=testOpts.failfast)
result = testRunner.run(testSuite)
sys.exit(0 if result.wasSuccessful() else 1)
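# Illustrative invocation (an addition; the paths below are made up):
#   ./retro-fuse-test.py -v -s /usr/local/bin/pdp11 ./v7fs
# loads the V7Tests module because the handler is named v7fs, then runs the
# suite with verbose output through the custom TestResult class above.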
| 38.312977
| 139
| 0.695557
| 1,667
| 0.332138
| 0
| 0
| 0
| 0
| 0
| 0
| 1,606
| 0.319984
|
582a2d15de4e22e6a4241b45670672383e57c857
| 387
|
py
|
Python
|
docker/app.py
|
dramasamy/kubernetes_training
|
a5f48d540b7b6e9a79b5ab60f62a13a792f1b0e5
|
[
"Apache-2.0"
] | 1
|
2022-03-22T22:31:32.000Z
|
2022-03-22T22:31:32.000Z
|
docker/app.py
|
dramasamy/training
|
af7b9352b56c10aaa957062f24f1302a7a4c5797
|
[
"Apache-2.0"
] | null | null | null |
docker/app.py
|
dramasamy/training
|
af7b9352b56c10aaa957062f24f1302a7a4c5797
|
[
"Apache-2.0"
] | null | null | null |
#! /bin/python
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
return "Hello World! - v1"
@app.route('/<name>')
def hello_name(name):
return "Hello {}! - v1".format(name)
@app.route('/audio')
def audio():
return "Audio - v1"
@app.route('/video')
def video():
return "Video - v1"
if __name__ == '__main__':
app.run(host='0.0.0.0')
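# Illustrative checks (an addition; Flask's default port 5000 is assumed):
#   curl http://localhost:5000/        -> "Hello World! - v1"
#   curl http://localhost:5000/audio   -> "Audio - v1"
#   curl http://localhost:5000/Alice   -> "Hello Alice! - v1"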
| 13.821429
| 40
| 0.596899
| 0
| 0
| 0
| 0
| 257
| 0.664083
| 0
| 0
| 120
| 0.310078
|
582b2e616da4b6c095b0fcc22d4f757b4b8fddc7
| 4,374
|
py
|
Python
|
creme/cluster/k_means.py
|
tweakyllama/creme
|
6bb8e34789947a943e7e6a8a1af1341e4c1de144
|
[
"BSD-3-Clause"
] | null | null | null |
creme/cluster/k_means.py
|
tweakyllama/creme
|
6bb8e34789947a943e7e6a8a1af1341e4c1de144
|
[
"BSD-3-Clause"
] | null | null | null |
creme/cluster/k_means.py
|
tweakyllama/creme
|
6bb8e34789947a943e7e6a8a1af1341e4c1de144
|
[
"BSD-3-Clause"
] | 2
|
2021-06-20T09:29:38.000Z
|
2021-06-23T07:47:21.000Z
|
import collections
import numpy as np
from sklearn import utils
from .. import base
__all__ = ['KMeans']
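# Note (added comment): the helper below returns the *squared* Euclidean
# distance (no square root). sqrt is monotone, so the argmin-based cluster
# assignment in predict_one() is unaffected and a sqrt call is saved.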
def euclidean_distance(a, b):
return sum((a.get(k, 0) - b.get(k, 0)) ** 2 for k in set([*a.keys(), *b.keys()]))
class KMeans(base.Clusterer):
"""Incremental k-means.
The most common way to implement batch k-means is to use Lloyd's algorithm, which consists in
assigning all the data points to a set of cluster centers and then moving the centers
accordingly. This requires multiple passes over the data and thus isn't applicable in a
streaming setting.
In this implementation we start by finding the cluster that is closest to the current
observation. We then move the cluster's central position towards the new observation. The
``halflife`` parameter determines by how much to move the cluster toward the new observation.
You will get better results if you scale your data appropriately.
Parameters:
n_clusters (int): Maximum number of clusters to assign.
        halflife (float): Amount by which to move the cluster centers; a reasonable value is
            between 0 and 1.
mu (float): Mean of the normal distribution used to instantiate cluster positions.
sigma (float): Standard deviation of the normal distribution used to instantiate cluster
positions.
distance (callable): Metric used to compute distances between an observation and a cluster.
random_state (int, RandomState instance or None, default=None): If int, ``random_state`` is
the seed used by the random number generator; if ``RandomState`` instance,
``random_state`` is the random number generator; if ``None``, the random number
generator is the ``RandomState`` instance used by ``np.random``.
Attributes:
centers (dict): Central positions of each cluster.
Example:
In the following example the cluster assignments are exactly the same as when using
``sklearn``'s batch implementation. However changing the ``halflife`` parameter will
produce different outputs.
::
>>> from creme import cluster
>>> from creme import compat
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> k_means = cluster.KMeans(n_clusters=2, halflife=0.4, sigma=3, random_state=42)
>>> k_means = compat.SKLClustererWrapper(k_means)
>>> k_means = k_means.fit(X)
>>> k_means.predict(X)
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> k_means.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
References:
1. `Sequential k-Means Clustering <http://www.cs.princeton.edu/courses/archive/fall08/cos436/Duda/C/sk_means.htm>`_
"""
def __init__(self, n_clusters, halflife=0.5, mu=0, sigma=1, distance=euclidean_distance,
random_state=None):
self.n_clusters = n_clusters
self.halflife = halflife
self.mu = mu
self.sigma = sigma
self.distance = distance
self.random_state = utils.check_random_state(random_state)
self.centers = {
i: collections.defaultdict(self.random_normal)
for i in range(n_clusters)
}
def random_normal(self):
"""Returns a random value sampled from a normal distribution."""
return self.random_state.normal(self.mu, self.sigma)
@property
def cluster_centers_(self):
"""Returns the cluster centers in the same format as scikit-learn."""
return np.array([
list(coords.values())
for coords in self.centers.values()
])
def fit_predict_one(self, x, y=None):
"""Equivalent to ``k_means.fit_one(x).predict_one(x)``, but faster."""
# Find the cluster with the closest center
closest = self.predict_one(x)
# Move the cluster's center
for i, xi in x.items():
self.centers[closest][i] += self.halflife * (xi - self.centers[closest][i])
return closest
def fit_one(self, x, y=None):
self.fit_predict_one(x)
return self
def predict_one(self, x):
return min(self.centers, key=lambda c: self.distance(x, self.centers[c]))
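# --- Illustrative streaming usage (an addition; kept as comments so the module
# has no import-time side effects) ---
# Outside the scikit-learn wrapper shown in the docstring, observations are
# plain feature dicts and the model learns one point at a time:
#
#   km = KMeans(n_clusters=2, halflife=0.4, sigma=3, random_state=42)
#   for x in ({'x': 1, 'y': 2}, {'x': 4, 'y': 4}, {'x': 1, 'y': 0}):
#       km = km.fit_one(x)
#   km.predict_one({'x': 4, 'y': 2})  # -> index of the nearest center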
| 37.384615
| 123
| 0.633973
| 4,144
| 0.947417
| 0
| 0
| 238
| 0.054412
| 0
| 0
| 2,907
| 0.664609
|
582ee3ae3eed760c8ee30d3cb820c5796139122b
| 42,165
|
py
|
Python
|
fasttrips/TAZ.py
|
pedrocamargo/fast-trips
|
a2549936b2707b00d6c21b4e6ae4be8fefd0aa46
|
[
"Apache-2.0"
] | 3
|
2017-11-03T00:18:23.000Z
|
2020-11-30T18:54:46.000Z
|
fasttrips/TAZ.py
|
pedrocamargo/fast-trips
|
a2549936b2707b00d6c21b4e6ae4be8fefd0aa46
|
[
"Apache-2.0"
] | null | null | null |
fasttrips/TAZ.py
|
pedrocamargo/fast-trips
|
a2549936b2707b00d6c21b4e6ae4be8fefd0aa46
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import numpy as np
import pandas as pd
from .Error import NetworkInputError
from .Logger import FastTripsLogger
from .Route import Route
from .Stop import Stop
from .Transfer import Transfer
class TAZ(object):
"""
TAZ class.
One instance represents all of the Transportation Analysis Zones
as well as their access links and egress links.
.. todo:: This is really about the access and egress links; perhaps it should be renamed?
Stores access link information in :py:attr:`TAZ.walk_access`, and :py:attr:`TAZ.drive_access`,
both instances of :py:class:`pandas.DataFrame`.
"""
#: File with fasttrips walk access information.
#: See `walk_access specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/walk_access_ft.md>`_.
INPUT_WALK_ACCESS_FILE = "walk_access_ft.txt"
#: Walk access links column name: TAZ Identifier. String.
WALK_ACCESS_COLUMN_TAZ = 'taz'
#: Walk access links column name: Stop Identifier. String.
WALK_ACCESS_COLUMN_STOP = 'stop_id'
#: Walk access links column name: Direction (access or egress)
WALK_ACCESS_COLUMN_DIRECTION = "direction"
#: Walk access links column name: Walk Distance
WALK_ACCESS_COLUMN_DIST = 'dist'
#: fasttrips Walk access links column name: Elevation Gain, feet gained along link.
WALK_ACCESS_COLUMN_ELEVATION_GAIN = 'elevation_gain'
#: fasttrips Walk access links column name: Population Density, people per square mile. Float.
WALK_ACCESS_COLUMN_POPULATION_DENSITY = 'population_density'
#: fasttrips Walk access links column name: Employment Density, employees per square mile. Float.
WALK_ACCESS_COLUMN_EMPLOYMENT_DENSITY = 'employment_density'
#: fasttrips Walk access links column name: Retail Density, employees per square mile. Float.
# WALK_ACCESS_COLUMN_RETAIL_DENSITY = 'retail_density'
#: fasttrips Walk access links column name: Auto Capacity, vehicles per hour per mile. Float.
WALK_ACCESS_COLUMN_AUTO_CAPACITY = 'auto_capacity'
#: fasttrips Walk access links column name: Indirectness, ratio of Manhattan distance to crow-fly distance. Float.
WALK_ACCESS_COLUMN_INDIRECTNESS = 'indirectness'
# ========== Added by fasttrips =======================================================
#: Walk access links column name: TAZ Numerical Identifier. Int.
WALK_ACCESS_COLUMN_TAZ_NUM = 'taz_num'
#: Walk access links column name: Stop Numerical Identifier. Int.
WALK_ACCESS_COLUMN_STOP_NUM = 'stop_id_num'
#: Walk access links column name: Link walk time. This is a TimeDelta
WALK_ACCESS_COLUMN_TIME = 'time'
#: Walk access links column name: Link walk time in minutes. This is float.
WALK_ACCESS_COLUMN_TIME_MIN = 'time_min'
    #: Walk access cost column name: Link generic cost for accessing a stop from a TAZ. Float.
WALK_ACCESS_COLUMN_ACC_COST = 'access_cost'
    #: Walk access cost column name: Link generic cost for egressing to a TAZ from a stop. Float.
WALK_ACCESS_COLUMN_EGR_COST = 'egress_cost'
#: Walk access links column name: Supply mode. String.
WALK_ACCESS_COLUMN_SUPPLY_MODE = 'supply_mode'
#: Walk access links column name: Supply mode number. Int.
WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM = 'supply_mode_num'
#: File with fasttrips drive access information.
#: See `drive_access specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/drive_access_ft.md>`_.
INPUT_DRIVE_ACCESS_FILE = "drive_access_ft.txt"
#: Drive access links column name: TAZ Identifier. String.
DRIVE_ACCESS_COLUMN_TAZ = WALK_ACCESS_COLUMN_TAZ
#: Drive access links column name: Stop Identifier. String.
DRIVE_ACCESS_COLUMN_LOT_ID = 'lot_id'
#: Drive access links column name: Direction ('access' or 'egress')
DRIVE_ACCESS_COLUMN_DIRECTION = 'direction'
#: Drive access links column name: Drive distance
DRIVE_ACCESS_COLUMN_DISTANCE = 'dist'
#: Drive access links column name: Drive cost in cents (integer)
DRIVE_ACCESS_COLUMN_COST = 'cost'
#: Drive access links column name: Driving time in minutes between TAZ and lot (TimeDelta)
DRIVE_ACCESS_COLUMN_TRAVEL_TIME = 'travel_time'
#: Drive access links column name: Start time (e.g. time period these attributes apply), minutes after midnight
DRIVE_ACCESS_COLUMN_START_TIME_MIN = 'start_time_min'
#: Drive access links column name: Start time (e.g. time period these attributes apply). A DateTime instance
DRIVE_ACCESS_COLUMN_START_TIME = 'start_time'
#: Drive access links column name: End time (e.g. time period these attributes apply), minutes after midnight
DRIVE_ACCESS_COLUMN_END_TIME_MIN = 'end_time_min'
#: Drive access links column name: End time (e.g. time period these attributes apply). A DateTime instance
DRIVE_ACCESS_COLUMN_END_TIME = 'end_time'
#: fasttrips Drive access links column name: Elevation Gain, feet gained along link.
DRIVE_ACCESS_COLUMN_ELEVATION_GAIN = 'elevation_gain'
#: fasttrips Drive access links column name: Population Density, people per square mile. Float.
DRIVE_ACCESS_COLUMN_POPULATION_DENSITY = 'population_density'
#: fasttrips Drive access links column name: Retail Density, employees per square mile. Float.
DRIVE_ACCESS_COLUMN_RETAIL_DENSITY = 'retail_density'
#: fasttrips Drive access links column name: Auto Capacity, vehicles per hour per mile. Float.
DRIVE_ACCESS_COLUMN_AUTO_CAPACITY = 'auto_capacity'
#: fasttrips Drive access links column name: Indirectness, ratio of Manhattan distance to crow-fly distance. Float.
DRIVE_ACCESS_COLUMN_INDIRECTNESS = 'indirectness'
# ========== Added by fasttrips =======================================================
#: fasttrips These are the original attributes but renamed to be clear they are the drive component (as opposed to the walk)
DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE = 'drive_dist'
DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME = 'drive_travel_time'
#: Drive access links column name: Driving time in minutes between TAZ and lot (float)
DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN = 'drive_time_min'
#: fasttrips Drive access links column name: TAZ Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_TAZ_NUM = WALK_ACCESS_COLUMN_TAZ_NUM
#: fasttrips Drive access links column name: Stop Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_STOP = WALK_ACCESS_COLUMN_STOP
#: fasttrips Drive access links column name: Stop Numerical Identifier. Int.
DRIVE_ACCESS_COLUMN_STOP_NUM = WALK_ACCESS_COLUMN_STOP_NUM
#: fasttrips Drive access links column name: Walk distance from lot to transit. Miles. Float.
DRIVE_ACCESS_COLUMN_WALK_DISTANCE = 'walk_dist'
#: fasttrips Drive access links column name: Walk time from lot to transit. TimeDelta.
DRIVE_ACCESS_COLUMN_WALK_TIME = 'walk_time'
#: fasttrips Drive access links column name: Walk time from lot to transit. Int.
DRIVE_ACCESS_COLUMN_WALK_TIME_MIN = 'walk_time_min'
#: fasttrips Drive access links column name: Supply mode. String.
DRIVE_ACCESS_COLUMN_SUPPLY_MODE = WALK_ACCESS_COLUMN_SUPPLY_MODE
#: Drive access links column name: Supply mode number. Int.
DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM = WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM
#: File with fasttrips drive access points information.
#: See `Drive access points specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/drive_access_points_ft.md>`_.
INPUT_DAP_FILE = 'drive_access_points_ft.txt'
#: fasttrips DAP column name: Lot ID. String.
DAP_COLUMN_LOT_ID = DRIVE_ACCESS_COLUMN_LOT_ID
#: fasttrips DAP column name: Lot Latitude (WGS 84)
DAP_COLUMN_LOT_LATITUDE = 'lot_lat'
#: fasttrips DAP column name: Lot Longitude (WGS 84)
DAP_COLUMN_LOT_LONGITUDE = 'lot_lon'
#: fasttrips DAP column name: Name of the Lot. String.
DAP_COLUMN_NAME = 'name'
#: fasttrips DAP column name: Drop-Off. Boolean.
DAP_COLUMN_DROP_OFF = 'drop_off'
#: fasttrips DAP column name: Capacity (number of parking spaces)
DAP_COLUMN_CAPACITY = 'capacity'
#: fasttrips DAP column name: Hourly Cost in cents. Integer.
DAP_COLUMN_HOURLY_COST = 'hourly_cost'
#: fasttrips DAP column name: Maximum Daily Cost in cents. Integer.
DAP_COLUMN_MAXIMUM_COST = 'max_cost'
#: fasttrips DAP column name: Type
DAP_COLUMN_TYPE = 'type'
#: mode column
MODE_COLUMN_MODE = 'mode'
#: mode number
MODE_COLUMN_MODE_NUM = 'mode_num'
#: access and egress modes. First is default.
ACCESS_EGRESS_MODES = ["walk", "bike_own", "bike_share", "PNR", "KNR"]
#: Access mode: Walk
MODE_ACCESS_WALK = 101
#: Access mode: Bike (own)
MODE_ACCESS_BIKE_OWN = 102
#: Access mode: Bike (share)
MODE_ACCESS_BIKE_SHARE = 103
#: Access mode: Drive to PNR
MODE_ACCESS_PNR = 104
#: Access mode: Drive to KNR
MODE_ACCESS_KNR = 105
#: Egress mode: Walk
MODE_EGRESS_WALK = 201
#: Egress mode: Bike (own)
MODE_EGRESS_BIKE_OWN = 202
#: Egress mode: Bike (share)
MODE_EGRESS_BIKE_SHARE = 203
#: Egress mode: Drive to PNR
MODE_EGRESS_PNR = 204
#: Egress mode: Drive to KNR
MODE_EGRESS_KNR = 205
#: Access mode number list, in order of ACCESS_EGRESS_MODES
ACCESS_MODE_NUMS = [MODE_ACCESS_WALK,
MODE_ACCESS_BIKE_OWN, MODE_ACCESS_BIKE_SHARE,
MODE_ACCESS_PNR, MODE_ACCESS_KNR]
#: Egress mode number list, in order of ACCESS_EGRESS_MODES
EGRESS_MODE_NUMS = [MODE_EGRESS_WALK,
MODE_EGRESS_BIKE_OWN, MODE_EGRESS_BIKE_SHARE,
MODE_EGRESS_PNR, MODE_EGRESS_KNR]
#: Walk mode number list
WALK_MODE_NUMS = [MODE_ACCESS_WALK,
MODE_EGRESS_WALK]
#: Bike mode number list
BIKE_MODE_NUMS = [MODE_ACCESS_BIKE_OWN, MODE_ACCESS_BIKE_SHARE,
MODE_EGRESS_BIKE_OWN, MODE_EGRESS_BIKE_SHARE]
#: Drive mode number list
DRIVE_MODE_NUMS = [MODE_ACCESS_PNR, MODE_ACCESS_KNR,
MODE_EGRESS_PNR, MODE_EGRESS_KNR]
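# Illustrative note (added; not from the original source): the mode numbering
# encodes direction in the hundreds digit, so access modes are 101-105 and
# egress modes are 201-205, in the same order as ACCESS_EGRESS_MODES. For
# example, ACCESS_EGRESS_MODES[3] == "PNR" pairs with MODE_ACCESS_PNR == 104
# for access and MODE_EGRESS_PNR == 204 for egress.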
#: File with access/egress links for C++ extension
#: It's easier to pass it via a file rather than through the
#: initialize_fasttrips_extension() because of the strings involved, I think.
OUTPUT_ACCESS_EGRESS_FILE = "ft_intermediate_access_egress.txt"
def __init__(self, output_dir, gtfs, today, stops, transfers, routes):
"""
Constructor. Reads the TAZ data from the input files in *input_archive*.
"""
from .Assignment import Assignment
self.access_modes_df = pd.DataFrame(data={TAZ.MODE_COLUMN_MODE: TAZ.ACCESS_EGRESS_MODES,
TAZ.MODE_COLUMN_MODE_NUM: TAZ.ACCESS_MODE_NUMS})
self.access_modes_df[TAZ.MODE_COLUMN_MODE] = self.access_modes_df[TAZ.MODE_COLUMN_MODE] \
.apply(lambda x: '%s_%s' % (x, Route.MODE_TYPE_ACCESS))
self.egress_modes_df = pd.DataFrame(data={TAZ.MODE_COLUMN_MODE: TAZ.ACCESS_EGRESS_MODES,
TAZ.MODE_COLUMN_MODE_NUM: TAZ.EGRESS_MODE_NUMS})
self.egress_modes_df[TAZ.MODE_COLUMN_MODE] = self.egress_modes_df[TAZ.MODE_COLUMN_MODE] \
.apply(lambda x: '%s_%s' % (x, Route.MODE_TYPE_EGRESS))
routes.add_access_egress_modes(self.access_modes_df, self.egress_modes_df)
#: Walk access links table. Make sure TAZ ID and stop ID are read as strings.
self.walk_access_df = gtfs.get(TAZ.INPUT_WALK_ACCESS_FILE)
# verify required columns are present
walk_access_cols = list(self.walk_access_df.columns.values)
assert (TAZ.WALK_ACCESS_COLUMN_TAZ in walk_access_cols)
assert (TAZ.WALK_ACCESS_COLUMN_STOP in walk_access_cols)
assert (TAZ.WALK_ACCESS_COLUMN_DIRECTION in walk_access_cols)
assert (TAZ.WALK_ACCESS_COLUMN_DIST in walk_access_cols)
# printing this before setting index
FastTripsLogger.debug("=========== WALK ACCESS ===========\n" + str(self.walk_access_df.head()))
FastTripsLogger.debug("As read\n" + str(self.walk_access_df.dtypes))
# Verify direction is valid
invalid_direction = self.walk_access_df.loc[
~self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_DIRECTION].isin(["access", "egress"])]
if len(invalid_direction) > 0:
error_msg = "Invalid direction in walk access links: \n%s" % str(invalid_direction)
FastTripsLogger.fatal(error_msg)
raise NetworkInputError(TAZ.INPUT_WALK_ACCESS_FILE, error_msg)
# TODO: remove? Or put walk speed some place?
# time (min) = dist (miles) * 60 / 2.7, i.e. an assumed walk speed of 2.7 mph
self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME_MIN] = self.walk_access_df[
TAZ.WALK_ACCESS_COLUMN_DIST] * 60.0 / 2.7
# convert time column from float to timedelta
self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME] = \
self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_TIME_MIN].map(lambda x: datetime.timedelta(minutes=x))
# make sure each TAZ/stop/direction triple is unique
walk_access_dupes = self.walk_access_df.duplicated(subset=[TAZ.WALK_ACCESS_COLUMN_TAZ,
TAZ.WALK_ACCESS_COLUMN_STOP,
TAZ.WALK_ACCESS_COLUMN_DIRECTION], keep=False)
if walk_access_dupes.sum() > 0:
self.walk_access_df["duplicates"] = walk_access_dupes
error_msg = "Duplicate taz/stop pairs in walk access links: \n%s" % str(
self.walk_access_df.loc[self.walk_access_df["duplicates"]])
FastTripsLogger.fatal(error_msg)
raise NetworkInputError(TAZ.INPUT_WALK_ACCESS_FILE, error_msg)
FastTripsLogger.debug("Final\n" + str(self.walk_access_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.walk_access_df), "walk access", TAZ.INPUT_WALK_ACCESS_FILE))
self.dap_df = gtfs.get(TAZ.INPUT_DAP_FILE)
if not self.dap_df.empty:
# verify required columns are present
dap_cols = list(self.dap_df.columns.values)
assert (TAZ.DAP_COLUMN_LOT_ID in dap_cols)
assert (TAZ.DAP_COLUMN_LOT_LATITUDE in dap_cols)
assert (TAZ.DAP_COLUMN_LOT_LONGITUDE in dap_cols)
# default capacity = 0
if TAZ.DAP_COLUMN_CAPACITY not in dap_cols:
self.dap_df[TAZ.DAP_COLUMN_CAPACITY] = 0
# default drop-off = True
if TAZ.DAP_COLUMN_DROP_OFF not in dap_cols:
self.dap_df[TAZ.DAP_COLUMN_DROP_OFF] = True
else:
self.dap_df = pd.DataFrame()
FastTripsLogger.debug("=========== DAPS ===========\n" + str(self.dap_df.head()))
FastTripsLogger.debug("\n" + str(self.dap_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.dap_df), "DAPs", TAZ.INPUT_DAP_FILE))
#: Drive access links table. Make sure TAZ ID and lot ID are read as strings.
self.drive_access_df = gtfs.get(TAZ.INPUT_DRIVE_ACCESS_FILE)
if not self.drive_access_df.empty:
# verify required columns are present
drive_access_cols = list(self.drive_access_df.columns.values)
assert (TAZ.DRIVE_ACCESS_COLUMN_TAZ in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_LOT_ID in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_DIRECTION in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_DISTANCE in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_COST in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_TRAVEL_TIME in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_START_TIME in drive_access_cols)
assert (TAZ.DRIVE_ACCESS_COLUMN_END_TIME in drive_access_cols)
# printing this before setting index
FastTripsLogger.debug("=========== DRIVE ACCESS ===========\n" + str(self.drive_access_df.head()))
FastTripsLogger.debug("As read\n" + str(self.drive_access_df.dtypes)) # Rename dist to drive_dist
# the distance and times here are for DRIVING
self.drive_access_df.rename(
columns={TAZ.DRIVE_ACCESS_COLUMN_DISTANCE: TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE,
TAZ.DRIVE_ACCESS_COLUMN_TRAVEL_TIME: TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME},
inplace=True)
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN] = \
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME]
# if there are any that go past midnight, duplicate
sim_day_end = Assignment.NETWORK_BUILD_DATE_START_TIME + datetime.timedelta(days=1)
dupes = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end,
:].copy()
if len(dupes) > 0:
# e.g. 18:00 - 27:00
# dupe: 00:00 - 3:00
dupes.loc[dupes[
TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_START_TIME] = Assignment.NETWORK_BUILD_DATE_START_TIME
dupes.loc[dupes[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_END_TIME] = \
dupes[TAZ.DRIVE_ACCESS_COLUMN_END_TIME] - datetime.timedelta(days=1)
# orig: 18:00 - 24:00
self.drive_access_df.loc[self.drive_access_df[
TAZ.DRIVE_ACCESS_COLUMN_END_TIME] > sim_day_end, TAZ.DRIVE_ACCESS_COLUMN_END_TIME] = sim_day_end
FastTripsLogger.debug(
"Added %d morning hour drive access links. Head:\n%s" % (len(dupes), dupes.head().to_string()))
# combine
self.drive_access_df = self.drive_access_df.append(dupes)
# drive access period start/end time: float version
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN] = \
(self.drive_access_df[
TAZ.DRIVE_ACCESS_COLUMN_START_TIME] - Assignment.NETWORK_BUILD_DATE_START_TIME) / np.timedelta64(1,
'm')
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN] = \
(self.drive_access_df[
TAZ.DRIVE_ACCESS_COLUMN_END_TIME] - Assignment.NETWORK_BUILD_DATE_START_TIME) / np.timedelta64(1,
'm')
# convert time column from number to timedelta
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME] = \
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN].map(
lambda x: datetime.timedelta(minutes=float(x)))
# need PNRs and KNRs - get them from the dap
knr_dap_df = self.dap_df.loc[self.dap_df[TAZ.DAP_COLUMN_DROP_OFF] == True].copy()
pnr_dap_df = self.dap_df.loc[self.dap_df[TAZ.DAP_COLUMN_CAPACITY] > 0].copy()
knr_dap_df['dap_type'] = 'KNR'
pnr_dap_df['dap_type'] = 'PNR'
self.drive_access_df = pd.merge(left=self.drive_access_df,
right=pd.concat([knr_dap_df, pnr_dap_df], axis=0),
on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
how='left')
# look for required column being null
lots_not_found = self.drive_access_df.loc[pd.isnull(self.drive_access_df[TAZ.DAP_COLUMN_LOT_LATITUDE])]
if len(lots_not_found) > 0:
error_msg = "Found %d drive access links in %s with lots not specified in %s" % \
(len(lots_not_found), TAZ.INPUT_DRIVE_ACCESS_FILE, TAZ.INPUT_DAP_FILE)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\nFirst five drive access links with lots not found:\n%s" % \
str(lots_not_found.head().to_string()))
raise NetworkInputError(TAZ.INPUT_DAP_FILE, error_msg)
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE] = \
self.drive_access_df['dap_type'] + '_' + \
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION]
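# Illustrative note (added; not from the original source): assuming
# Route.MODE_TYPE_ACCESS == "access" and Route.MODE_TYPE_EGRESS == "egress",
# the supply mode string joins the lot type with the link direction, so a
# park-and-ride access link becomes "PNR_access" and a kiss-and-ride egress
# link becomes "KNR_egress", matching the access/egress modes registered via
# routes.add_access_egress_modes() above.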
# done with this
self.drive_access_df.drop(['dap_type'], axis=1, inplace=True)
# We're going to join this with stops to get drive-to-stop
drive_access = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION] == 'access']
drive_egress = self.drive_access_df.loc[self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DIRECTION] == 'egress']
# join with transfers to go from taz -> lot -> stop
drive_access = pd.merge(left=drive_access,
right=transfers.transfers_df,
left_on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
right_on=Transfer.TRANSFERS_COLUMN_FROM_STOP,
how='left')
drive_access[TAZ.DRIVE_ACCESS_COLUMN_STOP] = drive_access[Transfer.TRANSFERS_COLUMN_TO_STOP]
# join with transfers to go from stop -> lot -> taz
drive_egress = pd.merge(left=drive_egress,
right=transfers.transfers_df,
left_on=TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
right_on=Transfer.TRANSFERS_COLUMN_TO_STOP,
how='left')
drive_egress[TAZ.DRIVE_ACCESS_COLUMN_STOP] = drive_egress[Transfer.TRANSFERS_COLUMN_FROM_STOP]
self.drive_access_df = pd.concat([drive_access, drive_egress], axis=0)
# drop redundant columns
# TODO: assuming min_transfer_type and transfer_type from GTFS aren't relevant here, since
# the time and dist are what matter.
# Assuming schedule_precedence doesn't make sense in the drive access/egress context
self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_FROM_STOP,
Transfer.TRANSFERS_COLUMN_TO_STOP,
Transfer.TRANSFERS_COLUMN_TRANSFER_TYPE,
Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME,
Transfer.TRANSFERS_COLUMN_SCHEDULE_PRECEDENCE,
Transfer.TRANSFERS_COLUMN_PENALTY], axis=1, inplace=True)
# not relevant for drive access
if Transfer.TRANSFERS_COLUMN_FROM_ROUTE in list(self.drive_access_df.columns.values):
self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_FROM_ROUTE], axis=1, inplace=True)
if Transfer.TRANSFERS_COLUMN_TO_ROUTE in list(self.drive_access_df.columns.values):
self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_TO_ROUTE], axis=1, inplace=True)
if Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN in list(self.drive_access_df.columns.values):
self.drive_access_df.drop([Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN], axis=1, inplace=True)
# some may have no lot to stop connections -- check for null stop ids
null_stop_ids = self.drive_access_df.loc[pd.isnull(self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_STOP])]
if len(null_stop_ids) > 0:
FastTripsLogger.warn("Dropping %d drive links that don't connect to stops:\n%s" % (
len(null_stop_ids), str(null_stop_ids)))
# drop them
self.drive_access_df = self.drive_access_df.loc[
pd.notnull(self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_STOP])]
# rename walk attributes to be clear
self.drive_access_df.rename(
columns={
Transfer.TRANSFERS_COLUMN_DISTANCE: TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE,
Transfer.TRANSFERS_COLUMN_TIME: TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME,
Transfer.TRANSFERS_COLUMN_TIME_MIN: TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME_MIN},
inplace=True)
# add generic distance and time
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DISTANCE] = self.drive_access_df[
TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE] + \
self.drive_access_df[
TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE]
self.drive_access_df["time_min"] = self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME_MIN] + \
self.drive_access_df[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME_MIN]
FastTripsLogger.debug("Final (%d) types:\n%s\nhead:\n%s" % (
len(self.drive_access_df), str(self.drive_access_df.dtypes), str(self.drive_access_df.head())))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.drive_access_df), "drive access", TAZ.INPUT_DRIVE_ACCESS_FILE))
self.has_drive_access = True
else:
self.has_drive_access = False
self.drive_access_df = pd.DataFrame(columns=[TAZ.DRIVE_ACCESS_COLUMN_TAZ, TAZ.DRIVE_ACCESS_COLUMN_LOT_ID])
FastTripsLogger.debug("=========== NO DRIVE ACCESS ===========\n")
# add DAPs IDs and TAZ IDs to stop ID list
stops.add_daps_tazs_to_stops(self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_LOT_ID]],
TAZ.DRIVE_ACCESS_COLUMN_LOT_ID,
pd.concat([self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_TAZ]],
self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_TAZ]]], axis=0),
TAZ.WALK_ACCESS_COLUMN_TAZ)
# transfers can add stop numeric IDs now that DAPs are available
transfers.add_numeric_stop_id(stops)
# Add numeric stop ID to walk access links
self.walk_access_df = stops.add_numeric_stop_id(self.walk_access_df,
id_colname=TAZ.WALK_ACCESS_COLUMN_STOP,
numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
warn=True,
warn_msg="Numeric stop id not found for walk access links")
# Add TAZ stop ID to walk and drive access links
self.walk_access_df = stops.add_numeric_stop_id(self.walk_access_df,
id_colname=TAZ.WALK_ACCESS_COLUMN_TAZ,
numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_TAZ_NUM)
# These have direction now. Set supply mode string
self.walk_access_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE] = "walk_" + self.walk_access_df[
TAZ.WALK_ACCESS_COLUMN_DIRECTION]
self.walk_access_df = routes.add_numeric_mode_id(self.walk_access_df,
id_colname=TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE,
numeric_newcolname=TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM)
if self.has_drive_access:
self.drive_access_df = stops.add_numeric_stop_id(self.drive_access_df,
id_colname=TAZ.DRIVE_ACCESS_COLUMN_STOP,
numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
warn=True,
warn_msg="Drive access stops missing ids")
self.drive_access_df = stops.add_numeric_stop_id(self.drive_access_df,
id_colname=TAZ.DRIVE_ACCESS_COLUMN_TAZ,
numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM)
self.drive_access_df = routes.add_numeric_mode_id(self.drive_access_df,
id_colname=TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE,
numeric_newcolname=TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM)
# warn on stops that have no walk access
self.warn_on_stops_without_walk_access(stops)
# write this to communicate to extension
self.write_access_egress_for_extension(output_dir)
def add_distance(self, links_df, dist_col):
"""
Sets distance column value for access and egress links.
.. todo:: This neglects the start_time/end_time issue. Don't use without fixing.
"""
############## walk ##############
walk_dists = self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
TAZ.WALK_ACCESS_COLUMN_DIST]].copy()
walk_dists.rename(columns={TAZ.WALK_ACCESS_COLUMN_DIST: "walk_dist"}, inplace=True)
# walk access
links_df = pd.merge(left=links_df,
left_on=["A_id_num", "B_id_num", "mode_num"],
right=walk_dists,
right_on=[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM, TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM],
how="left")
links_df.loc[pd.notnull(links_df["walk_dist"]), dist_col] = links_df["walk_dist"]
links_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
"walk_dist"], axis=1, inplace=True)
# walk egress
links_df = pd.merge(left=links_df,
left_on=["A_id_num", "B_id_num", "mode_num"],
right=walk_dists,
right_on=[TAZ.WALK_ACCESS_COLUMN_STOP_NUM, TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM],
how="left")
links_df.loc[pd.notnull(links_df["walk_dist"]), dist_col] = links_df["walk_dist"]
links_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
"walk_dist"], axis=1, inplace=True)
############## drive ##############
FastTripsLogger.debug("drive_access_df=\n%s" % self.drive_access_df.head())
if len(self.drive_access_df) > 0:
drive_dists = self.drive_access_df[[TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE,
TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE,
TAZ.DRIVE_ACCESS_COLUMN_START_TIME,
TAZ.DRIVE_ACCESS_COLUMN_END_TIME]].copy()
drive_dists["drive_total_dist"] = drive_dists[TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE] + drive_dists[
TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE]
drive_dists.drop([TAZ.DRIVE_ACCESS_COLUMN_DRIVE_DISTANCE, TAZ.DRIVE_ACCESS_COLUMN_WALK_DISTANCE], axis=1,
inplace=True)
# drive access
links_df = pd.merge(left=links_df,
left_on=["A_id_num", "B_id_num", "mode_num"],
right=drive_dists,
right_on=[TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM, TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM],
how="left")
# TODO: drop those with drive access links covering different times
links_df.loc[pd.notnull(links_df["drive_total_dist"]), dist_col] = links_df["drive_total_dist"]
links_df.drop([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
"drive_total_dist"], axis=1, inplace=True)
# drive egress
links_df = pd.merge(left=links_df,
left_on=["A_id_num", "B_id_num", "mode_num"],
right=drive_dists,
right_on=[TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM, TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM],
how="left")
links_df.loc[pd.notnull(links_df["drive_total_dist"]), dist_col] = links_df["drive_total_dist"]
links_df.drop([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
"drive_total_dist"], axis=1, inplace=True)
FastTripsLogger.debug("links_df=\n%s" % links_df.head(30).to_string())
return links_df
def warn_on_stops_without_walk_access(self, stops):
"""
Do any stops lack *any* walk access?
"""
# FastTripsLogger.debug("warn_on_stops_without_walk_access: \n%s", stops.stops_df.head() )
# FastTripsLogger.debug("warn_on_stops_without_walk_access: \n%s", self.walk_access_df.head() )
# join stops to walk access
no_access_stops = pd.merge(left=stops.stops_df[[Stop.STOPS_COLUMN_STOP_ID]],
right=self.walk_access_df[[TAZ.WALK_ACCESS_COLUMN_STOP, TAZ.WALK_ACCESS_COLUMN_TAZ]],
how="left")
no_access_stops = no_access_stops.loc[pd.isnull(no_access_stops[TAZ.WALK_ACCESS_COLUMN_TAZ])]
if len(no_access_stops) > 0:
FastTripsLogger.warn("The following %d stop ids have no walk access: \n%s" % (
len(no_access_stops), no_access_stops.to_string()))
def write_access_egress_for_extension(self, output_dir):
"""
Write the access and egress links to a single output file for the C++ extension to read.
It's in this form because passing the strings to C++ through
Assignment.initialize_fasttrips_extension is unresolved, so this is inconsistent,
but investigating is a time sink, so it stays this way for now.
.. todo:: Clean this up? Rename the intermediate files (they're not really output).
"""
# ========== Walk access/egress =================================================
# print "walk_access columns"
# for col in list(self.walk_access_df.columns): print " %s" % col
# start with all walk columns
self.walk_df = self.walk_access_df.copy()
# drop the redundant columns
drop_fields = [TAZ.WALK_ACCESS_COLUMN_TAZ, # use numerical version
TAZ.WALK_ACCESS_COLUMN_STOP, # use numerical version
TAZ.WALK_ACCESS_COLUMN_DIRECTION, # it's in the supply mode num
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE, # use numerical version
TAZ.WALK_ACCESS_COLUMN_TIME, # use numerical version
]
# we can only drop fields that are in the dataframe
walk_fields = list(self.walk_df.columns.values)
valid_drop_fields = []
for field in drop_fields:
if field in walk_fields: valid_drop_fields.append(field)
self.walk_df.drop(valid_drop_fields, axis=1, inplace=True)
# make walk access valid all times -- need this for consistency
self.walk_df[TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN] = 0.0
self.walk_df[TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN] = 60.0 * 24.0
# the index is TAZ num, supply mode num, and stop num
self.walk_df.set_index([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM,
TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM,
TAZ.WALK_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN,
TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN], inplace=True)
# ========== Drive access/egress =================================================
self.drive_df = self.drive_access_df.copy()
# print "drive_access columns"
# for col in list(self.drive_access_df.columns): print " %s" % col
# TEMP
drive_fields = list(self.drive_df.columns.values)
# drop some of the attributes
drop_fields = [TAZ.DRIVE_ACCESS_COLUMN_TAZ, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_STOP, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_DRIVE_TRAVEL_TIME, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_START_TIME, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_END_TIME, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_WALK_TIME, # use numerical version
TAZ.DRIVE_ACCESS_COLUMN_DIRECTION, # redundant with supply mode
TAZ.DAP_COLUMN_DROP_OFF, # redundant with supply mode
TAZ.DAP_COLUMN_LOT_LATITUDE, # probably not useful
TAZ.DAP_COLUMN_LOT_LONGITUDE, # probably not useful
TAZ.DRIVE_ACCESS_COLUMN_LOT_ID, # probably not useful
]
valid_drop_fields = []
for field in drop_fields:
if field in drive_fields: valid_drop_fields.append(field)
self.drive_df.drop(valid_drop_fields, axis=1, inplace=True)
# the index is TAZ num, supply mode num, and stop num
if len(self.drive_df) > 0:
self.drive_df.set_index([TAZ.DRIVE_ACCESS_COLUMN_TAZ_NUM,
TAZ.DRIVE_ACCESS_COLUMN_SUPPLY_MODE_NUM,
TAZ.DRIVE_ACCESS_COLUMN_STOP_NUM,
TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN,
TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN], inplace=True)
# stack() this will make it so beyond taz num, supply mode num, and stop num
# the remaining columns collapse to variable name, variable value
# put walk and drive together
access_df = pd.concat([self.walk_df.stack(), self.drive_df.stack()], axis=0).to_frame()
else:
access_df = self.walk_df.stack().to_frame()
access_df.reset_index(inplace=True)
# rename from the default column names; the five named index levels keep their
# names on reset_index(), so the unnamed stacked level comes out as "level_5"
access_df.rename(columns={"level_5": "attr_name", 0: "attr_value"}, inplace=True)
# make attr_value a float instead of an object
access_df["attr_value"] = access_df["attr_value"].astype(float)
FastTripsLogger.debug("\n" + str(access_df.head()))
FastTripsLogger.debug("\n" + str(access_df.tail()))
# Check for null stop ids
null_stop_ids = access_df.loc[pd.isnull(access_df["stop_id_num"])]
if len(null_stop_ids) > 0:
FastTripsLogger.warn("write_access_egress_for_extension null_stop_ids:\n%s" % str(null_stop_ids))
# for now, drop rows with null stop id nums
access_df = access_df.loc[pd.notnull(access_df["stop_id_num"])]
access_df["stop_id_num"] = access_df["stop_id_num"].astype(int)
access_df.to_csv(os.path.join(output_dir, TAZ.OUTPUT_ACCESS_EGRESS_FILE),
sep=" ", index=False)
FastTripsLogger.debug("Wrote %s" % os.path.join(output_dir, TAZ.OUTPUT_ACCESS_EGRESS_FILE))
| 57.681259
| 157
| 0.628958
| 41,181
| 0.976663
| 0
| 0
| 0
| 0
| 0
| 0
| 13,236
| 0.31391
|
58309191f39ca5397068401c1360251a2a11c48a
| 2,686
|
py
|
Python
|
tests/test_stardist2D.py
|
ianbgroves/stardist
|
6524c27d01c625dabfd75b1443dd46ccb1cb3dcd
|
[
"BSD-3-Clause"
] | 1
|
2021-02-05T11:59:39.000Z
|
2021-02-05T11:59:39.000Z
|
tests/test_stardist2D.py
|
ianbgroves/stardist
|
6524c27d01c625dabfd75b1443dd46ccb1cb3dcd
|
[
"BSD-3-Clause"
] | 1
|
2020-06-17T09:06:29.000Z
|
2020-06-17T09:06:29.000Z
|
tests/test_stardist2D.py
|
ianbgroves/stardist
|
6524c27d01c625dabfd75b1443dd46ccb1cb3dcd
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from stardist import star_dist, relabel_image_stardist
import pytest
from utils import random_image, real_image2d, check_similar, circle_image
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types(img, n_rays):
mode = "cpp"
gt = star_dist(img, n_rays=n_rays, mode=mode)
for dtype in (np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32):
x = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
print("test_stardist2D (mode {mode}) for shape {img.shape} and type {dtype}".format(
mode=mode, img=img, dtype=dtype))
check_similar(gt, x)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_types_gpu(img, n_rays):
mode = "opencl"
gt = star_dist(img, n_rays=n_rays, mode=mode)
for dtype in (np.int8, np.int16, np.int32,
np.uint8, np.uint16, np.uint32):
x = star_dist(img.astype(dtype), n_rays=n_rays, mode=mode)
print("test_stardist2D with mode {mode} for shape {img.shape} and type {dtype}".format(
mode=mode, img=img, dtype=dtype))
check_similar(gt, x)
@pytest.mark.gpu
@pytest.mark.parametrize('img', (real_image2d()[1], random_image((128, 123))))
@pytest.mark.parametrize('n_rays', (4, 16, 32))
def test_cpu_gpu(img, n_rays):
s_cpp = star_dist(img, n_rays=n_rays, mode="cpp")
s_ocl = star_dist(img, n_rays=n_rays, mode="opencl")
check_similar(s_cpp, s_ocl)
@pytest.mark.parametrize('n_rays', (32,64))
@pytest.mark.parametrize('eps', ((1,1),(.4,1.3)))
def test_relabel_consistency(n_rays, eps, plot = False):
""" test whether an already star-convex label image gets perfectly relabeld"""
# img = random_image((128, 123))
lbl1 = circle_image(shape=(32,32), radius=8, eps = eps)
lbl1 = relabel_image_stardist(lbl1, n_rays)
lbl2 = relabel_image_stardist(lbl1, n_rays)
rel_error = 1-np.count_nonzero(np.bitwise_and(lbl1>0, lbl2>0))/np.count_nonzero(lbl1>0)
print(rel_error)
assert rel_error<1e-1
if plot:
import matplotlib.pyplot as plt
plt.figure(num=1, figsize=(8,4))
plt.subplot(1,3,1);plt.imshow(lbl1);plt.title("GT")
plt.subplot(1,3,2);plt.imshow(lbl2);plt.title("Reco")
plt.subplot(1,3,3);plt.imshow(1*(lbl1>0)+2*(lbl2>0));plt.title("Overlay")
plt.tight_layout()
plt.show()
return lbl1, lbl2
if __name__ == '__main__':
lbl1, lbl2 = test_relabel_consistency(32,eps = (.7,1), plot = True)
| 36.794521
| 95
| 0.655249
| 0
| 0
| 0
| 0
| 2,403
| 0.894639
| 0
| 0
| 360
| 0.134028
|
583228f93313973cc02c96e9d032138aeb10b053
| 26,395
|
py
|
Python
|
all_call/infer_new.py
|
jbudis/dante
|
90177c33825d5f9ce3fba5463092fbcf20b72fe2
|
[
"Apache-2.0"
] | 4
|
2018-09-28T14:50:47.000Z
|
2021-08-09T12:46:12.000Z
|
all_call/infer_new.py
|
jbudis/dante
|
90177c33825d5f9ce3fba5463092fbcf20b72fe2
|
[
"Apache-2.0"
] | 6
|
2019-01-02T13:08:31.000Z
|
2021-03-25T21:45:40.000Z
|
all_call/infer_new.py
|
jbudis/dante
|
90177c33825d5f9ce3fba5463092fbcf20b72fe2
|
[
"Apache-2.0"
] | 1
|
2017-12-12T10:38:26.000Z
|
2017-12-12T10:38:26.000Z
|
import math
import functools
from scipy.stats import binom
import numpy as np
import itertools
import sys
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from copy import copy
def combine_distribs(deletes, inserts):
"""
Combine insert and delete models/distributions
:param deletes: ndarray - delete distribution
:param inserts: ndarray - insert distribution
:return: ndarray - combined array of the same length
"""
# how much to fill?
to_fill = sum(deletes == 0.0) + 1
while to_fill < len(inserts) and inserts[to_fill] > 0.0001:
to_fill += 1
# create the end array
len_del = len(deletes)
end_distr = np.zeros_like(deletes, dtype=float)
# fill it!
for i, a in enumerate(inserts[:to_fill]):
# print i,a,(deletes*a)[:len_del-i]
end_distr[i:] += (deletes * a)[:len_del - i]
# print("end_distr", end_distr[:3], deletes[:3], inserts[:3])
return end_distr
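def _combine_distribs_example():
    """A minimal illustrative sketch (added; not part of the original module):
    combine a delete model centered on 10 repeats with a rare-insert model and
    return the merged distribution, whose mass should stay near index 10."""
    deletes = binom.pmf(np.arange(15), 10, 0.99)   # mostly 10, slight deletions
    inserts = binom.pmf(np.arange(15), 10, 0.005)  # mostly 0 extra repeats
    return combine_distribs(deletes, inserts)      # length-15 array, argmax ~10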
def const_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Constant rate function.
:param n: int - allele number (unused)
:param p1: float - constant parameter
:param p2: float - linear parameter (unused)
:param p3: float - additional parameter (unused)
:return: float - p1
"""
return p1
def linear_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Linear rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - additional parameter (unused)
:return: float - p1 + p2 * n
"""
return p1 + p2 * n
def n2_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Quadratic rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - quadratic parameter
:return: float - p1 + p2 * n + p3 * n * n
"""
return p1 + p2 * n + p3 * n * n
def exp_rate(n, p1=0.0, p2=1.0, p3=1.0):
"""
Exponential rate function.
:param n: int - allele number
:param p1: float - constant parameter
:param p2: float - linear parameter
:param p3: float - exponential parameter
:return: float - p1 + p2 * e^(p3 * n)
"""
return p1 + p2 * math.exp(p3 * n)
def clip(value, minimal, maximal):
"""
Clips value to range <minimal, maximal>
:param value: ? - value
:param minimal: ? - minimal value
:param maximal: ? - maximal value
:return: ? - clipped value
"""
return min(max(minimal, value), maximal)
def model_full(rng, model_params, n, rate_func=linear_rate):
"""
Create binomial model for both deletes and inserts of STRs
:param rng: int - max_range of distribution
:param model_params: 4-tuple - parameters for inserts and deletes
:param n: int - target allele number
:param rate_func: function - rate function for deletes
:return: ndarray - combined distribution
"""
p1, p2, p3, q = model_params
deletes = binom.pmf(np.arange(rng), n, clip(1 - rate_func(n, p1, p2, p3), 0.0, 1.0))
inserts = binom.pmf(np.arange(rng), n, q)
return combine_distribs(deletes, inserts)
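def _model_full_example():
    """A minimal illustrative sketch (added; not part of the original module):
    build the combined delete/insert read-count model for a true allele of
    n=12 repeats over observed counts 0..19, using a linear delete rate and
    parameter values matching Inference.DEFAULT_MODEL_PARAMS."""
    model_params = (-0.0107736, 0.00244419, 0.0, 0.00440608)  # (p1, p2, p3, q)
    return model_full(20, model_params, 12, rate_func=linear_rate)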
def model_template(rng, model_params, rate_func=linear_rate):
"""
Partial function for model creation.
:param rng: int - max_range of distribution
:param model_params: 4-tuple - parameters for inserts and deletes
:param rate_func: function - rate function for deletes
:return: partial function with only 1 parameter - n - target allele number
"""
return functools.partial(model_full, rng, model_params, rate_func=rate_func)
class Inference:
""" Class for inference of alleles. """
MIN_REPETITIONS = 1
# default parameters for inference
DEFAULT_MODEL_PARAMS = (-0.0107736, 0.00244419, 0.0, 0.00440608)
DEFAULT_FIT_FUNCTION = "linear"
def __init__(self, read_distribution, params_file, str_rep=3, minl_primer1=5, minl_primer2=5, minl_str=5, p_bckg_closed=None, p_bckg_open=None, p_expanded=None):
"""
Initialization of the Inference class + setup of all models and their probabilities.
:param read_distribution: ndarray(int) - read distribution
:param params_file: str - filename of parameters
:param str_rep: int - length of the STR
:param minl_primer1: int - minimal length of the left primer
:param minl_primer2: int - minimal length of the right primer
:param minl_str: int - minimal length of the STR
:param p_bckg_closed: float - probability of the background model for closed observation
:param p_bckg_open: float - probability of the background model for open observation
:param p_expanded: float - probability of the expanded model (if None it is equal to other models)
"""
# assign variables
self.str_rep = str_rep
self.minl_primer1 = minl_primer1
self.minl_primer2 = minl_primer2
self.minl_str = minl_str
self.read_distribution = read_distribution
self.sum_reads_log = np.log(np.sum(read_distribution))
self.sum_reads = np.sum(read_distribution)
self.params_file = params_file
self.p_expanded = p_expanded
self.p_bckg_closed = p_bckg_closed
self.p_bckg_open = p_bckg_open
def construct_models(self, min_rep, max_rep, e_model):
"""
Construct all models needed for current inference.
:param min_rep: int - minimal allele to model
:param max_rep: int - maximal allele to model
:param e_model: int - model for expanded alleles
:return: None
"""
# extract params
model_params, rate_func_str = self.read_params(self.params_file)
str_to_func = {"linear": linear_rate, "const": const_rate, "exponential": exp_rate, "square": n2_rate}
rate_func = const_rate
if rate_func_str in str_to_func.keys():
rate_func = str_to_func[rate_func_str]
# save min_rep and max_rep
self.min_rep = min_rep
self.max_rep = max_rep # non-inclusive
self.max_with_e = e_model + 1 # non-inclusive
# get models
mt = model_template(self.max_with_e, model_params, rate_func)
self.background_model = np.concatenate([np.zeros(self.min_rep, dtype=float), np.ones(self.max_with_e - self.min_rep, dtype=float) / float(self.max_with_e - self.min_rep)])
self.expanded_model = mt(self.max_with_e - 1)
self.allele_models = {i: mt(i) for i in range(min_rep, max_rep)}
self.models = {'E': self.expanded_model, 'B': self.background_model}
self.models.update(self.allele_models)
# get model likelihoods
open_to_closed = 10.0
l_others = 1.0
l_bckg_open = 0.01
l_exp = 1.01
l_bckg_model_open = 1.0
if self.p_expanded is None:
self.p_expanded = l_exp
if self.p_bckg_open is None and self.p_bckg_closed is None:
self.p_bckg_open = l_bckg_open
self.p_bckg_closed = self.p_bckg_open / open_to_closed
if self.p_bckg_closed is None:
self.p_bckg_closed = self.p_bckg_open / open_to_closed
if self.p_bckg_open is None:
self.p_bckg_open = self.p_bckg_closed * open_to_closed
self.model_probabilities = {'E': self.p_expanded, 'B': l_bckg_model_open}
self.model_probabilities.update({i: l_others for i in self.allele_models.keys()})
def read_params(self, params_file):
"""
Reads all parameters written with write_params(print_all=True)
:param params_file: str - filename to read parameters from, if None, load default params
:return: tuple(4-tuple, str) - model parameters and the name of the rate (fit) function
"""
if params_file is None:
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# read 2nd and last line of the file
with open(params_file) as f:
lines = f.readlines()
fit_function = lines[1].strip().split()[1]
split = list(map(float, lines[-1].strip().split()))
if len(split) < 4:
print("ERROR: parameters were not read successfully, using defaults!", file=sys.stderr)
return self.DEFAULT_MODEL_PARAMS, self.DEFAULT_FIT_FUNCTION
# extract parameters from last line of file
model_params = tuple(split[0:4])
return model_params, fit_function
def likelihood_rl(self, rl):
"""
Likelihood of a read with this length.
:param rl: int - read length
:return: float - likelihood of a read this long
"""
# print('rl', self.read_distribution[rl] / float(self.sum_reads))
return self.read_distribution[rl] / float(self.sum_reads)
@staticmethod
def likelihood_model(model, g):
"""
Likelihood of an observed allele count g under the given allele model.
:param model: ndarray - model that we evaluate
:param g: int - observed read count
:return: float - likelihood of a read coming from this model
"""
return model[g]
def likelihood_intersection(self, model_i, model_j, g):
return min(model_i[g], model_j[g])
def likelihood_coverage(self, true_length, rl, closed=True):
"""
Likelihood of generating a read with this length and this allele.
:param true_length: int - true number of repetitions of an STR
:param rl: int - read length
:param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of a read being generated with this attributes
"""
whole_inside_str = max(0, true_length * self.str_rep + self.minl_primer1 + self.minl_primer2 - rl + 1)
# closed_overlapping = max(0, rl - self.minl_primer1 - self.minl_primer2 - true_length * self.str_rep + 1)
open_overlapping = max(0, rl + true_length * self.str_rep - 2 * self.minl_str + 1)
assert open_overlapping > whole_inside_str, '%d open %d whole inside %d %d %d' % (open_overlapping, whole_inside_str, true_length, rl, self.minl_str)
return 1.0 / float(open_overlapping - whole_inside_str)
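# Worked example (added for clarity; numbers hypothetical): with str_rep=3,
# minl_primer1 = minl_primer2 = minl_str = 5, true_length=10 and rl=100,
# whole_inside_str = max(0, 30 + 5 + 5 - 100 + 1) = 0 and
# open_overlapping = max(0, 100 + 30 - 10 + 1) = 121, so each read placement
# has likelihood 1/121.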
def likelihood_read_allele(self, model, observed, rl, closed=True):
"""
Likelihood of generation of read with observed allele count and rl.
:param model: ndarray - model for the allele
:param observed: int - observed allele count
:param rl: int - read length
:param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of this read being generated from this allele model
"""
if closed:
return self.likelihood_rl(rl) * self.likelihood_model(model, observed) * self.likelihood_coverage(observed, rl, True)
else:
number_of_options = 0
partial_likelihood = 0
for true_length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
partial_likelihood += self.likelihood_model(model, true_length) * self.likelihood_coverage(true_length, rl, False)
number_of_options += 1
return self.likelihood_rl(rl) * partial_likelihood / float(number_of_options)
def likelihood_read_intersection(self, model_i, model_j, observed, rl, closed=True):
"""
Likelihood of generating a read with this observed allele count and rl, under the
pointwise intersection (minimum) of the two allele models.
:param model_i: ndarray - model for the first allele
:param model_j: ndarray - model for the second allele
:param observed: int - observed allele count
:param rl: int - read length
:param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of this read under the intersection of the models
"""
if closed:
return self.likelihood_rl(rl) * self.likelihood_intersection(model_i, model_j, observed) * self.likelihood_coverage(observed, rl, True)
else:
number_of_options = 0
partial_likelihood = 0
for true_length in itertools.chain(range(observed, self.max_rep), [self.max_with_e - 1]):
partial_likelihood += self.likelihood_intersection(model_i, model_j, true_length) * self.likelihood_coverage(true_length, rl, False)
number_of_options += 1
return self.likelihood_rl(rl) * partial_likelihood / float(number_of_options)
def likelihood_read(self, observed, rl, model_index1, model_index2, closed=True):
"""
Compute likelihood of generation of a read from either of those models.
:param observed: int - observed allele count
:param rl: int - read length
:param model_index1: char/int - model index for left allele
:param model_index2: char/int - model index for right allele
:param closed: bool - if the read is closed - i.e. both primers are there
:return: float - likelihood of this read generation
"""
# print('testing', model_index1, model_index2)
model_i = self.models[model_index1]
model_j = self.models[model_index2]
model_prob_i = self.model_probabilities[model_index1]
model_prob_j = self.model_probabilities[model_index2]
# TODO: in my opinion this cannot simply be a plain sum; the correlation between the models is missing, but as a simplification it is probably OK
allele1_likelihood = model_prob_i * self.likelihood_read_allele(model_i, observed, rl, closed)
allele2_likelihood = model_prob_j * self.likelihood_read_allele(model_j, observed, rl, closed)
p_bckg = self.p_bckg_closed if closed else self.p_bckg_open
bckgrnd_likelihood = p_bckg * self.likelihood_read_allele(self.models['B'], observed, rl, closed)
# alleles_intersection = min(model_prob_j, model_prob_i) * self.likelihood_read_intersection(model_i, model_j, observed, rl, closed)
# if alleles_intersection > 0.0:
# print('%g %g %g %s %s %d' % (alleles_intersection, allele2_likelihood, allele1_likelihood, str(model_index1), str(model_index2), observed))
assert not np.isnan(allele2_likelihood)
assert not np.isnan(allele1_likelihood)
assert not np.isnan(bckgrnd_likelihood)
# assert alleles_intersection <= max(allele1_likelihood, allele2_likelihood), '%g %g %g %s %s %d' % (
# alleles_intersection, allele2_likelihood, allele1_likelihood, str(model_index1), str(model_index2), observed)
# print('read_%s' % (str(closed)), observed, 'all1_lh', allele1_likelihood, 'all2_lh', allele2_likelihood)
return allele1_likelihood + allele2_likelihood + bckgrnd_likelihood # - alleles_intersection
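# Note (added for clarity): the read likelihood is a weighted mixture of three
# sources, i.e. P(read) = p_i * P(read|G_i) + p_j * P(read|G_j) + p_bckg * P(read|B).
# The commented-out intersection term was an attempt to subtract the overlap
# (correlation) between the two allele models; see the TODO above.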
def infer(self, annotations, filt_annotations, index_rep, verbose=True):
"""
Does all of the inference: computes which combination of two alleles best explains these annotations under the model parameters.
argmax_{G1, G2} P(G1, G2 | AL, COV, RL) ~ P(AL, COV, RL | G1, G2) * P(G1, G2) = prod_{read_i} P(al_i, cov_i, rl_i | G1, G2) * P(G1, G2) = {assuming G1 and G2 independent}
= prod_{read_i} P(al_i, cov_i, rl_i | G1) * P(al_i, cov_i, rl_i | G2) * P(G1) * P(G2) {here G1, G2 range over possible alleles, background, and expanded; priors come from params}
P(al_i, cov_i, rl_i | G1) - 2 options: 1. closed evidence (al_i = X), we know X; 2. open evidence (al_i >= X), cl_i == True if i is closed
1.: P(al_i, cov_i, rl_i, cl_i | G1) = P(rl_i is from read distribution) * p(allele is al_i | G1) * P(read generated closed evidence | rl_i, al_i)
2.: P(rl_i is from r.distr.) * P(allele is >= al_i | G1) * P(read generated open evidence | rl_i, al_i)
:param annotations: iterator(reads) - closed reads (both primers set)
:param filt_annotations: iterator(reads) - open reads (only one primer set)
:param index_rep: int - index of a repetition
:param verbose: bool - print more stuff?
:return: dict(tuple(int, int):float) - directory of model indices to their likelihood
"""
# generate closed observed and read_length arrays
observed_annots = list(map(lambda x: x.module_repetitions[index_rep], annotations))
rl_annots = list(map(lambda x: len(x.read.sequence), annotations))
closed_annots = np.ones_like(observed_annots, dtype=bool)
# generate open observed and read_length arrays
observed_fa = list(map(lambda x: x.module_repetitions[index_rep], filt_annotations))
rl_fa = list(map(lambda x: len(x.read.sequence), filt_annotations))
closed_fa = np.zeros_like(observed_fa, dtype=bool)
# join them and keep the information if they are open or closed
observed_arr = np.concatenate([observed_annots, observed_fa]).astype(int)
rl_arr = np.concatenate([rl_annots, rl_fa]).astype(int)
closed_arr = np.concatenate([closed_annots, closed_fa]).astype(bool)
# generate the boundaries:
overhead = 3
if len(observed_annots) == 0:
max_rep = max(observed_fa) + overhead # non-inclusive
min_rep = max(self.MIN_REPETITIONS, max(observed_fa) - overhead) # inclusive
else:
max_rep = max(observed_annots) + overhead + 1 # non-inclusive
min_rep = max(self.MIN_REPETITIONS, min(observed_annots) - overhead) # inclusive
# expanded allele
e_allele = max_rep
if len(observed_fa) > 0:
e_allele = max(max_rep, max(observed_fa) + 1)
# generate all the models
self.construct_models(min_rep, max_rep, e_allele)
tested_models = []
for model_index1 in range(min_rep, max_rep):
for model_index2 in range(model_index1, max_rep):
tested_models.append((model_index1, model_index2))
tested_models.append((model_index1, 'E'))
# tested_models.append(('B', model_index1))
tested_models.append(('B', 'B'))
tested_models.append(('E', 'E'))
# go through every model and evaluate:
evaluated_models = {}
for m1, m2 in tested_models:
evaluated_models[(m1, m2)] = 0
if verbose:
print('model', m1, m2)
# go through every read
for obs, rl, closed in zip(observed_arr, rl_arr, closed_arr):
lh = self.likelihood_read(obs, rl, m1, m2, closed=closed)
# TODO weighted sum according to the closeness/openness of reads?
evaluated_models[(m1, m2)] += np.log(lh)
if verbose:
print('model', m1, m2, 'log-likelihood', evaluated_models[(m1, m2)])
return evaluated_models
def print_pcolor(self, lh_dict, display_file, name, lognorm=True):
"""
Get maximum likelihood option and alternatively print it to image file.
:param lh_dict: dict(tuple(int, int):float) - directory of model indices to their likelihood
:param display_file: str - filename for pcolor image output
:param name: str - name to use in title
:param lognorm: bool - use loglog scale in displaying likelihood array
:return: tuple(int, int) - option with highest likelihood
"""
# convert to a numpy array:
lh_array = np.zeros((self.max_rep, self.max_rep + 1))
for (k1, k2), v in lh_dict.items():
if k1 == 'B':
k1 = 0
if k2 == 'B':
k2 = 0
if k1 == 'E':
k1 = 0
if k2 == 'E':
k2 = self.max_rep
lh_array[k1, k2] = v
# print(lh_dict, lh_array)
# get minimal and maximal likelihood
ind_good = (lh_array < 0.0) & (lh_array > -1e10) & (lh_array != np.nan)
if len(lh_array[ind_good]) == 0:
return lh_array, (0, 0)
lh_array[~ind_good] = np.NINF
z_min, z_max = min(lh_array[ind_good]), max(lh_array[ind_good])
max_str = len(lh_array)
# generate image file if specified:
if display_file is not None:
plt.figure()
if lognorm:
lh_view = -np.log(-lh_array)
z_min = -np.log(-z_min)
z_max = -np.log(-z_max)
else:
lh_view = lh_array
# background:
bg_size = max(2, (len(lh_view) - self.min_rep) // 6)
if len(lh_view) - self.min_rep <= 6:
bg_size = 1
lh_view[-bg_size:, self.min_rep:self.min_rep + bg_size] = lh_view[0, 0]
# expanded
lh_view[-bg_size:, self.min_rep + bg_size:self.min_rep + 2 * bg_size] = lh_view[0, self.max_rep]
# plotting
plt.title("%s likelihood of each option for %s" % ("Loglog" if lognorm else "Log", name))
plt.xlabel('2nd allele')
plt.ylabel('1st allele')
start_ticks = 5
step_ticks = 5
plt.xticks(np.concatenate([np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)), [max_str - self.min_rep]]) + 0.5,
list(range(start_ticks, max_str, step_ticks)) + ['E(>%d)' % (self.max_with_e - 2)])
plt.yticks(np.array(range(start_ticks - self.min_rep, max_str - self.min_rep, step_ticks)) + 0.5, range(start_ticks, max_str, step_ticks))
palette = copy(plt.cm.jet)
palette.set_under('gray', 1.0)
plt.pcolor(lh_view[self.min_rep:, self.min_rep:], cmap=palette, vmin=z_min, vmax=z_max)
plt.colorbar()
# draw dividing line:
plt.plot([max_str - self.min_rep, max_str - self.min_rep], [0, max_str - self.min_rep], 'k', linewidth=3)
# background:
plt.text(float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'BG', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# expanded
plt.text(bg_size + float(bg_size) / 2.0, max_str - self.min_rep - float(bg_size) / 2.0, 'Exp', size=20, horizontalalignment='center',
verticalalignment='center', path_effects=[PathEffects.withStroke(linewidth=2.5, foreground="w")])
# save
plt.savefig(display_file + '.pdf')
plt.savefig(display_file + '.png')
plt.close()
# output best option
best = sorted(np.unravel_index(np.argmax(lh_array), lh_array.shape))
# and convert it to symbols
if best[0] == 0 and best[1] == 0:
best_sym = ('B', 'B')
else:
best_sym = list(map(lambda x: 'E' if x == self.max_rep or x == 0 else x, best))
return lh_array, best, best_sym
def get_confidence(self, lh_array, predicted):
"""
Get confidence of a prediction.
:param lh_array: 2D-ndarray - log likelihoods of the prediction
:param predicted: tuple(int, int) - predicted alleles
:return: tuple(float, float, float) - prediction confidence of all, first, and second allele(s)
"""
# get confidence
lh_corr_array = lh_array - np.max(lh_array)
lh_sum = np.sum(np.exp(lh_corr_array))
confidence = np.exp(lh_corr_array[predicted[0], predicted[1]]) / lh_sum
confidence1 = np.sum(np.exp(lh_corr_array[predicted[0], :])) / lh_sum
confidence2 = np.sum(np.exp(lh_corr_array[:, predicted[1]])) / lh_sum
confidence_back = np.exp(lh_corr_array[0, 0]) / lh_sum
confidence_back_all = np.sum(np.exp(lh_corr_array[0, :])) / lh_sum
confidence_exp = np.exp(lh_corr_array[0, self.max_rep]) / lh_sum
confidence_exp_all = np.sum(np.exp(lh_corr_array[:, self.max_rep])) / lh_sum
return confidence, confidence1, confidence2, confidence_back, confidence_back_all, confidence_exp, confidence_exp_all
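# Note (added for clarity): subtracting np.max(lh_array) before exponentiating
# is the standard log-sum-exp stabilization. The common factor exp(-L_max)
# cancels in every ratio, so e.g.
# exp(L[i,j] - L_max) / sum(exp(L - L_max)) == exp(L[i,j]) / sum(exp(L)).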
@staticmethod
def write_output(file_desc, predicted, conf, name):
"""
Write result of one prediction.
:param file_desc: file descriptor - where to write to
:param predicted: tuple(int/char, int/char) - predicted alleles
:param conf: tuple(float, float, float) - confidence of prediction (whole, 1st allele, 2nd allele)
:param name: str/int - name/number of the sample
:return: None
"""
def write_output_fd(f, predicted, conf, name):
print("Predicted alleles for %s: (confidence = %5.1f%%)" % (str(name), conf[0] * 100.0), file=f)
print("\t%3s (confidence = %5.1f%%)" % (str(predicted[0]), conf[1] * 100.0), file=f)
print("\t%3s (confidence = %5.1f%%)" % (str(predicted[1]), conf[2] * 100.0), file=f)
print("B B %7.3f%%" % (conf[3] * 100.0), file=f)
print("all B %7.3f%%" % (conf[4] * 100.0), file=f)
print("B E %7.3f%%" % (conf[5] * 100.0), file=f)
print("all E %7.3f%%" % (conf[6] * 100.0), file=f)
if type(file_desc) is str:
with open(file_desc, 'w') as f:
write_output_fd(f, predicted, conf, name)
else:
write_output_fd(file_desc, predicted, conf, name)
def all_call(self, annotations, filt_annotations, index_rep, file_pcolor, file_output, name):
"""
Run All_call - inference of likelihoods, printing of pcolor and writing output.
:param annotations: list(Annotation) - good (blue) annotations
:param filt_annotations: list(Annotation) - (grey) annotations with one primer
:param index_rep: int - index of a repetition
:param file_pcolor: str - file prefix for a pcolor image
:param file_output: str - file for all_call output
:param name: str - name of the sample
:return: None
"""
# if we do not have any good annotations, then quit
if len(annotations) == 0 and len(filt_annotations) == 0:
# write output
# self.write_output(file_output, ('B', 'B'), (0.0, 0.0, 0.0), name)
return None
# infer likelihoods
lh_dict = self.infer(annotations, filt_annotations, index_rep, verbose=False)
# print pcolor image
lh_array, predicted, predicted_sym = self.print_pcolor(lh_dict, file_pcolor, name)
# get confidence of our prediction
conf = self.get_confidence(lh_array, predicted)
# write output
self.write_output(file_output, predicted_sym, conf, name)
| 44.21273
| 183
| 0.633567
| 22,778
| 0.862966
| 0
| 0
| 1,607
| 0.060883
| 0
| 0
| 11,052
| 0.418716
|
583374a576c3edb6be71e460848c9177cb1eee6a
| 18,398
|
py
|
Python
|
createbag.py
|
axfelix/moveit
|
a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2
|
[
"Unlicense"
] | null | null | null |
createbag.py
|
axfelix/moveit
|
a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2
|
[
"Unlicense"
] | null | null | null |
createbag.py
|
axfelix/moveit
|
a0d4207fdd90af8f05a5c55b4b247757cd6d7bb2
|
[
"Unlicense"
] | null | null | null |
"""
GUI tool to create a Bag from a filesystem folder.
"""
import sys
import os
import shutil
import bagit
import platform
import random
import string
import re
from time import strftime
import subprocess
from paramiko import SSHClient
from paramiko import AutoAddPolicy
from paramiko import AuthenticationException
from scp import SCPClient
from distutils.dir_util import copy_tree
import zipfile
import hashlib
import tempfile
from zipfile import ZipFile
import platform
pyversion = platform.python_version_tuple()[0]
if pyversion == "2":
from urllib import urlencode
import urllib2
else:
from urllib.parse import urlencode
import urllib.request as urllib2
# These are toggled at build time. TODO: switch to argument parser.
# toggle this if depositing to an Active Directory server
internalDepositor = 0
# toggle this if depositing to SFU Library
radar = 0
# toggle this if bypassing the Bagit step
nobag = 0
# toggle this if bypassing the transfer and only creating a Bag on desktop
ziponly = 1
bagit_checksum_algorithms = ['md5']
confirmation_message_win = "The transfer package will be created and placed on your\n desktop after this; large packages may take a moment.\n\nAre all the transfer details correct?\n\n"
#confirmation_message_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\nAre all the transfer details correct?\n\n"
confirmation_message_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\n"
session_message = "Session Number"
session_message_final_win = "The transfer package will be created and placed on your\n desktop after this; large packages may take a moment.\n\nSession Number"
session_message_final_mac = "The transfer package will be created and placed on your desktop after this; large packages may take a moment.\n\nSession Number"
transfer_message = "Transfer Number"
if internalDepositor == 0:
username_message = "Username"
password_message = "Password"
else:
username_message = "SFU Computing ID"
password_message = "SFU Computing password"
close_session_message = "Is this the final session for this transfer?\nThe transfer will begin in the background after this \nand let you know when it is complete."
close_session_osx_title = "Is this the final session for this transfer?"
close_session_osx_informative = "The transfer will begin in the background and let you know when it is complete."
if radar == 0:
sfu_success_message = "Files have been successfuly transferred to SFU Archives. \nAn archivist will be in contact with you if further attention is needed."
bag_success_message = "Files have been successfully packaged and placed in a new folder on your desktop for transfer."
else:
sfu_success_message = "Files have been successfuly transferred to SFU Library. \nA librarian will be in contact with you if further attention is needed."
password_message = "Please input your SFU Computing password. \nTransfer will commence after clicking OK and you will be notified when it is complete."
sfu_failure_message = "Transfer did not complete successfully. \nPlease contact moveit@sfu.ca for help."
if platform.system() != 'Darwin' and platform.system() != 'Windows':
# The Linux/Gtk config has been removed for now
from gi.repository import Gtk
elif platform.system() == 'Windows':
from PyQt4 import QtGui, QtCore
elif platform.system() == 'Darwin':
# Sets up Cocoadialog for error message popup on OSX.
CD_PATH = os.path.join("~/.createbag/", "CocoaDialog.app/Contents/MacOS/CocoaDialog")
def cocoaPopup(boxtype, title, texttype, message, button, buttontext):
template = CD_PATH + " %s --title '%s' '%s' '%s' '%s' '%s'"
cocoa_process = subprocess.Popen(template % (boxtype, title, texttype, message, button, buttontext), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=False)
cocoa_output = cocoa_process.communicate()
cocoa_result = cocoa_output[0].splitlines()
return cocoa_result
def cocoaError():
if __name__ == "__main__":
popup = cocoaPopup("msgbox", "Error", "--text", "Sorry, you can't create a bag here -- you may want to change the config file so that bags are always created in a different output directory, rather than in situ.", "--button1", "OK")
if popup == "1":
sys.exit()
def cocoaSuccess(bag_dir):
if __name__ == "__main__":
popup = cocoaPopup("msgbox", "Success!", "--text", "Bag created at %s" % bag_dir, "--button1", "OK")
def cocoaTransferSuccess(success_type):
if __name__ == "__main__":
popup = cocoaPopup("msgbox", "SFU MoveIt", "--informative-text", success_type, "--button1", "OK")
def cocoaTransferError(failure_message=sfu_failure_message):
if __name__ == "__main__":
popup = cocoaPopup("msgbox", "SFU MoveIt", "--informative-text", failure_message, "--button1", "OK")
if popup == "1":
sys.exit()
def cocoaSessionNo():
if __name__ == "__main__":
popup = cocoaPopup("standard-inputbox", "Session Number", "--informative-text", session_message, "", "")
if popup[0] == "2":
sys.exit()
return popup[1]
def cocoaTransferNo():
if __name__ == "__main__":
popup = cocoaPopup("standard-inputbox", "Transfer Number", "--informative-text", transfer_message, "", "")
if popup[0] == "2":
sys.exit()
return popup[1]
def cocoaUsername():
if __name__ == "__main__":
popup = cocoaPopup("standard-inputbox", "Username", "--informative-text", username_message, "", "")
if popup[0] == "2":
sys.exit()
return popup[1]
def cocoaPassword():
if __name__ == "__main__":
popup = cocoaPopup("secure-standard-inputbox", "Password", "--informative-text", password_message, "", "")
if popup[0] == "2":
sys.exit()
return popup[1]
# Dummied temporarily because of issues w/ CocoaDialog under High Sierra
def cocoaConfirmation(confirmation_mac):
if __name__ == "__main__":
#popup = cocoaPopup("yesno-msgbox", "SFU MoveIt", "--text", "Confirm Transfer", "--informative-text", confirmation_mac)
#if popup[0] == "3" or popup[0] == "2":
# sys.exit()
popup = cocoaPopup("msgbox", "SFU MoveIt", "--informative-text", confirmation_mac, "--button1", "OK")
if popup == "1":
sys.exit()
return
def cocoaCloseSession():
if __name__ == "__main__":
popup = cocoaPopup("yesno-msgbox", "SFU MoveIt", "--text", close_session_osx_title, "--informative-text", close_session_osx_informative)
if popup[0] == "3":
sys.exit()
# "no" will equal 2 rather than 0 in cocoa, but "yes" still = 1
return popup[0]
def make_bag(chosen_folder):
if nobag == 0:
bag_dir_parent = tempfile.mkdtemp()
if os.path.isdir(bag_dir_parent):
shutil.rmtree(bag_dir_parent)
bag_dir = os.path.join(bag_dir_parent, 'bag')
os.makedirs(bag_dir)
copy_tree(chosen_folder, bag_dir)
# Create the Bag.
try:
bag = bagit.make_bag(bag_dir, None, 1, bagit_checksum_algorithms)
except (bagit.BagError, Exception) as e:
if platform.system() == 'Darwin':
cocoaError()
elif platform.system() == 'Windows':
QtChooserWindow.qt_error(e)
return
return bag_dir_parent
else:
return chosen_folder
def transfer_manifest(bag_dir, sessionno, transferno, archivesUsername, checksum, metafilename, filelist):
current_time = strftime("%Y-%m-%d %H:%M:%S")
transfer_metadata = "Transfer Number: " + transferno + "-" + sessionno + "\nUser: " + archivesUsername + "\nChecksum: " + checksum + "\nTime Received: " + current_time + "\n" + filelist
with open(metafilename, 'w') as transfer_metafile:
transfer_metafile.write(transfer_metadata)
def generate_password():
length = 13
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
random.seed(os.urandom(1024))  # call seed(); assigning to random.seed would replace the function without seeding anything
passwordString = ''.join(random.choice(chars) for i in range(length))
return passwordString
def generate_file_md5(zipname, blocksize=2**20):
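# Hash the file in fixed-size chunks so that large zip archives never have
# to be loaded into memory at once.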
m = hashlib.md5()
with open(zipname, "rb") as f:
while True:
buf = f.read(blocksize)
if not buf:
break
m.update(buf)
return m.hexdigest()
def check_zip_and_send(bag_dir_parent, sessionno, transferno, archivesUsername, archivesPassword, close_session, parent_path):
if nobag == 0:
bag_dir = os.path.join(str(bag_dir_parent), 'bag')
numbered_bag_dir = os.path.join(str(bag_dir_parent), (transferno + "-" + sessionno))
metafilename = numbered_bag_dir + "-meta.txt"
zipname = shutil.make_archive(numbered_bag_dir, 'zip', bag_dir)
checksum = generate_file_md5(zipname)
with open(os.path.join(bag_dir, 'manifest-md5.txt'), 'r') as manifestmd5:
bagit_manifest_txt = manifestmd5.read()
filelist = re.sub("\r?\n\S*?\s+data", ("\n" + parent_path), bagit_manifest_txt)
filelist = filelist.split(' ', 1)[1]
passwordString = generate_password()
# Passwording uploaded files is disabled for now.
#with ZipFile(zipname, 'a') as transferZip:
# transferZip.setpassword(passwordString)
shutil.rmtree(bag_dir)
# check transfer number blacklist and post back if OK
get_req = urllib2.Request("http://arbutus.archives.sfu.ca:8008/blacklist")
try:
get_response = urllib2.urlopen(get_req, timeout = 2)
blacklist = get_response.read()
blacklist_entries = blacklist.split()
if transferno in blacklist_entries:
if platform.system() == 'Darwin':
cocoaTransferError()
elif platform.system() == 'Windows':
QtChooserWindow.qt_transfer_failure(ex)
return
except:
pass
values = {'transfer' : transferno, 'session' : sessionno, 'username' : archivesUsername, 'checksum' : checksum}
postdata = urlencode(values)
post_req = urllib2.Request("http://arbutus.archives.sfu.ca:8008/blacklist", postdata)
else:
filelist = ""
transfer_manifest(bag_dir, sessionno, transferno, archivesUsername, checksum, metafilename, filelist)
if ziponly == 1:
desktopPath = os.path.expanduser("~/Desktop/")
outputPath = desktopPath + os.path.splitext(os.path.basename(zipname))[0]
os.mkdir(outputPath)
shutil.move(zipname, (outputPath + "/" + os.path.basename(zipname)))
shutil.move(metafilename, (outputPath + "/" + os.path.basename(metafilename)))
return "bagged"
try:
ssh = SSHClient()
ssh.set_missing_host_key_policy(AutoAddPolicy())
if internalDepositor == 0:
ssh.connect("142.58.136.69", username=archivesUsername, password=archivesPassword, look_for_keys=False)
scp = SCPClient(ssh.get_transport())
remote_path = '~/deposit_here/' + transferno + "-" + sessionno
scp.put(bag_dir_parent, remote_path, recursive=True)
if close_session == 1:
try:
urllib2.urlopen(post_req, timeout = 2)
except:
pass
elif radar == 1:
ssh.connect("researchdata.sfu.ca", username=archivesUsername, password=archivesPassword, look_for_keys=False)
scp = SCPClient(ssh.get_transport())
remote_zip_path = '~/.pydiodata/' + os.path.basename(os.path.normpath(bag_dir))
try:
scp.put(os.path.normpath(bag_dir), remote_zip_path, recursive=True)
except:
ssh.exec_command('mkdir .pydiodata')
scp.put(os.path.normpath(bag_dir), remote_zip_path, recursive=True)
else:
ssh.connect("pine.archives.sfu.ca", username=archivesUsername, password=archivesPassword, look_for_keys=False)
scp = SCPClient(ssh.get_transport())
remote_path = '~/' + transferno + "-" + sessionno
scp.put(bag_dir_parent, remote_path, recursive=True)
if close_session == 1:
try:
urllib2.urlopen(post_req, timeout = 2)
except:
pass
except AuthenticationException:
failure_message = "Transfer did not complete successfully. \nUsername or password incorrect."
if platform.system() == 'Darwin':
cocoaTransferError(failure_message)
elif platform.system() == 'Windows':
QtChooserWindow.qt_transfer_failure(ex, failure_message)
return
except:
if platform.system() == 'Darwin':
cocoaTransferError()
elif platform.system() == 'Windows':
QtChooserWindow.qt_transfer_failure(ex)
return
if nobag == 0:
os.remove(zipname)
os.remove(metafilename)
return remote_path
# Windows/Qt-specific code (can also work on Linux but Gtk is nicer)
if platform.system() == 'Windows':
class QtChooserWindow(QtGui.QDialog):
def __init__(self, parent=None):
super(QtChooserWindow, self).__init__(parent)
if parent is None:
self.initUI()
def initUI(self):
choose_folder_button = QtGui.QPushButton("Choose a folder to transfer", self)
choose_folder_button.clicked.connect(self.showDialog)
choose_folder_button.resize(choose_folder_button.sizeHint())
choose_folder_button.move(20, 30)
quit_button = QtGui.QPushButton("Quit", self)
quit_button.clicked.connect(QtCore.QCoreApplication.instance().quit)
quit_button.resize(quit_button.sizeHint())
quit_button.move(250, 30)
self.resize(345, 80)
self.center()
self.setWindowTitle('SFU MoveIt')
self.show()
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def showDialog(self):
fname = QtGui.QFileDialog.getExistingDirectory(self, 'SFU MoveIt - Choose a folder to transfer', '/home')
parent_path = os.path.basename(os.path.normpath(str(fname)))
bag_dir = make_bag(str(fname))
if (bag_dir):
archivesUsername = self.qt_username(bag_dir)
if archivesUsername == "":
sys.exit()
if ziponly == 0:
archivesPassword = self.qt_password(bag_dir)
else:
archivesPassword = ""
if radar == 0:
transferno = self.qt_transfer(bag_dir)
if transferno == "":
sys.exit()
sessionno = self.qt_session(bag_dir)
if sessionno == "":
sys.exit()
confirmation = self.qt_review(bag_dir, archivesUsername, transferno, sessionno)  # argument order must match qt_review(bag_dir, username, transferno, sessionno)
if ziponly == 0:
close_session = self.qt_close_session()
else:
close_session = 0
else:
sessionno = 0
transferno = 0
close_session = 0
payload = check_zip_and_send(bag_dir, str(sessionno), str(transferno), str(archivesUsername), str(archivesPassword), close_session, parent_path)
if (payload):
if payload == "bagged":
self.qt_transfer_success(bag_success_message)
else:
self.qt_transfer_success(sfu_success_message)
def qt_username(self, bag_dir):
archivesUsername, ok = QtGui.QInputDialog.getText(self, "Username", username_message)
return archivesUsername
def qt_password(self, bag_dir):
archivesPassword, ok = QtGui.QInputDialog.getText(self, "Password", password_message, QtGui.QLineEdit.Password)
return archivesPassword
def qt_session(self, bag_dir):
sessionno, ok = QtGui.QInputDialog.getText(self, "Session Number", session_message)
return sessionno
def qt_transfer(self, bag_dir):
transferno, ok = QtGui.QInputDialog.getText(self, "Transfer Number", transfer_message)
return transferno
def qt_review(self, bag_dir, archivesUsername, transferno, sessionno):
confirmation_string = confirmation_message_win + "\nUsername: " + archivesUsername + "\nTransfer: " + transferno + "-" + sessionno
review_window = QtGui.QMessageBox.question(self, 'SFU MoveIt', confirmation_string, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if review_window == QtGui.QMessageBox.Yes:
return
else:
sys.exit()
def qt_close_session(self):
close_session_window = QtGui.QMessageBox.question(self, 'SFU MoveIt', close_session_message, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if close_session_window == QtGui.QMessageBox.Yes:
close_session = 1
else:
close_session = 0
return close_session
def qt_transfer_success(self, success_type):
confirmation_window = QtChooserWindow(self)
confirmation_string = success_type
confirmation_message = QtGui.QLabel(confirmation_string, confirmation_window)
confirmation_message.move(20, 30)
confirmation_window.resize(500, 80)
confirmation_window.center()
confirmation_window.setWindowTitle('Success')
confirmation_window.show()
def qt_transfer_failure(self, failure_message=sfu_failure_message):
confirmation_window = QtChooserWindow(self)
confirmation_string = failure_message
confirmation_message = QtGui.QLabel(confirmation_string, confirmation_window)
confirmation_message.move(20, 30)
confirmation_window.resize(500, 80)
confirmation_window.center()
confirmation_window.setWindowTitle('Error')
confirmation_window.show()
def qt_confirmation(self, bag_dir):
confirmation_window = QtChooserWindow(self)
confirmation_string = "The Bag for folder " + bag_dir + " has been created."
confirmation_message = QtGui.QLabel(confirmation_string, confirmation_window)
confirmation_message.move(20, 30)
confirmation_window.resize(500, 80)
confirmation_window.center()
confirmation_window.setWindowTitle('Bag created')
confirmation_window.show()
def qt_error(self):
error_window = QtChooserWindow(self)
error_message = QtGui.QLabel("Something went wrong! Please open an issue report at http://github.com/axfelix/moveit/issues", error_window)
error_message.move(20, 30)
error_window.resize(360, 80)
error_window.center()
error_window.setWindowTitle('Sorry')
error_window.show()
app = QtGui.QApplication(sys.argv)
ex = QtChooserWindow()
sys.exit(app.exec_())
# OSX-specific code.
elif platform.system() == 'Darwin':
# add progress bar code eventually
# Python 3 needs .decode() because Cocoa returns bytestrings
archivesUsername = cocoaUsername().decode()
if ziponly == 0:
archivesPassword = cocoaPassword().decode()
else:
archivesPassword = ""
transferno = cocoaTransferNo().decode()
sessionno = cocoaSessionNo().decode()
confirmation_mac = confirmation_message_mac + "\nUsername: " + archivesUsername + "\nTransfer: " + transferno + "-" + sessionno
confirmation = cocoaConfirmation(confirmation_mac)
bag_dir = make_bag(sys.argv[1])
parent_path = os.path.basename(os.path.normpath(sys.argv[1]))
if ziponly == 0:
close_session = cocoaCloseSession()
else:
close_session = 0
script_output = check_zip_and_send(bag_dir, sessionno, transferno, archivesUsername, archivesPassword, close_session, parent_path)
if script_output == "bagged":
cocoaTransferSuccess(bag_success_message)
else:
cocoaTransferSuccess(sfu_success_message)
| 35.655039
| 236
| 0.72709
| 5,085
| 0.276389
| 0
| 0
| 0
| 0
| 0
| 0
| 4,813
| 0.261605
|
583491d9c92a8b53e562e95c5e8cebcf67dc3f00
| 10,937
|
py
|
Python
|
code/python-neo/neo/core/basesignal.py
|
qniksefat/macaque_brain_causality_test
|
24cd5caee3ae79066ca37844cab931d04dcad977
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
code/python-neo/neo/core/basesignal.py
|
qniksefat/macaque_brain_causality_test
|
24cd5caee3ae79066ca37844cab931d04dcad977
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
code/python-neo/neo/core/basesignal.py
|
qniksefat/macaque_brain_causality_test
|
24cd5caee3ae79066ca37844cab931d04dcad977
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
This module implements :class:`BaseSignal`, an array of signals.
This is a parent class from which all signal objects inherit:
:class:`AnalogSignal` and :class:`IrregularlySampledSignal`
:class:`BaseSignal` inherits from :class:`quantities.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Constructor :meth:`__new__` for :class:`BaseSignal` doesn't exist.
Only child objects :class:`AnalogSignal` and :class:`IrregularlySampledSignal`
can be created.
'''
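# A minimal usage sketch (hypothetical values; assumes quantities is
# installed) -- only the child classes can be instantiated:
#
# >>> import quantities as pq
# >>> from neo.core import AnalogSignal
# >>> sig = AnalogSignal([[1.0], [2.0], [3.0]], units='mV', sampling_rate=1 * pq.Hz)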
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
import copy
import logging
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
from neo.core.dataobject import DataObject, ArrayDict
from neo.core.channelindex import ChannelIndex
logger = logging.getLogger("Neo")
class BaseSignal(DataObject):
'''
This is the base class from which all signal objects inherit:
:class:`AnalogSignal` and :class:`IrregularlySampledSignal`.
This class contains all common methods of both child classes.
It uses the following child class attributes:
:_necessary_attrs: a list of the attributes that the class must have.
:_recommended_attrs: a list of the attributes that the class may
optionally have.
'''
def _array_finalize_spec(self, obj):
'''
Called by :meth:`__array_finalize__`, used to customize behaviour of sub-classes.
'''
return obj
def __array_finalize__(self, obj):
'''
This is called every time a new signal is created.
It is the appropriate place to set default values for attributes
for a signal constructed by slicing or viewing.
User-specified values are only relevant for construction from the
constructor, and these are set in __new__ in the child object.
Then they are just copied over here. Default values for the
specific attributes for subclasses (:class:`AnalogSignal`
and :class:`IrregularlySampledSignal`) are set in
:meth:`_array_finalize_spec`
'''
super(BaseSignal, self).__array_finalize__(obj)
self._array_finalize_spec(obj)
# The additional arguments
self.annotations = getattr(obj, 'annotations', {})
# Add empty array annotations, because they cannot always be copied,
# but do not overwrite existing ones from slicing etc.
# This ensures the attribute exists
if not hasattr(self, 'array_annotations'):
self.array_annotations = ArrayDict(self._get_arr_ann_length())
# Globally recommended attributes
self.name = getattr(obj, 'name', None)
self.file_origin = getattr(obj, 'file_origin', None)
self.description = getattr(obj, 'description', None)
# Parent objects
self.segment = getattr(obj, 'segment', None)
self.channel_index = getattr(obj, 'channel_index', None)
@classmethod
def _rescale(cls, signal, units=None):
'''
Check that units are present, and rescale the signal if necessary.
This is called whenever a new signal is
created from the constructor. See :meth:`__new__' in
:class:`AnalogSignal` and :class:`IrregularlySampledSignal`
'''
if units is None:
if not hasattr(signal, "units"):
raise ValueError("Units must be specified")
elif isinstance(signal, pq.Quantity):
# This test always returns True, i.e. rescaling is always executed if one of the units
# is a pq.CompoundUnit. This is fine because rescaling is correct anyway.
if pq.quantity.validate_dimensionality(units) != signal.dimensionality:
signal = signal.rescale(units)
return signal
def rescale(self, units):
obj = super(BaseSignal, self).rescale(units)
obj.channel_index = self.channel_index
return obj
def __getslice__(self, i, j):
'''
Get a slice from :attr:`i` to :attr:`j`.
Doesn't get called in Python 3, :meth:`__getitem__` is called instead
'''
return self.__getitem__(slice(i, j))
def __ne__(self, other):
'''
Non-equality test (!=)
'''
return not self.__eq__(other)
def _apply_operator(self, other, op, *args):
'''
Handle copying metadata to the new signal
after a mathematical operation.
'''
self._check_consistency(other)
f = getattr(super(BaseSignal, self), op)
new_signal = f(other, *args)
new_signal._copy_data_complement(self)
# _copy_data_complement can't always copy array annotations,
# so this needs to be done locally
new_signal.array_annotations = copy.deepcopy(self.array_annotations)
return new_signal
def _get_required_attributes(self, signal, units):
'''
Return a list of the required attributes for a signal as a dictionary
'''
required_attributes = {}
for attr in self._necessary_attrs:
if 'signal' == attr[0]:
required_attributes[str(attr[0])] = signal
else:
required_attributes[str(attr[0])] = getattr(self, attr[0], None)
required_attributes['units'] = units
return required_attributes
def duplicate_with_new_data(self, signal, units=None):
'''
Create a new signal with the same metadata but different data.
Required attributes of the signal are used.
Note: Array annotations can not be copied here because length of data can change
'''
if units is None:
units = self.units
# else:
# units = pq.quantity.validate_dimensionality(units)
# signal is the new signal
required_attributes = self._get_required_attributes(signal, units)
new = self.__class__(**required_attributes)
new._copy_data_complement(self)
new.annotations.update(self.annotations)
# Note: Array annotations are not copied here, because it is not ensured
# that the same number of signals is used and they would possibly make no sense
# when combined with another signal
return new
def _copy_data_complement(self, other):
'''
Copy the metadata from another signal.
Required and recommended attributes of the signal are used.
Note: Array annotations can not be copied here because length of data can change
'''
all_attr = {self._recommended_attrs, self._necessary_attrs}
for sub_at in all_attr:
for attr in sub_at:
if attr[0] != 'signal':
setattr(self, attr[0], getattr(other, attr[0], None))
setattr(self, 'annotations', getattr(other, 'annotations', None))
# Note: Array annotations cannot be copied because the length of the data
# can be changed here, which would cause inconsistencies
def __rsub__(self, other, *args):
'''
Backwards subtraction (other-self)
'''
return self.__mul__(-1, *args) + other
def __add__(self, other, *args):
'''
Addition (+)
'''
return self._apply_operator(other, "__add__", *args)
def __sub__(self, other, *args):
'''
Subtraction (-)
'''
return self._apply_operator(other, "__sub__", *args)
def __mul__(self, other, *args):
'''
Multiplication (*)
'''
return self._apply_operator(other, "__mul__", *args)
def __truediv__(self, other, *args):
'''
Float division (/)
'''
return self._apply_operator(other, "__truediv__", *args)
def __div__(self, other, *args):
'''
Classic division (/) under Python 2; true division maps to __truediv__ above
'''
return self._apply_operator(other, "__div__", *args)
__radd__ = __add__
__rmul__ = __mul__  # multiplication is commutative; aliasing __sub__ here was a copy-paste slip
def merge(self, other):
'''
Merge another signal into this one.
The signal objects are concatenated horizontally
(column-wise, :func:`np.hstack`).
If the attributes of the two signal are not
compatible, an Exception is raised.
Required attributes of the signal are used.
'''
for attr in self._necessary_attrs:
if 'signal' != attr[0]:
if getattr(self, attr[0], None) != getattr(other, attr[0], None):
raise MergeError("Cannot merge these two signals as the %s differ." % attr[0])
if self.segment != other.segment:
raise MergeError(
"Cannot merge these two signals as they belong to different segments.")
if hasattr(self, "lazy_shape"):
if hasattr(other, "lazy_shape"):
if self.lazy_shape[0] != other.lazy_shape[0]:
raise MergeError("Cannot merge signals of different length.")
merged_lazy_shape = (self.lazy_shape[0], self.lazy_shape[1] + other.lazy_shape[1])
else:
raise MergeError("Cannot merge a lazy object with a real object.")
if other.units != self.units:
other = other.rescale(self.units)
stack = np.hstack(map(np.array, (self, other)))
kwargs = {}
for name in ("name", "description", "file_origin"):
attr_self = getattr(self, name)
attr_other = getattr(other, name)
if attr_self == attr_other:
kwargs[name] = attr_self
else:
kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
merged_annotations = merge_annotations(self.annotations, other.annotations)
kwargs.update(merged_annotations)
kwargs['array_annotations'] = self._merge_array_annotations(other)
signal = self.__class__(stack, units=self.units, dtype=self.dtype, copy=False,
t_start=self.t_start, sampling_rate=self.sampling_rate, **kwargs)
signal.segment = self.segment
if hasattr(self, "lazy_shape"):
signal.lazy_shape = merged_lazy_shape
# merge channel_index (move to ChannelIndex.merge()?)
if self.channel_index and other.channel_index:
signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]),
channel_ids=np.hstack(
[self.channel_index.channel_ids, other.channel_index.channel_ids]),
channel_names=np.hstack(
[self.channel_index.channel_names, other.channel_index.channel_names]))
else:
signal.channel_index = ChannelIndex(index=np.arange(signal.shape[1]))
return signal
| 37.713793
| 98
| 0.633172
| 9,932
| 0.90811
| 0
| 0
| 847
| 0.077444
| 0
| 0
| 4,932
| 0.450946
|
5835a4f4779f435b367bd40c05663242713c67ad
| 3,038
|
py
|
Python
|
Morocco model/scripts/cropland_processing.py
|
KTH-dESA/FAO
|
74459217a9e8ad8107b1d3a96fd52eebd93daebd
|
[
"MIT"
] | 3
|
2020-09-17T11:12:52.000Z
|
2021-03-31T09:24:02.000Z
|
Morocco model/scripts/cropland_processing.py
|
KTH-dESA/FAO
|
74459217a9e8ad8107b1d3a96fd52eebd93daebd
|
[
"MIT"
] | 101
|
2019-10-02T10:16:28.000Z
|
2021-06-05T06:42:55.000Z
|
Morocco model/scripts/cropland_processing.py
|
KTH-dESA/FAO
|
74459217a9e8ad8107b1d3a96fd52eebd93daebd
|
[
"MIT"
] | 2
|
2020-02-23T13:28:00.000Z
|
2021-03-31T10:02:46.000Z
|
import sys
sys.path.append("..") #this is to add the avobe folder to the package directory
import geopandas as gpd
import pandas as pd
import numpy as np
import os
import shapely.wkt  # needed below for shapely.wkt.loads
from nexustool.gis_tools import download_data, create_time_data, get_area_share, get_zonal_stats
from nexustool.weap_tools import reproject_raster, sample_raster
## Downloading solar irradiation and water table depth data
url = 'https://biogeo.ucdavis.edu/data/worldclim/v2.1/base/wc2.1_30s_srad.zip'
file_path = os.path.join('data', 'gis', 'srad', 'wc2.1_30s_srad.zip')
download_data(url, file_path)
url = 'https://souss-massa-dev.s3.us-east-2.amazonaws.com/post_build/Africa_model_wtd_v2.nc'
file_path = os.path.join('data', 'gis', 'wtd', 'Africa_model_wtd_v2.nc')
download_data(url, file_path)
## Reading the input data
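# 'snakemake' is a global object injected by the Snakemake workflow engine
# when this script is executed as part of a rule; it is not imported here.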
demand_path = str(snakemake.input.demand_points)
cropland_path = str(snakemake.input.cropland)
crop_df = pd.read_csv(cropland_path, encoding='utf-8')
geometry = crop_df['WKT'].map(shapely.wkt.loads)
cropland = gpd.GeoDataFrame(crop_df.drop(columns=['WKT']), crs="EPSG:26192", geometry=geometry)
provinces = gpd.read_file(os.path.join('data', 'gis', 'admin', 'provinces.gpkg'), encoding='utf-8')
output_file = str(snakemake.output)
output_folder = output_file.split(os.path.basename(output_file))[0]
## Convert coordinate reference system (crs)
MerchidSudMoroc = 26192
provinces.to_crs(epsg=MerchidSudMoroc, inplace=True)  # cropland was created directly in EPSG:26192
cropland = cropland.loc[cropland.area_m2 >= 100]  # keep only polygons of at least 100 m2
## Solar irradiation zonal statistics
# Loop through the 12 months of the year and get the mean solar irradiation of each month within each cropland polygon
cropland.to_crs(epsg=4326, inplace=True)
for month in range(1, 13):
cropland = get_zonal_stats(cropland,
os.path.join('data', 'gis', 'srad',
f'wc2.1_30s_srad_{str(month).zfill(2)}.tif'),
['mean'], all_touched=True).rename(columns={'mean': f'srad{month}'})
## Water table depth zonal statistics
cropland.crs = 4326
cropland = get_zonal_stats(cropland,
os.path.join('data', 'gis', 'wtd',
'Africa_model_wtd_v2.nc'),
['mean'], all_touched=True).rename(columns={'mean': 'wtd'})
cropland.crs = 4326
cropland.to_crs(epsg=MerchidSudMoroc, inplace=True)
## Creating time series data
df_cropland = create_time_data(cropland, 2019, 2050)
## Calculating the area share of each cropland area within each province
cropland.loc[cropland['province']=='Inezgane-Aït Melloul', 'province'] = 'Taroudannt' #Including Inezgane-Aït Melloul irrigated area into results from Taroudant due to lack of data for the former
cropland['area_share'] = get_area_share(cropland, 'province', 'area_m2')
df_cropland = pd.merge(df_cropland, cropland[['Demand point', 'area_share']], on='Demand point')
os.makedirs(output_folder, exist_ok = True)
df_cropland.to_csv(output_file, index=False)
| 40.506667
| 195
| 0.711982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,053
| 0.346382
|
58394701554d3a507c68ce7bd347905779a7cb27
| 891
|
py
|
Python
|
dl_data_validation_toolset/framework/report_gen/group.py
|
kwierman/dl_data_validation_toolset
|
fb0486abd000ba28c6474f8979762c92fb4ee038
|
[
"MIT"
] | 1
|
2017-08-24T00:46:47.000Z
|
2017-08-24T00:46:47.000Z
|
dl_data_validation_toolset/framework/report_gen/group.py
|
kwierman/dl_data_validation_toolset
|
fb0486abd000ba28c6474f8979762c92fb4ee038
|
[
"MIT"
] | 177
|
2017-04-10T23:03:27.000Z
|
2022-03-28T22:07:54.000Z
|
dl_data_validation_toolset/framework/report_gen/group.py
|
HEP-DL/dl_data_validation_toolset
|
fb0486abd000ba28c6474f8979762c92fb4ee038
|
[
"MIT"
] | null | null | null |
from .file import FileGenerator
from ..report.group import GroupReport
import logging
import asyncio
import os
class GroupGenerator(object):
logger = logging.getLogger("ddvt.rep_gen.grp")
def __init__(self, group):
self.meta = group
async def generate(self, parent):
self.logger.info("Generating Group Report: {}".format(self.meta.group))
self.temp_dir = os.path.join(parent.temp_dir, self.meta.group)
if not os.path.exists(self.temp_dir):
os.mkdir(self.temp_dir)
file_gens = [FileGenerator(i) for i in self.meta.full_filenames]
await asyncio.gather(*[i.generate(self) for i in file_gens])
msg = "Finished with subtasks for group {}".format(self.meta.group)
self.logger.info(msg)
self.report = GroupReport(self.meta.group, self.temp_dir)
self.report.file_reports = [i.report for i in file_gens]
self.report.render(self.temp_dir)
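# Rough usage sketch (hypothetical objects: 'parent' must expose a temp_dir
# attribute, 'group' must expose .group and .full_filenames):
#
# gen = GroupGenerator(group)
# asyncio.get_event_loop().run_until_complete(gen.generate(parent))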
| 31.821429
| 75
| 0.725028
| 777
| 0.872054
| 0
| 0
| 0
| 0
| 643
| 0.721661
| 84
| 0.094276
|
583a1302a3f7562a97c1476d70bc500c24d60c4f
| 174
|
py
|
Python
|
glanceclient/common/exceptions.py
|
citrix-openstack-build/python-glanceclient
|
32d9c42816b608220ae5692e573142dab6534604
|
[
"Apache-2.0"
] | 1
|
2019-09-11T11:56:19.000Z
|
2019-09-11T11:56:19.000Z
|
tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
tools/dockerize/webportal/usr/lib/python2.7/site-packages/glanceclient/common/exceptions.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
# This is here for compatability purposes. Once all known OpenStack clients
# are updated to use glanceclient.exc, this file should be removed
from glanceclient.exc import *
| 43.5
| 75
| 0.804598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 141
| 0.810345
|
583a2eef001a72cf9b9737ee6ef5ed10dc5f494d
| 1,458
|
py
|
Python
|
api/scpca_portal/views/filter_options.py
|
AlexsLemonade/scpca-portal
|
d60d6db5abe892ed58764128269df936778c6fd7
|
[
"BSD-3-Clause"
] | null | null | null |
api/scpca_portal/views/filter_options.py
|
AlexsLemonade/scpca-portal
|
d60d6db5abe892ed58764128269df936778c6fd7
|
[
"BSD-3-Clause"
] | 85
|
2021-07-27T14:33:55.000Z
|
2022-03-28T20:18:41.000Z
|
api/scpca_portal/views/filter_options.py
|
AlexsLemonade/scpca-portal
|
d60d6db5abe892ed58764128269df936778c6fd7
|
[
"BSD-3-Clause"
] | null | null | null |
from django.http import JsonResponse
from rest_framework import status, viewsets
from scpca_portal.models import Project
class FilterOptionsViewSet(viewsets.ViewSet):
def list(self, request):
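# Collect the distinct comma-separated values of each filterable field
# across all projects, then return them as deduplicated option lists.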
dicts = (
Project.objects.order_by()
.values("diagnoses", "seq_units", "technologies", "modalities")
.distinct()
)
diagnoses_options = set()
seq_units_options = set()
technologies_options = set()
modalities = set()
for value_set in dicts:
if value_set["diagnoses"]:
for value in value_set["diagnoses"].split(", "):
diagnoses_options.add(value)
if value_set["seq_units"]:
for value in value_set["seq_units"].split(", "):
seq_units_options.add(value)
if value_set["technologies"]:
for value in value_set["technologies"].split(", "):
technologies_options.add(value)
if value_set["modalities"]:
for value in value_set["modalities"].split(", "):
modalities.add(value)
response_dict = {
"diagnoses": list(diagnoses_options),
"seq_units": list(seq_units_options),
"technologies": list(technologies_options),
"modalities": list(modalities),
}
return JsonResponse(response_dict, status=status.HTTP_200_OK)
| 33.136364
| 75
| 0.577503
| 1,333
| 0.914266
| 0
| 0
| 0
| 0
| 0
| 0
| 208
| 0.142661
|
583a4439342b3be3a1f5a61fbbd79630bf4f80cd
| 409
|
py
|
Python
|
cords/selectionstrategies/SL/__init__.py
|
krishnatejakk/AUTOMATA
|
fd0cf58058e39660f88d9d6b4101e30a497f6ce2
|
[
"MIT"
] | null | null | null |
cords/selectionstrategies/SL/__init__.py
|
krishnatejakk/AUTOMATA
|
fd0cf58058e39660f88d9d6b4101e30a497f6ce2
|
[
"MIT"
] | null | null | null |
cords/selectionstrategies/SL/__init__.py
|
krishnatejakk/AUTOMATA
|
fd0cf58058e39660f88d9d6b4101e30a497f6ce2
|
[
"MIT"
] | 1
|
2022-03-16T05:55:12.000Z
|
2022-03-16T05:55:12.000Z
|
from .craigstrategy import CRAIGStrategy
from .dataselectionstrategy import DataSelectionStrategy
from .glisterstrategy import GLISTERStrategy
from .randomstrategy import RandomStrategy
from .submodularselectionstrategy import SubmodularSelectionStrategy
from .gradmatchstrategy import GradMatchStrategy
from .fixedweightstrategy import FixedWeightStrategy
from .adapweightsstrategy import AdapWeightsStrategy
| 51.125
| 68
| 0.904645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
583a53eef1dad89d42938f5028c87aba4efb30bb
| 10,917
|
py
|
Python
|
pycost/rocch.py
|
tfawcett/pycost
|
69f96866295dba937a23f36c8f24f2f6acdaccbd
|
[
"BSD-3-Clause"
] | 1
|
2019-10-05T10:37:47.000Z
|
2019-10-05T10:37:47.000Z
|
pycost/rocch.py
|
tfawcett/pycost
|
69f96866295dba937a23f36c8f24f2f6acdaccbd
|
[
"BSD-3-Clause"
] | null | null | null |
pycost/rocch.py
|
tfawcett/pycost
|
69f96866295dba937a23f36c8f24f2f6acdaccbd
|
[
"BSD-3-Clause"
] | 1
|
2020-06-12T17:13:14.000Z
|
2020-06-12T17:13:14.000Z
|
"""
Metrics to calculate and manipulate the ROC Convex Hull on a classification task given scores.
"""
# Author: Tom Fawcett <tom.fawcett@gmail.com>
from collections import namedtuple
from math import sqrt
from typing import List, Dict, Tuple, Union
# DESCRIPTION:
#
# This program computes the convex hull of a set of ROC points
# (technically, the upper left triangular convex hull, bounded
# by (0,0) and (1,1)). The ROC Convex Hull is used to find dominant
# (and locally best) classifiers in ROC space. For more information
# on the ROC convex hull and its uses, see the references below.
#
# FP and TP are the False Positive (X axis) and True Positive (Y axis)
# values for the point.
#
#
# REFERENCES:
#
# The first paper below is probably best for an introduction and
# general discussion of the ROC Convex Hull and its uses.
#
# 1) Provost, F. and Fawcett, T. "Analysis and visualization of
# classifier performance: Comparison under imprecise class and cost
# distributions". In Proceedings of the Third International
# Conference on Knowledge Discovery and Data Mining (KDD-97),
# pp.43-48. AAAI Press.
#
# 2) Provost, F. and Fawcett, T. "Robust Classification Systems for
# Imprecise Environments".
#
# 3) Provost, F., Fawcett, T., and Kohavi, R. "The Case
# Against Accuracy Estimation for Comparing Induction Algorithms".
# Available from:
#
#
# BUG REPORTS / SUGGESTIONS / QUESTIONS: Tom Fawcett <tom.fawcett@gmail.com>
#
#
"""
Typical use is something like this:
rocch = ROCCH(keep_intermediate=False)
for clf in classifiers:
y_scores = clf.decision_function(y_test)
rocch.fit(clfname, roc_curve(y_scores, y_true))
...
plt.plot(rocch.hull())
rocch.describe()
"""
Point = namedtuple( "Point", ["x", "y", "clfname"] )
Point.__new__.__defaults__ = ("",) # make clfname optional
INFINITY: float = float( "inf" )
class ROCCH( object ):
"""ROC Convex Hull.
Maintains the upper-left convex hull of the ROC points of every classifier fitted so far.
"""
_hull: List[Point]
def __init__(self, keep_intermediate=False):
"""Initialize the object."""
self.keep_intermediate = keep_intermediate
self.classifiers: Dict[str, List[Tuple]] = { }
self._hull = [Point( 0, 0, "AllNeg" ), Point( 1, 1, "AllPos" )]
def fit(self, clfname: str, points):
"""Fit (add) a classifier's ROC points to the ROCCH.
:param clfname: A classifier name or identifier. This is only used to record the
identity of the classifier producing the points. It can be anything, such as a
(classifier, threshold) pair.
TODO: Let clfname be a string or a list; add some way to incorporate info per point so we
can associate each point with a parameter.
:param points: A sequence of ROC points, contained in a list or array. Each point should
be an (FP, TP) pair. TODO: Make this more general.
:return: None
"""
points_instances = [Point( x, y, clfname ) for (x, y) in points]
points_instances.extend( self._hull )
points_instances.sort( key=lambda pt: pt.x )
hull = []
# TODO: Make this more efficient by simply using pointers rather than append-pop.
while points_instances:
hull.append( points_instances.pop( 0 ) )
# Now test the top three on new_hull
test_top = True
while len( hull ) >= 3 and test_top:
turn_dir = turn( *hull[-3:] )
if turn_dir > 0: # CCW turn, this introduced a concavity.
hull.pop( -2 )
elif turn_dir == 0: # Co-linear, should we keep it?
if not self.keep_intermediate:
# No, treat it as if it's under the hull
hull.pop( -2 )
else: # Treat this as convex
test_top = False
else: # CW turn, this is convex
test_top = False
self._hull = hull
def _check_hull(self) -> None:
"""Check a list of hull points for convexity.
This is a simple utility function for testing.
Throws an AssertionError if a hull segment is concave or if the terminal AllNeg and
AllPos are not present.
Colinear segments (turn==0) will be considered violations unless keep_intermediate is on.
"""
hull = self._hull
assert len( hull ) >= 2, "Hull is damaged"
assert hull[0].clfname == "AllNeg", "First hull point is not AllNeg"
assert hull[-1].clfname == "AllPos", "Last hull point is not AllPos"
for hull_idx in range( len( hull ) - 2 ):
segment = hull[hull_idx: hull_idx + 3]
turn_val = turn( *segment )
assert turn_val <= 0, f"Concavity in hull: {segment}"
if not self.keep_intermediate:
assert turn_val < 0, "Intermediate (colinear) point in hull"
@property
def hull(self) -> List[Tuple]:
"""
Return a list of points constituting the convex hull of classifiers in ROC space.
Returns a list of tuples (FP, TP, CLF) where each (FP,TP) is a point in ROC space
and CLF is the classifier producing that performance point.
"""
# Defined just in case postprocessing needs to be done.
return self._hull
def dominant_classifiers(self) -> List[Tuple]:
"""
Return a list describing the hull in terms of the dominant classifiers.
Start at point (1,1) and work counter-clockwise down the hull to (0,0).
Iso-performance line slope starts at 0.0 and works up to infinity.
:return: A list consisting of (prob_min, prob_max, point) where
:rtype: List[Tuple]
"""
slope = 0.0
last_point = None
last_slope = None
segment_right_boundary: Union[Point, None] = None
dominant_list: List[Tuple] = []
# TODO: Check for hull uninitialized.
point: Point
for point in self._hull:
if last_point is not None:
slope: float = calculate_slope( point, last_point )
else:
segment_right_boundary = point
if last_slope is not None:
if self.keep_intermediate or last_slope != slope:
dominant_list.append( (last_slope, slope, segment_right_boundary) )
last_slope = slope
segment_right_boundary = point
else: # last_slope is undefined
last_slope = slope
last_point = point
if last_slope != INFINITY:
slope = INFINITY
# Output final point
dominant_list.append( (last_slope, slope, segment_right_boundary) )
return dominant_list
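# Illustrative reading of the output (hypothetical numbers): an entry
# (0.5, 2.0, Point(x=0.2, y=0.7, clfname='nb')) says classifier 'nb' is
# optimal for all iso-performance slopes between 0.5 and 2.0, where the
# slope encodes the operating conditions (class and cost ratios).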
def best_classifiers_for_conditions(self, class_ratio=1.0, cost_ratio=1.0):
"""
Given a set of operating conditions (class and cost ratios), return best classifiers.
Given a class ratio (P/N) and a cost ratio (cost(FP),cost(FN)), return a set of
classifiers that will perform optimally for those conditions. The class ratio is the
fraction of positives per negative. The cost ratio is the cost of a False Positive
divided by the cost of a False Negative.
The return value will be a list of either one or two classifiers. If the conditions
identify a single best classifier, the result will be simply:
[ (clf, 1.0) ]
indicating that clf should be chosen.
If the conditions are between the performance of two classifiers, the result will be:
[ (clf1, p1), (clf2, p2) ]
indicating that clf1's decisions should be sampled at a rate of p1 and clf2's at a rate
of p2, with p1 and p2 summing to 1.
:param class_ratio, float: The ratio of positives to negatives: P/N
:param cost_ratio, float: The ratio of the cost of a False Positive error to a False
Negative Error: cost(FP)/cost(FN)
:return:
:rtype:
"""
assert class_ratio > 0, "Class ratio must be positive"
assert cost_ratio > 0, "Cost ratio must be positive"
# NOTE: the selection logic for these operating conditions is not implemented yet.
def calculate_slope(pt1, pt2: Point):
"""
Return the slope from pt1 to pt2, or inf if slope is infinite
:param pt1:
:type pt1: Point
:param pt2:
:type pt2: Point
:return:
:rtype: float
"""
dx = pt2.x - pt1.x
dy = pt2.y - pt1.y
if dx == 0:
return INFINITY
else:
return dy / dx
def _check_hull(hull):
"""Check a list of hull points for convexity.
This is a simple utility function for testing.
Throws an AssertionError if a hull segment is concave.
Colinear segments (turn==0) are not considered violations.
:param hull: A list of Point instances describing an ROC convex hull.
:return: None
"""
for hull_idx in range( len( hull ) - 2 ):
segment = hull[hull_idx: hull_idx + 3]
assert turn( *segment ) <= 0, f"Concavity in hull: {segment}"
def ROC_order(pt1, pt2: Point) -> bool:
"""Predicate for determining ROC_order for sorting.
Either pt1's x is ahead of pt2's x, or the x's are equal and pt1's y is ahead of pt2's y.
"""
return (pt1.x < pt2.x) or (pt1.x == pt2.x and pt1.y < pt2.y)
def compute_theta(p1, p2: Point) -> float:
"""Compute theta, an ordering function on a point pair.
Theta has the same properties as the angle between the horizontal axis and
the line segment between the points, but is much faster to compute than
arctangent. Range is 0 to 360. Defined on P.353 of _Algorithms in C_.
"""
dx = p2.x - p1.x
ax = abs( dx )
dy = p2.y - p1.y
ay = abs( dy )
if dx == 0 and dy == 0:
t = 0
else:
t = dy / (ax + ay)
# Adjust for quadrants two through four
if dx < 0:
t = 2 - t
elif dy < 0:
t = 4 + t
return t * 90.0
def euclidean(p1, p2: Point) -> float:
"""Compute Euclidean distance.
"""
return sqrt( (p1.x - p2.x)**2 + (p1.y - p2.y)**2 )
def turn(a, b, c: Point) -> float:
"""Determine the turn direction going from a to b to c.
Going from a->b->c, is the turn clockwise, counterclockwise, or straight.
positive => CCW
negative => CW
zero => colinear
See: https://algs4.cs.princeton.edu/91primitives/
>>> a = Point(1,1)
>>> b = Point(2,2)
>>> turn(a, b, Point(3,2))
-1
>>> turn(a, b, Point(2,3))
1
>>> turn(a, b, Point(3,3))
0
>>> turn(a, b, Point(1.5, 1.5)) == 0
True
>>> turn(a, b, Point(1.5,1.7)) > 0
True
:param Point a:
:param Point b:
:param Point c:
:rtype: float
"""
return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y)
if __name__ == "__main__":
import doctest
doctest.testmod()
# End of rocch.py
| 33.798762
| 100
| 0.612989
| 6,358
| 0.582394
| 0
| 0
| 406
| 0.03719
| 0
| 0
| 6,839
| 0.626454
|
583a8bbe4d63a96ce53555ed1fbf8f8d31b49bdb
| 846
|
py
|
Python
|
all_raspi_code_backup/DriveTesting.py
|
lord-pradhan/SnowBot
|
82a0b3439dc203bf27725e293d6e56bcad720c09
|
[
"MIT"
] | 2
|
2020-05-31T07:37:59.000Z
|
2021-03-24T13:43:39.000Z
|
all_raspi_code_backup/DriveTesting.py
|
lord-pradhan/SnowBot
|
82a0b3439dc203bf27725e293d6e56bcad720c09
|
[
"MIT"
] | null | null | null |
all_raspi_code_backup/DriveTesting.py
|
lord-pradhan/SnowBot
|
82a0b3439dc203bf27725e293d6e56bcad720c09
|
[
"MIT"
] | 1
|
2019-12-13T19:21:12.000Z
|
2019-12-13T19:21:12.000Z
|
"""
Program: DriveTesting.py
Revised On: 12/01/2019
"""
### Library Imports
from DriveArduino import DriveArduino
import numpy as np
from time import sleep
from sys import exit
from signal import signal, SIGINT
###
### CTRL + C Signal Handler & Resource Cleanup
def signal_handler(sig, frame):
"""Handler for CTRL + C clean exit."""
print('Quitting program.')
cleanup()
def cleanup():
"""Resource cleanup."""
drive.close()
print('Resource cleanup completed.')
exit(0)
signal(SIGINT, signal_handler)
###
### Arduino Configuration
addr = 0x08
drive = DriveArduino(addr)
###
### Main Program
print('Press CTRL + C to exit.')
while True:
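# Four wheel RPM setpoints; the sign split presumably drives the two sides
# in opposite directions (e.g. for an in-place turn) -- adjust as needed.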
setpoints = np.array([25, 25, -25, -25])
drive.set_rpm(setpoints)
sleep(1)
drive.update()
print(drive.rpm)
print(drive.pwm)
print()
###
| 16.92
| 46
| 0.652482
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 312
| 0.368794
|
583ba4ab4b346b94532e02cbbc5e159874800f72
| 363
|
py
|
Python
|
src/sentry/utils/strings.py
|
rogerhu/sentry
|
ee2b190e92003abe0f538b2df5b686e425df1200
|
[
"BSD-3-Clause"
] | 1
|
2015-12-13T18:27:54.000Z
|
2015-12-13T18:27:54.000Z
|
src/sentry/utils/strings.py
|
simmetria/sentry
|
9731f26adb44847d1c883cca108afc0755cf21cc
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/utils/strings.py
|
simmetria/sentry
|
9731f26adb44847d1c883cca108afc0755cf21cc
|
[
"BSD-3-Clause"
] | null | null | null |
def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length] + '...'
return value
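# Example: truncatechars("hello world", 5) returns 'hello...'.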
| 25.928571
| 55
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.4573
|
583d59db015ae71e12d80d6cb5e3e2aba7e8e79c
| 817
|
py
|
Python
|
setup.py
|
Ozencb/cli-pto
|
445e5133340adb25dcf5d14c4203643b7a8741c2
|
[
"MIT"
] | 6
|
2020-04-30T18:32:38.000Z
|
2020-07-28T15:37:04.000Z
|
setup.py
|
Ozencb/cli-pto
|
445e5133340adb25dcf5d14c4203643b7a8741c2
|
[
"MIT"
] | 1
|
2020-04-30T18:34:08.000Z
|
2020-05-01T10:16:49.000Z
|
setup.py
|
Ozencb/cli-pto
|
445e5133340adb25dcf5d14c4203643b7a8741c2
|
[
"MIT"
] | null | null | null |
import os
import re
from setuptools import find_packages, setup
def get_version(package):
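# Read __version__ out of <package>/__init__.py, e.g. a line
# __version__ = "1.0.0" yields the string "1.0.0".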
path = os.path.join(os.path.dirname(__file__), package, "__init__.py")
with open(path, "rb") as f:
init_py = f.read().decode("utf-8")
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
setup(
name='cli-pto',
author='Özenç Bilgili',
description='A CLI text editor with encryption.',
version=get_version('cli_pto'),
url='https://github.com/ozencb/cli-pto',
packages=find_packages(),
install_requires=['prompt-toolkit', 'Pygments', 'pycryptodome'],
entry_points={'console_scripts': 'cli-pto = cli_pto.clipto:main'},
license=open('LICENSE').read(),
keywords=['text', 'editor', 'encryption', 'encrypted', 'password', 'manager']
)
| 31.423077
| 85
| 0.641371
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 318
| 0.388278
|
583f4f6dd761e12a8aa4ad8d387f0bdd2b82f1de
| 9,545
|
py
|
Python
|
users/models.py
|
scoremaza/church_alive_backend
|
2ee7260aea51ec39972588dc4a346aa152356aa3
|
[
"MIT"
] | null | null | null |
users/models.py
|
scoremaza/church_alive_backend
|
2ee7260aea51ec39972588dc4a346aa152356aa3
|
[
"MIT"
] | null | null | null |
users/models.py
|
scoremaza/church_alive_backend
|
2ee7260aea51ec39972588dc4a346aa152356aa3
|
[
"MIT"
] | null | null | null |
import os
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.utils.deconstruct import deconstructible
@deconstructible
class GenerateProfileImagePath(object):
'''
Callable that builds a consistent storage path for uploaded profile
images; an instance of it is assigned to the FileField's upload_to
property below.
'''
def __init__(self) -> None:
pass
def __call__(self, instance, filename):
ext = filename.split('.')[-1]
path = f'media/accounts/{instance.user.id}/images'
name = f'profile_image.{ext}'  # interpolate the extension; the literal '.ext' dropped the braces
return os.path.join(path, name)
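# Example (hypothetical): user id 42 uploading "me.png" yields
# media/accounts/42/images/profile_image.png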
user_profile_image_path = GenerateProfileImagePath()
class PositionType(models.Model):
'''
Model definition for PositionType
Here the user is assigned a position within the organization,
granting access and assignment capabilities that enable allocation
throughout the system.
'''
position_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
position = models.CharField(max_length=150)
sort_order = models.IntegerField(default=-1)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
"""Meta definition for PositionType."""
verbose_name = "Position"
verbose_name_plural = "Positions"
def __str__(self):
"""Unicode representation of PositionType."""
return f'{self.position}'
def get_absolute_url(self):
"""Return absolute url for PositionType."""
pass
class Profile (models.Model):
'''
Model definition for Profile
Here the user establishes a presence in the application; privacy
settings and friend management are controlled through this profile.
Django's built-in User model is used as the manager to keep things
together.
'''
profile_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.FileField(upload_to=user_profile_image_path, blank=True, null=True)
profile_name = models.CharField(max_length=200, null=True)
date_of_birth = models.DateField(null=True)
create_date = models.DateTimeField(auto_now_add=True)
last_update = models.DateTimeField(auto_now=True)
position_type = models.ForeignKey(PositionType, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
"""Meta definition for Profile."""
verbose_name = "Profile"
verbose_name_plural = "Profiles"
def __str__(self):
"""Unicode representation of Profile."""
return f'{self.user.username}\'s Profile'
def get_absolute_url(self):
"""Return absolute url for Profile."""
pass
class VisibilityLevel(models.Model):
"""
Model definition for VisibilityLevel.
Here the user specifies whether a friend has the
capability to view or send information.
"""
visibility_level_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=150)
class Meta:
"""Meta definition for VisibilityLevel."""
verbose_name = 'VisibilityLevel'
verbose_name_plural = 'VisibilityLevels'
def __str__(self):
"""Unicode representation of VisibilityLevel."""
return self.name
def get_absolute_url(self):
"""Return absolute url for VisibilityLevel."""
pass
class PrivacyFlagType(models.Model):
'''
Model definition for PrivacyFlagType.
Here we define the kinds of information (such as questions asked of the
user) whose answers are stored in PrivacyFlag.
'''
privacy_flag_type_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
field_name = models.CharField(max_length=150,blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
sort_order = models.IntegerField(default=-1)
class Meta:
"""Meta definition for PrivacyFlagType."""
verbose_name = "PrivacyFlagType"
verbose_name_plural = "PrivacyFlagTypes"
def __str__(self):
"""Unicode representation of PrivacyFlagType."""
return self.field_name
def get_absolute_url(self):
"""Return absolute url for PrivacyFlagType."""
pass
class PrivacyFlag(models.Model):
'''
Model definition for PrivacyFlag
Here we store the values controlling what
can be seen and by whom.
'''
privacy_flag_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
privacyflagtype = models.ForeignKey(PrivacyFlagType, on_delete=models.CASCADE)
visibility_level = models.ForeignKey(VisibilityLevel, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
"""Meta definition for PrivacyFlag."""
verbose_name = "PrivacyFlag"
verbose_name_plural = "PrivacyFlags"
def __str__(self):
"""Unicode representation of PrivacyFlag."""
return f'{self.profile.user.username} has {self.privacyflagtype.field_name} privacy'
def get_absolute_url(self):
"""Return absolute url for PrivacyFlag."""
pass
class ProfileAttributeType(models.Model):
'''
Model definition for ProfileAttributeType
Here we have the types of values entered by the user
'''
profile_attribute_type_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
attribute_type = models.CharField(max_length=500)
sort_order = models.IntegerField(default=-1)
privacy_flag_type = models.ForeignKey(PrivacyFlagType, on_delete=models.CASCADE)
class Meta:
"""Meta definition for ProfileAttributeType."""
verbose_name = "ProfileAttributeType"
verbose_name_plural = "ProfileAttributeTypes"
def __str__(self):
"""Unicode representation of ProfileAttributeType."""
return f'{self.attribute_type}'
def get_absolute_url(self):
"""Return absolute url for ProfileAttributeType."""
pass
class ProfileAttribute(models.Model):
'''
Model definition for ProfileAttribute.
Here we have the values themselves given by the user
'''
profile_attribute_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE)
profile_attribute_type = models.ForeignKey(ProfileAttributeType, on_delete=models.CASCADE)
response = models.CharField(max_length=250)
createDate = models.DateField(auto_now_add=True)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
"""Meta definition for ProfileAttribute."""
verbose_name = "ProfileAttribute"
verbose_name_plural = "ProfileAttributes"
def __str__(self):
"""Unicode representation of ProfileAttribute."""
return f'{self.profile.user.username} response {self.response}'
def get_absolute_url(self):
"""Return absolute url for ProfileAttribute."""
pass
class AlertType(models.Model):
'''
Model definition for AlertType.
Here we give the user the capability to receive alerts for different
kinds of news from their friends.
'''
alert_type_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=150)
class Meta:
"""Meta definition for AlertType."""
verbose_name = "AlertType"
verbose_name_plural = "AlertTypes"
def __str__(self):
"""Unicode representation of AlertType."""
return f'{self.name}'
def get_absolute_url(self):
"""Return absolute url for AlertType."""
pass
class Alert(models.Model):
'''
Model definition for Alert.
Here we complete the alert machinery: the system can hide
alerts, and users manage their news feed and its
notifications through this model.
'''
alert_id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(User, on_delete=models.CASCADE)
create_date = models.DateTimeField(auto_now_add=True)
timestamp = models.DateTimeField(auto_now_add=True)
alert_type = models.ForeignKey(AlertType, on_delete=models.CASCADE)
is_hidden = models.BooleanField(default=False)
message = models.TextField()
class Meta:
"""Meta definition for Alert."""
verbose_name = "Alert"
verbose_name_plural = "Alerts"
def __str__(self):
"""Unicode representation of Alert."""
return f'{self.alert_type.name}: {self.message}'
def get_absolute_url(self):
"""Return absolute url for Alert."""
pass
| 32.355932
| 103
| 0.683394
| 9,095
| 0.952855
| 0
| 0
| 502
| 0.052593
| 0
| 0
| 3,676
| 0.385123
|
5840120e03a13bb96c98c4c82966a3349be1a938
| 1,012
|
py
|
Python
|
format_errors.py
|
drupchen/correct-ewts
|
0a23db216b2fb78a8c73476ca55cebf23a7d2706
|
[
"Apache-2.0"
] | null | null | null |
format_errors.py
|
drupchen/correct-ewts
|
0a23db216b2fb78a8c73476ca55cebf23a7d2706
|
[
"Apache-2.0"
] | null | null | null |
format_errors.py
|
drupchen/correct-ewts
|
0a23db216b2fb78a8c73476ca55cebf23a7d2706
|
[
"Apache-2.0"
] | null | null | null |
import re
from collections import defaultdict
with open('input/errors-ewts.csv') as f:
raw = f.read()
#raw = raw.replace('`not expected', '` not expected')
lines = raw.split('\n')
data = []
for line in lines:
columns = re.split(r'(?:^"|","|",,"|"$)', line)
msgs = [a for a in columns[3].split(',') if a != '']
entry = [columns[1], columns[2], msgs]
data.append(entry)
error_types = []
by_error_type = defaultdict(list)
for entry in data:
msgs = entry[2]
for msg in msgs:
msg = msg.replace('line 1: ', '')
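# Collapse whatever appears between backticks to the placeholder `X`, so
# messages that differ only in the offending character count as one type.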
error_pattern = re.sub(r'`[^`]*`', r'`X`', msg)
error_types.append(error_pattern)
by_error_type[error_pattern].append(entry)
error_types = sorted(list(set(error_types)))
for type, entries in by_error_type.items():
print('{} occurences:\t\t{}'.format(len(entries), type))
etc_count = 0
for line in lines:
if 'character `.`.' in line:
etc_count += 1
print('number of lines with misplaced dots:', etc_count)
print('ok')
| 27.351351
| 60
| 0.614625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.211462
|
5840ef989a734ba50cfa0c0f408fab21378c995e
| 344
|
py
|
Python
|
exercise-django/user/views.py
|
theseana/goodfellas
|
9ad9d9759d193cd64ec71876b1dab155bb9ba2c7
|
[
"MIT"
] | null | null | null |
exercise-django/user/views.py
|
theseana/goodfellas
|
9ad9d9759d193cd64ec71876b1dab155bb9ba2c7
|
[
"MIT"
] | null | null | null |
exercise-django/user/views.py
|
theseana/goodfellas
|
9ad9d9759d193cd64ec71876b1dab155bb9ba2c7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from user.forms import UserForm
def register(request):
form = UserForm()
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
form.save()
return render(request, 'user/registeration/register.html', {'form': form})
| 24.571429
| 78
| 0.659884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 71
| 0.206395
|
5841ecc637b36ee324105b2737f2b6315d8d0459
| 3,609
|
py
|
Python
|
shark/example/env/catch_ball_env.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
shark/example/env/catch_ball_env.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
shark/example/env/catch_ball_env.py
|
7starsea/shark
|
5030f576da6f5998728d80170480e68a3debfe79
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import PIL
import torch
import torchvision.transforms as TF
from types import SimpleNamespace
from gym import spaces, Env
from .SharkExampleEnv import CatchBallSimulate
# internal_screen_h, internal_screen_w = 80, 140
class CatchBallEnvBase(Env):
metadata = {'render.modes': ['human']}
def __init__(self, screen=(80, 120), num_balls=10, action_penalty=.02, waiting=0, is_continuous=False):
self.game = CatchBallSimulate(screen, ball=(6, 6), ball_speed=(5, 2), bar=(5, 15),
action_penalty=action_penalty,
waiting=waiting, is_continuous=is_continuous)
self.num_balls = num_balls
self.index = 0
self.screen = np.zeros(self.game.screen_size + (3,), dtype=np.uint8)
h, w = screen
self.observation_space = spaces.Space(shape=(h, w, 1), dtype=np.uint8)
if is_continuous:
low, high = self.game.action_range
self.action_space = spaces.Box(low=low, high=high, shape=(1,))
else:
self.action_space = spaces.Discrete(n=3)
self.spec = SimpleNamespace(id='CatchBall_%d' % num_balls)
self.ax = None
self.fig = None
def set_action_range(self, low, high):
assert self.game.is_continuous and "Only continuous action supports set_action_range."
self.game.action_range = low, high
self.action_space = spaces.Box(low=low, high=high, shape=(1,))
def seed(self, seed):
self.game.seed(seed)
def close(self):
if self.ax is not None:  # render() may never have been called
self.ax.clear()
def render(self, mode='human'):
import matplotlib.pyplot as plt
if not self.ax:
self.fig = plt.figure(1, figsize=(8, 10))
self.ax = plt.subplot(111)
self.ax.clear()
self.screen.fill(0)
self.game.get_display(self.screen)
self.ax.imshow(self.screen)
plt.pause(0.02)
def reset(self):
self.game.reset()
self.index = 0
state = np.zeros_like(self.screen)
self.game.get_display(state)
return state
def step(self, action):
# In the discrete setting, the action should be 0, 1 or 2.
is_game_over, reward = self.game.step(int(action))
if is_game_over:
if self.num_balls > 0:
self.index += 1
is_game_over = self.index >= self.num_balls
else:
is_game_over = reward < .5
self.game.reset_ball()
next_state = np.zeros_like(self.screen)
self.game.get_display(next_state)
return next_state, reward, is_game_over, {}
class CatchBallEnv(CatchBallEnvBase):
def __init__(self, *args, **kwargs):
super(CatchBallEnv, self).__init__(*args, **kwargs)
self.kwargs = dict(dtype=torch.float32)
h, w, _ = self.observation_space.shape
h, w = int(h / 2), int(w / 2)
self.observation_space = spaces.Space(shape=(1, h, w), dtype=np.float32)
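# Preprocess frames: grayscale, downsample to half resolution, and convert
# to a CHW float tensor in [0, 1].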
self.composer = TF.Compose([TF.Grayscale(), TF.Resize((h, w)), TF.ToTensor()])
def _preprocess(self, image):
x = PIL.Image.fromarray(image)
image = self.composer(x)
image = image.to(**self.kwargs)
return image
def step(self, action):
if isinstance(action, np.ndarray):
action = int(action.reshape(-1)[0])
state, r, done, info = super().step(action)
return self._preprocess(state), r, done, info
def reset(self):
state = super().reset()
return self._preprocess(state)
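# Minimal interaction sketch (assumes the usual gym loop; actions are
# 0/1/2 in the default discrete setting):
#
# env = CatchBallEnv(num_balls=5)
# state = env.reset()
# done = False
# while not done:
#     state, reward, done, info = env.step(env.action_space.sample())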
| 31.112069
| 107
| 0.600443
| 3,339
| 0.925187
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.059019
|
5842b3ae714ec5029aefbd5f4f522395e8920892
| 4,652
|
py
|
Python
|
examples/launch_tor_with_simplehttpd.py
|
kneufeld/txtorcon
|
fbe2fc70cae00aa6228a2920ef048b282872dbab
|
[
"MIT"
] | null | null | null |
examples/launch_tor_with_simplehttpd.py
|
kneufeld/txtorcon
|
fbe2fc70cae00aa6228a2920ef048b282872dbab
|
[
"MIT"
] | null | null | null |
examples/launch_tor_with_simplehttpd.py
|
kneufeld/txtorcon
|
fbe2fc70cae00aa6228a2920ef048b282872dbab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Create a new tor node and add a simple http server to it, serving a given
directory over http. The server is single-threaded and very limited.
There are two arguments that can be passed via the commandline:
-p\tThe internet-facing port the hidden service should listen on
-d\tThe directory to serve via http
Example:
./launch_tor_with_simplehttpd.py -p 8080 -d /opt/files/
'''
import SimpleHTTPServer
import SocketServer
import functools
import getopt
import os
import sys
import tempfile
import thread
from twisted.internet import reactor
import txtorcon
def print_help():
print __doc__
def print_tor_updates(prog, tag, summary):
# Prints some status messages while booting tor
print 'Tor booting [%d%%]: %s' % (prog, summary)
def start_httpd(httpd):
# Create a new thread to serve requests
print 'Starting httpd...'
return thread.start_new_thread(httpd.serve_forever, ())
def stop_httpd(httpd):
# Kill the httpd
print 'Stopping httpd...'
httpd.shutdown()
def setup_complete(config, port, proto):
# Callback from twisted when tor has booted.
# We create a reference to this function via functools.partial that
# provides us with a reference to 'config' and 'port', twisted then adds
# the 'proto' argument
print '\nTor is now running. The hidden service is available at'
print '\n\thttp://%s:%i\n' % (config.HiddenServices[0].hostname, port)
# This is probably more secure than any other httpd...
print '### DO NOT RELY ON THIS SERVER TO TRANSFER FILES IN A SECURE WAY ###'
def setup_failed(arg):
# Callback from twisted if tor could not boot. Nothing to see here, move
# along.
print 'Failed to launch tor', arg
reactor.stop()
def main():
# Parse the commandline-options
try:
opts, args = getopt.getopt(sys.argv[1:], 'hd:p:')
except getopt.GetoptError as excp:
print str(excp)
print_help()
return 1
serve_directory = '.' # The default directory to serve files from
hs_public_port = 8011 # The default port the hidden service is available on
web_port = 4711 # The real server's local port
web_host = '127.0.0.1' # The real server is bound to localhost
for o, a in opts:
if o == '-d':
serve_directory = a
elif o == '-p':
hs_public_port = int(a)
elif o == '-h':
print_help()
return
else:
print 'Unknown option "%s"' % (o, )
return 1
# Sanitize path and set working directory there (for SimpleHTTPServer)
serve_directory = os.path.abspath(serve_directory)
if not os.path.exists(serve_directory):
        print 'Path "%s" does not exist, can\'t serve from there...' % \
(serve_directory, )
return 1
os.chdir(serve_directory)
# Create a new SimpleHTTPServer and serve it from another thread.
# We create a callback to Twisted to shut it down when we exit.
print 'Serving "%s" on %s:%i' % (serve_directory, web_host, web_port)
httpd = SocketServer.TCPServer((web_host, web_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
start_httpd(httpd)
reactor.addSystemEventTrigger('before', 'shutdown', stop_httpd, httpd=httpd)
# Create a directory to hold our hidden service. Twisted will unlink it
# when we exit.
hs_temp = tempfile.mkdtemp(prefix='torhiddenservice')
reactor.addSystemEventTrigger('before', 'shutdown',
functools.partial(txtorcon.util.delete_file_or_tree, hs_temp))
# Add the hidden service to a blank configuration
config = txtorcon.TorConfig()
config.SOCKSPort = 0
config.ORPort = 9089
config.HiddenServices = [txtorcon.HiddenService(config, hs_temp,
['%i %s:%i' % (hs_public_port,
web_host,
web_port)])]
config.save()
# Now launch tor
# Notice that we use a partial function as a callback so we have a
# reference to the config object when tor is fully running.
tordeferred = txtorcon.launch_tor(config, reactor,
progress_updates=print_tor_updates)
tordeferred.addCallback(functools.partial(setup_complete, config,
hs_public_port))
tordeferred.addErrback(setup_failed)
reactor.run()
if __name__ == '__main__':
sys.exit(main())
| 33.710145
| 96
| 0.635211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,986
| 0.426913
|
5842cd8ea1a4359a03a5653c005a52f4e2eeeb68
| 5,123
|
py
|
Python
|
homeroom/wsgi.py
|
openshift-labs/workshop-homeroom
|
a0f0c144eef679e35a93201d11973329be9924fb
|
[
"Apache-2.0"
] | 14
|
2019-09-28T20:42:29.000Z
|
2021-11-23T13:12:42.000Z
|
homeroom/wsgi.py
|
openshift-homeroom/workshop-homeroom
|
a0f0c144eef679e35a93201d11973329be9924fb
|
[
"Apache-2.0"
] | 1
|
2019-10-15T02:55:57.000Z
|
2019-10-15T02:55:57.000Z
|
homeroom/wsgi.py
|
openshift-homeroom/workshop-homeroom
|
a0f0c144eef679e35a93201d11973329be9924fb
|
[
"Apache-2.0"
] | 3
|
2020-02-11T16:55:59.000Z
|
2021-08-13T13:16:27.000Z
|
import os
import json
import threading
import time
import yaml
from flask import Flask
from flask import render_template
from kubernetes.client.rest import ApiException
from kubernetes.client.configuration import Configuration
from kubernetes.config.incluster_config import load_incluster_config
from kubernetes.client.api_client import ApiClient
from openshift.dynamic import DynamicClient
from openshift.dynamic.exceptions import ResourceNotFoundError
# Work out namespace operating in.
service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount'
with open(os.path.join(service_account_path, 'namespace')) as fp:
namespace = fp.read().strip()
# Setup REST API client access.
load_incluster_config()
import urllib3
urllib3.disable_warnings()
instance = Configuration()
instance.verify_ssl = False
Configuration.set_default(instance)
api_client = DynamicClient(ApiClient())
try:
route_resource = api_client.resources.get(
api_version='route.openshift.io/v1', kind='Route')
except ResourceNotFoundError:
route_resource = None
ingress_resource = api_client.resources.get(
api_version='extensions/v1beta1', kind='Ingress')
# Setup loading of workshops or live monitoring.
workshops = []
application_name = os.environ.get('APPLICATION_NAME', 'homeroom')
def filter_out_hidden(workshops):
for workshop in workshops:
if workshop.get('visibility', 'visible') != 'hidden':
yield workshop
def monitor_workshops():
global workshops
while True:
active_workshops = []
if route_resource is not None:
try:
routes = route_resource.get(namespace=namespace)
for route in routes.items:
annotations = route.metadata.annotations
if annotations:
if annotations.get('homeroom/group') == application_name:
name = route.metadata.name
title = annotations.get('homeroom/title') or name
description = annotations.get('homeroom/description') or ''
scheme = 'http'
if route.tls and route.tls.termination:
scheme = 'https'
url = '%s://%s' % (scheme, route.spec.host)
active_workshops.append(dict(title=title,
description=description, url=url))
except ApiException as e:
print('ERROR: Error looking up routes. %s' % e)
except Exception as e:
print('ERROR: Error looking up routes. %s' % e)
try:
ingresses = ingress_resource.get(namespace=namespace)
for ingress in ingresses.items:
                annotations = ingress.metadata.annotations
if annotations:
if annotations.get('homeroom/group') == application_name:
name = ingress.metadata.name
title = annotations.get('homeroom/title') or name
description = annotations.get('homeroom/description') or ''
scheme = 'http'
if ingress.tls:
scheme = 'https'
url = '%s://%s' % (scheme, ingress.spec.rules[0].host)
active_workshops.append(dict(title=title,
description=description, url=url))
except ApiException as e:
print('ERROR: Error looking up ingress. %s' % e)
except Exception as e:
print('ERROR: Error looking up ingress. %s' % e)
if workshops != active_workshops:
workshops[:] = active_workshops
print('WORKSHOPS', workshops)
time.sleep(15)
if os.path.exists('/opt/app-root/configs/workshops.yaml'):
with open('/opt/app-root/configs/workshops.yaml') as fp:
content = fp.read()
if content:
workshops = list(filter_out_hidden(yaml.safe_load(content)))
if os.path.exists('/opt/app-root/configs/workshops.json'):
with open('/opt/app-root/configs/workshops.json') as fp:
content = fp.read()
workshops = list(filter_out_hidden(json.loads(content)))
if not workshops:
monitor_thread = threading.Thread(target=monitor_workshops)
monitor_thread.daemon = True
monitor_thread.start()
# Setup the Flask application.
app = Flask(__name__)
banner_images = {
'homeroom': 'homeroom-logo.png',
'openshift': 'openshift-logo.svg',
'dedicated': 'openshift-dedicated-logo.svg',
'okd': 'okd-logo.svg',
}
@app.route('/')
def index():
title = os.environ.get('HOMEROOM_TITLE', 'Workshops')
branding = os.environ.get('HOMEROOM_BRANDING', 'openshift')
banner_image = banner_images.get(branding, banner_images['openshift'])
visible_workshops = list(filter_out_hidden(workshops))
return render_template('workshops.html', title=title,
banner_image=banner_image, workshops=visible_workshops)
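# Hedged example (not part of the original file): the shape of
# /opt/app-root/configs/workshops.yaml that the loaders above expect --
# a list of entries with title/description/url, plus an optional
# "visibility: hidden" that filter_out_hidden() drops.
#
#   - title: Workshop One
#     description: An example workshop.
#     url: https://workshop-one.example.com
#     visibility: visible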
| 31.819876
| 87
| 0.622877
| 0
| 0
| 153
| 0.029865
| 412
| 0.080422
| 0
| 0
| 986
| 0.192465
|
584381c8993e76aeeaae4fc35eb8cf9d4869915b
| 3,417
|
py
|
Python
|
rever/__init__.py
|
limecrayon/rever
|
0446ad9707fb1e81b3101625959fd16bdaac1853
|
[
"MIT"
] | 2
|
2018-02-16T08:31:48.000Z
|
2018-11-19T02:31:07.000Z
|
rever/__init__.py
|
limecrayon/rever
|
0446ad9707fb1e81b3101625959fd16bdaac1853
|
[
"MIT"
] | null | null | null |
rever/__init__.py
|
limecrayon/rever
|
0446ad9707fb1e81b3101625959fd16bdaac1853
|
[
"MIT"
] | null | null | null |
import functools
import time
__all__ = ('ReachedMaxRetries', 'rever')
class ReachedMaxRetries(Exception):
def __init__(self, func):
Exception.__init__(self, "Function {} raised exception due to max number of retries performed".format(func))
self.func = func
def rever(**rever_kwargs):
"""
rever_kwargs default values defined:
If backoff is True, then times and pause will not be initialized, but they will be calculated.
backoff: True
    total_pause: 1
steps: 10
exception: BaseException
raises: True
prior: None
If backoff is False, then total_pause and steps will be initialized, but do not get used.
backoff: False
times: 1
pause: 0
exception: BaseException
raises: True
prior: None
"""
backoff = True
total_pause = 1
steps = 10
times = 1
pause = 0
exception = BaseException
raises = True
prior = None
if "backoff" not in rever_kwargs:
rever_kwargs["backoff"] = backoff
if "total_pause" not in rever_kwargs:
rever_kwargs["total_pause"] = total_pause
if "steps" not in rever_kwargs:
rever_kwargs["steps"] = steps
if "times" not in rever_kwargs:
if not rever_kwargs["backoff"]:
rever_kwargs["times"] = times
if "pause" not in rever_kwargs:
if not rever_kwargs["backoff"]:
rever_kwargs["pause"] = pause
if "exception" not in rever_kwargs:
rever_kwargs["exception"] = exception
if "raises" not in rever_kwargs:
rever_kwargs["raises"] = raises
if "prior" not in rever_kwargs:
rever_kwargs["prior"] = prior
initialized_kwargs = {key: rever_kwargs[key] for key in rever_kwargs}
def rever_decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
nonlocal rever_kwargs
try:
if args or kwargs:
r = func(*args, **kwargs)
rever_kwargs = {key: initialized_kwargs[key] for key in initialized_kwargs}
return r
else:
r = func()
rever_kwargs = {key: initialized_kwargs[key] for key in initialized_kwargs}
return r
except rever_kwargs["exception"]:
if rever_kwargs["backoff"]:
rever_kwargs["pause"] = \
.5 * (rever_kwargs["total_pause"] / 2 ** (rever_kwargs["steps"]))
if rever_kwargs["steps"] >= 0:
time.sleep(rever_kwargs["pause"])
rever_kwargs["steps"] -= 1
if rever_kwargs["prior"]:
rever_kwargs["prior"]()
return wrapper(*args, **kwargs)
else:
if rever_kwargs["times"] > 0:
time.sleep(rever_kwargs["pause"])
rever_kwargs["times"] -= 1
if rever_kwargs["prior"]:
rever_kwargs["prior"]()
return wrapper(*args, **kwargs)
if rever_kwargs["raises"] and (rever_kwargs["steps"] < 0 or rever_kwargs["times"] <= 0):
raise ReachedMaxRetries(func)
else:
return None
return wrapper
return rever_decorator
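# --- Hedged usage sketch (not part of the original module) ---
# Retries the hypothetical flaky_read() on OSError with the default
# exponential backoff; raises=False makes it return None once retries
# are exhausted instead of raising ReachedMaxRetries.
@rever(exception=OSError, raises=False)
def flaky_read(path):
    with open(path) as fp:
        return fp.read()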
| 30.238938
| 116
| 0.550776
| 207
| 0.060579
| 0
| 0
| 1,595
| 0.466784
| 0
| 0
| 859
| 0.25139
|
5844f2ad1f289327e37c42bac510107e36f8f9d5
| 25,811
|
py
|
Python
|
gui(12102018).py
|
hanhydro/T2H
|
f4922ce721eb450c7d91370f180e6c860e9ec6be
|
[
"MIT"
] | null | null | null |
gui(12102018).py
|
hanhydro/T2H
|
f4922ce721eb450c7d91370f180e6c860e9ec6be
|
[
"MIT"
] | null | null | null |
gui(12102018).py
|
hanhydro/T2H
|
f4922ce721eb450c7d91370f180e6c860e9ec6be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import (QApplication, QDialog,
QProgressBar, QPushButton, QMessageBox)
import matplotlib.pyplot as plt
from matplotlib import style
import T2H, PLOT
import flopy
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
#%%
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("T2H Graphical User Interface")
MainWindow.resize(1280, 800)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
#%% QFrames
self.frame_1 = QtWidgets.QFrame(self.centralWidget)
self.frame_1.setGeometry(QtCore.QRect(810, 70, 461, 201))
self.frame_1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_1.setObjectName("frame_2")
self.frame_2 = QtWidgets.QFrame(self.centralWidget)
self.frame_2.setGeometry(QtCore.QRect(810, 280, 461, 101))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.frame_3 = QtWidgets.QFrame(self.centralWidget)
self.frame_3.setGeometry(QtCore.QRect(810, 390, 461, 31))
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
#%% QLabels
self.sedK = QtWidgets.QLabel(self.frame_2)
self.sedK.setGeometry(QtCore.QRect(30, 10, 141, 16))
self.sedK.setObjectName("sedK")
self.aqK = QtWidgets.QLabel(self.frame_2)
self.aqK.setGeometry(QtCore.QRect(30, 40, 141, 16))
self.aqK.setObjectName("aqK")
self.faultK = QtWidgets.QLabel(self.frame_2)
self.faultK.setGeometry(QtCore.QRect(30, 70, 141, 16))
self.faultK.setObjectName("faultK")
self.sedKN = QtWidgets.QLabel(self.centralWidget)
self.sedKN.setGeometry(QtCore.QRect(910, 500, 141, 16))
self.sedKN.setObjectName("sedKN")
self.sedKNlabel = QtWidgets.QLabel(self.centralWidget)
self.sedKNlabel.setGeometry(QtCore.QRect(1100, 500, 61, 16))
self.sedKNlabel.setObjectName("sedKNlabel")
self.aquiferKNlabel = QtWidgets.QLabel(self.centralWidget)
self.aquiferKNlabel.setGeometry(QtCore.QRect(1100, 520, 61, 16))
self.aquiferKNlabel.setObjectName("aquiferKNlabel")
self.aqKN = QtWidgets.QLabel(self.centralWidget)
self.aqKN.setGeometry(QtCore.QRect(910, 520, 81, 16))
self.aqKN.setObjectName("aqKN")
self.faultKN = QtWidgets.QLabel(self.centralWidget)
self.faultKN.setGeometry(QtCore.QRect(910, 540, 81, 16))
self.faultKN.setObjectName("faultKN")
self.faultKNlabel = QtWidgets.QLabel(self.centralWidget)
self.faultKNlabel.setGeometry(QtCore.QRect(1100, 540, 61, 16))
self.faultKNlabel.setObjectName("faultKNlabel")
self.label_21 = QtWidgets.QLabel(self.frame_3)
self.label_21.setGeometry(QtCore.QRect(10, 7, 141, 16))
self.label_21.setObjectName("label_21")
self.visoptionsLabel = QtWidgets.QLabel(self.centralWidget)
self.visoptionsLabel.setGeometry(QtCore.QRect(20, 540, 141, 16))
self.visoptionsLabel.setObjectName("visoptionsLabel")
self.fileLabel = QtWidgets.QLabel(self.centralWidget)
self.fileLabel.setGeometry(QtCore.QRect(810, 4, 60, 16))
self.fileLabel.setObjectName("fileLabel")
self.fileLabel_path = QtWidgets.QLabel(self.centralWidget)
self.fileLabel_path.setGeometry(QtCore.QRect(880, 4, 320, 16))
self.fileLabel_path.setObjectName("fileLabel_path")
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(814, 51, 241, 16))
self.label.setObjectName("label")
self.nz = QtWidgets.QLabel(self.centralWidget)
self.nz.setGeometry(QtCore.QRect(840, 104, 141, 16))
self.nz.setObjectName("nz")
self.targetperiod = QtWidgets.QLabel(self.centralWidget)
self.targetperiod.setGeometry(QtCore.QRect(840, 80, 151, 16))
self.targetperiod.setObjectName("targetperiod")
self.nzfixed = QtWidgets.QLabel(self.centralWidget)
self.nzfixed.setGeometry(QtCore.QRect(840, 128, 141, 16))
self.nzfixed.setObjectName("nzfixed")
self.constrecharge = QtWidgets.QLabel(self.centralWidget)
self.constrecharge.setGeometry(QtCore.QRect(840, 176, 151, 16))
self.constrecharge.setObjectName("constrecharge")
#
self.hiniratio = QtWidgets.QLabel(self.centralWidget)
self.hiniratio.setGeometry(QtCore.QRect(840, 242, 151, 16))
self.hiniratio.setObjectName("hiniratio")
self.datvar = QtWidgets.QLabel(self.centralWidget)
self.datvar.setGeometry(QtCore.QRect(840, 152, 161, 16))
self.datvar.setObjectName("datvar")
# Recharge input
self.constrecharge_2 = QtWidgets.QLabel(self.centralWidget)
self.constrecharge_2.setGeometry(QtCore.QRect(840, 200, 151, 16))
self.constrecharge_2.setObjectName("constrecharge_2")
# Image pane
self.image = QtWidgets.QLabel(self.centralWidget)
self.image.setGeometry(QtCore.QRect(10, 10, 780, 520))
self.image.setObjectName("image")
self.pixmap = QtGui.QPixmap("logo.png")
self.image.setPixmap(self.pixmap)
#%% QLineEdits
self.sedKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.sedKlineEdit.setGeometry(QtCore.QRect(260, 10, 113, 21))
self.sedKlineEdit.setObjectName("sedKlineEdit")
self.sedKlineEdit.setText("547.5")
#
self.aqKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.aqKlineEdit.setGeometry(QtCore.QRect(260, 40, 113, 21))
self.aqKlineEdit.setObjectName("aqKlineEdit")
self.aqKlineEdit.setText("36.5")
#
self.faultKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.faultKlineEdit.setGeometry(QtCore.QRect(260, 70, 113, 21))
self.faultKlineEdit.setObjectName("faultKlineEdit")
self.faultKlineEdit.setText("0.0365")
#
self.nzfline = QtWidgets.QLineEdit(self.centralWidget)
self.nzfline.setGeometry(QtCore.QRect(1070, 128, 113, 21))
self.nzfline.setObjectName("nzfline")
self.nzfline.setText("10")
#
self.nzline = QtWidgets.QLineEdit(self.centralWidget)
self.nzline.setGeometry(QtCore.QRect(1070, 104, 113, 21))
self.nzline.setObjectName("nzline")
self.nzline.setText("40")
#
self.datline = QtWidgets.QLineEdit(self.centralWidget)
self.datline.setGeometry(QtCore.QRect(1070, 152, 113, 21))
self.datline.setObjectName("datline")
self.datline.setText("-10000")
#
self.hiniratioLineEdit = QtWidgets.QLineEdit(self.centralWidget)
self.hiniratioLineEdit.setGeometry(QtCore.QRect(1070, 242, 113, 21))
self.hiniratioLineEdit.setObjectName("hiniratioLineEdit")
self.hiniratioLineEdit.setText("0.9")
#
self.datvarline = QtWidgets.QLineEdit(self.centralWidget)
self.datvarline.setGeometry(QtCore.QRect(1070, 176, 113, 21))
self.datvarline.setObjectName("datvarline")
self.datvarline.setText("-3000")
self.rchline = QtWidgets.QLineEdit(self.centralWidget)
self.rchline.setGeometry(QtCore.QRect(1070, 200, 113, 21))
self.rchline.setObjectName("rchline")
self.rchline.setText("0.05")
# Ma input lineedit
self.maline = QtWidgets.QLineEdit(self.centralWidget)
self.maline.setGeometry(QtCore.QRect(1070, 80, 113, 21))
self.maline.setObjectName("maline")
self.maline.setText("12.5")
#%% QPushButtons
self.load = QtWidgets.QPushButton(self.centralWidget)
self.load.setGeometry(QtCore.QRect(1100, -1, 71, 32))
self.load.setObjectName("loadButton")
self.load.clicked.connect(self.fileloader)
self.load1 = QtWidgets.QPushButton(self.centralWidget)
self.load1.setGeometry(QtCore.QRect(1170, -1, 101, 32))
self.load1.setObjectName("loadButton1")
self.load1.clicked.connect(self.fileloader)
self.applyButton = QtWidgets.QPushButton(self.frame_1)
self.applyButton.setGeometry(QtCore.QRect(380, 60, 81, 81))
self.applyButton.setObjectName("applyButton")
self.applyButton.clicked.connect(self.applyclicked)
self.fileDialog_3 = QtWidgets.QPushButton(self.frame_2)
self.fileDialog_3.setGeometry(QtCore.QRect(380, 20, 81, 71))
self.fileDialog_3.setObjectName("fileDialog_3")
self.fileDialog_3.clicked.connect(self.applyCalClicked)
# Model run button
self.ModelRunButton = QtWidgets.QPushButton(self.centralWidget)
self.ModelRunButton.setGeometry(QtCore.QRect(640, 620, 113, 32))
self.ModelRunButton.setObjectName("ModelRunButton")
self.ModelRunButton.clicked.connect(self.run)
self.QuitButton = QtWidgets.QPushButton(self.centralWidget)
self.QuitButton.setGeometry(QtCore.QRect(760, 620, 113, 32))
self.QuitButton.setObjectName("QuitButton")
self.QuitButton.clicked.connect(QCoreApplication.instance().quit)
self.VtkOutputButton = QtWidgets.QPushButton(self.centralWidget)
self.VtkOutputButton.setGeometry(QtCore.QRect(880, 620, 113, 32))
self.VtkOutputButton.setObjectName("VtkOutputButton")
# self.VtkOutputButton.clicked.connect(self.vtk)
self.PlotButton = QtWidgets.QPushButton(self.centralWidget)
self.PlotButton.setGeometry(QtCore.QRect(460, 560, 113, 32))
self.PlotButton.setObjectName("PlotButton")
self.PlotButton.clicked.connect(self.plot)
#%% QGraphicsViews
self.figure = plt.figure(figsize=(12,12))
self.canvas = FigureCanvas(self.figure)
#%% QComboBoxes
# File combo box
self.fileBox = QtWidgets.QComboBox(self.centralWidget)
self.fileBox.setGeometry(QtCore.QRect(808, 25, 461, 26))
self.fileBox.setObjectName("fileBox")
# Solver selection combo box
self.solverBox = QtWidgets.QComboBox(self.frame_3)
self.solverBox.setGeometry(QtCore.QRect(63, 2, 281, 26))
self.solverBox.setObjectName("solverBox")
self.solverBox.addItem("xMD")
self.solverBox.addItem("GMRES")
#
self.visComboBox = QtWidgets.QComboBox(self.centralWidget)
self.visComboBox.setGeometry(QtCore.QRect(10, 560, 441, 26))
self.visComboBox.setObjectName("visComboBox")
self.visComboBox.addItem("Cross Section")
self.visComboBox.addItem("Fault Plane")
self.visComboBox.addItem("Vertical Flow Barriers (VFB)")
self.visComboBox.addItem("Horizontal Flow Barriers (HFB)")
#%% QCheckBoxes
#
self.elevdependentChecker = QtWidgets.QCheckBox(self.centralWidget)
self.elevdependentChecker.setGeometry(QtCore.QRect(860, 220, 231, 20))
self.elevdependentChecker.setObjectName("elevdependentChecker")
#%% QProgressBars
self.progress = QProgressBar(self.centralWidget)
self.progress.setGeometry(10, 620, 600, 25)
self.progress.setMaximum(100)
#%% Mainwindows
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1024, 22))
self.menuBar.setObjectName("menuBar")
self.menuT2H_Main = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Main.setObjectName("menuT2H_Main")
self.menuT2H_Checker = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Checker.setObjectName("menuT2H_Checker")
self.menuT2H_Plot = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Plot.setObjectName("menuT2H_Plot")
MainWindow.setMenuBar(self.menuBar)
self.mainToolBar = QtWidgets.QToolBar(MainWindow)
self.mainToolBar.setObjectName("mainToolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menuBar.addAction(self.menuT2H_Main.menuAction())
self.menuBar.addAction(self.menuT2H_Checker.menuAction())
self.menuBar.addAction(self.menuT2H_Plot.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#%% Functions
def applyclicked(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.fileLabel_path.setText("/tisc_output/topo_" + self.Ma +"0Ma.txt")
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.rchline.setEnabled(False)
self.maline.setEnabled(False)
self.nzline.setEnabled(False)
self.nzfline.setEnabled(False)
self.datline.setEnabled(False)
self.datvarline.setEnabled(False)
self.hiniratioLineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
def applyCalClicked(self):
self.perm_sed = self.sedKlineEdit.text()
self.Kconst = self.aqKlineEdit.text()
self.hydchr = self.faultKlineEdit.text()
self.sedKNlabel.setText(str(float(self.perm_sed)/float(self.rchline.text())))
self.aquiferKNlabel.setText(str(float(self.Kconst)/float(self.rchline.text())))
self.faultKNlabel.setText(str(float(self.hydchr)/float(self.rchline.text())))
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.sedKlineEdit.setEnabled(False)
self.aqKlineEdit.setEnabled(False)
self.faultKlineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
#%%
def run(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.model = T2H.main(self.Ma, self.nz, self.nz_fixed, self.inz, self.dx,\
self.dy, self.dat, self.dat_var, self.idat\
, self.rech, self.perm_sed, self.target_row,\
self.Kconst, self.hratio, self.hydchr,\
self.iskip, self.ivtk, self.h_tol)
self.mf = self.model.mf
self.mf.dis.check()
self.mf.write_input()
self.mf.run_model()
return self.mf
def plot(self):
try:
self.mf
except AttributeError:
QMessageBox.about(self.centralWidget, "Warning", "Please run a model first")
else:
            self.vcb = self.visComboBox.currentText()
print(self.vcb)
if self.vcb == "Cross Section":
figheadxsect, axheadxsect = plt.subplots(figsize=(40,5))
self.mfxsect = PLOT.fmfxsect(self.mf, self.model.mfdis, self.target_row, axheadxsect).mfxsect
self.a = PLOT.head(self.mf, self.model.fdirmodel).a
self.headc = PLOT.headc(self.mfxsect, self.a)
self.headcontour = self.headc.headcontour
self.gdplot = self.mfxsect.plot_grid(color='r', linewidths=0.2)
self.BCplot = self.mfxsect.plot_ibound(self.model.ibound, color_noflow = 'black',\
color_ch = 'blue', head = self.a)
self.canvas.draw()
print("plot")
def fileloader(self):
self.path = os.getcwd() + "/tisc_output/"
self.l = os.listdir(self.path)
self.bdtopo = [0]*len(self.l)
self.topo = [0]*len(self.l)
self.fault = [0]*len(self.l)
self.sedthick = [0]*len(self.l)
for file in range(len(self.l)):
if self.l[file].startswith("bdtopo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.bdtopo[file] = float(self.l[file][7:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("topo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.topo[file] = float(self.l[file][5:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("fault"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.fault[file] = float(self.l[file][6:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("sedthick"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.sedthick[file] = float(self.l[file][9:]\
.split("Ma.txt")[0])
self.a = list(filter((0).__ne__, self.topo))
self.a.sort()
self.b = list(filter((0).__ne__, self.bdtopo))
self.b.sort()
self.c = list(filter((0).__ne__, self.fault))
self.c.sort()
self.d = list(filter((0).__ne__, self.sedthick))
self.d.sort()
self.df = []
for nfile in range(len(self.a)):
if self.b.count(self.a[nfile]) == 1:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "n", "n"]
self.df.append(data)
elif self.b.count(self.a[nfile]) == 0:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "n", "n"]
self.df.append(data)
for age in range(len(self.a)):
if self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | No Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | No Sediments")
#%%
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "T2H Graphical User Interface"))
self.applyButton.setText(_translate("MainWindow", "Apply"))
self.sedK.setText(_translate("MainWindow", "Sediment K (m/yr)"))
self.aqK.setText(_translate("MainWindow", "Aquifer K (m/yr)"))
self.faultK.setText(_translate("MainWindow", "Fault zone K (m/yr)"))
self.fileDialog_3.setText(_translate("MainWindow", "Apply"))
self.sedKN.setText(_translate("MainWindow", "Sediment K / N:"))
self.sedKNlabel.setText(_translate("MainWindow", "N/A"))
self.aquiferKNlabel.setText(_translate("MainWindow", "N/A"))
self.aqKN.setText(_translate("MainWindow", "Aquifer K / N:"))
self.faultKN.setText(_translate("MainWindow", "Fault K / N:"))
self.faultKNlabel.setText(_translate("MainWindow", "N/A"))
self.label_21.setText(_translate("MainWindow", "Solver"))
self.ModelRunButton.setText(_translate("MainWindow", "Execute"))
self.load.setText(_translate("MainWindow", "Load"))
self.load1.setText(_translate("MainWindow", "Set selected"))
self.QuitButton.setText(_translate("MainWindow", "Abort"))
self.VtkOutputButton.setText(_translate("MainWindow", "VTK output"))
self.PlotButton.setText(_translate("MainWindow", "Plot"))
self.visoptionsLabel.setText(_translate("MainWindow", "Visualization options"))
self.fileLabel.setText(_translate("MainWindow", "File: "))
self.fileLabel_path.setText(_translate("MainWindow", "path"))
self.label.setText(_translate("MainWindow", "*dx = dy = 1,000 m fixed in this version"))
self.nz.setText(_translate("MainWindow", "Number of layers (nz)"))
self.targetperiod.setText(_translate("MainWindow", "Target period (Ma)"))
self.nzfixed.setText(_translate("MainWindow", "Fixed layers (nz_fixed)"))
self.constrecharge.setText(_translate("MainWindow", "Datum of variable dz (m)"))
self.hiniratio.setText(_translate("MainWindow", "Initial head ratio to topo."))
self.elevdependentChecker.setText(_translate("MainWindow", "Elevation-dependent recharge"))
self.datvar.setText(_translate("MainWindow", "Model datum (m)"))
self.constrecharge_2.setText(_translate("MainWindow", "Const. Recharge (m/yr)"))
self.menuT2H_Main.setTitle(_translate("MainWindow", "T2H Main"))
self.menuT2H_Checker.setTitle(_translate("MainWindow", "T2H Checker"))
self.menuT2H_Plot.setTitle(_translate("MainWindow", "T2H Plot"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 47.975836
| 109
| 0.618728
| 24,704
| 0.957111
| 0
| 0
| 0
| 0
| 0
| 0
| 3,175
| 0.12301
|
584603df6f6456851f5001f52a65f8c0ba217511
| 226
|
py
|
Python
|
py/loadpage.py
|
katiehuang1221/onl_ds5_project_2
|
dc9243d6bdc0c1952a761b2ed3e91a8548202b42
|
[
"MIT"
] | null | null | null |
py/loadpage.py
|
katiehuang1221/onl_ds5_project_2
|
dc9243d6bdc0c1952a761b2ed3e91a8548202b42
|
[
"MIT"
] | 1
|
2021-01-20T02:34:07.000Z
|
2021-01-20T02:34:07.000Z
|
py/loadpage.py
|
katiehuang1221/onl_ds5_project_2
|
dc9243d6bdc0c1952a761b2ed3e91a8548202b42
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
from IPython.core.display import display, HTML
def get_soup(url):
response = requests.get(url)
page = response.text
soup = BeautifulSoup(page, "lxml")
return soup
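# Hedged usage sketch (illustrative URL, not part of the original file):
if __name__ == "__main__":
    soup = get_soup("https://example.com")
    print(soup.title)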
| 22.6
| 46
| 0.734513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.026549
|
58483a9eb35db037bda84433b79608b84ed9f2c4
| 1,912
|
py
|
Python
|
hard-gists/5409581/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/5409581/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/5409581/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
'''
rename_selected_relation_box.py
Written by Alex Forsythe (awforsythe.com)
When executed, attempts to locate any selected box within any relation
constraint in the scene. If a selected relation box is found, prompts the user
to enter a new name for that box. Allows relation boxes to be given more
descriptive names. I'd recommend binding this script to a keyboard shortcut
(see MotionBuilder/bin/config/Scripts/ActionScript.txt) for quick access.
'''
from pyfbsdk import *
def get_first(f, xs):
'''
Returns the first x in xs for which f returns True, or else None.
'''
for x in xs:
if f(x):
return x
return None
def get_selected_relation_box():
'''
Returns a relation constraint box which has been selected by the user, or
None if no relation boxes are selected.
'''
for relation in [c for c in FBSystem().Scene.Constraints if c.Is(FBConstraintRelation_TypeInfo())]:
box = get_first(lambda box: box.Selected, relation.Boxes)
if box:
return box
return None
def get_new_box_name(box):
'''
Prompts the user to enter a new name for the given box. Returns the new
name if the user confirms the rename operation, or None if the user
cancels.
'''
button, string = FBMessageBoxGetUserValue(
'Rename Box?',
'Current name: %s' % box.Name,
box.Name,
FBPopupInputType.kFBPopupString,
'Rename',
'Cancel')
return string if button == 1 else None
def rename_selected_relation_box():
'''
Prompts the user to enter a new name for a selected relation constraint
box. If no boxes are selected, has no effect.
'''
box = get_selected_relation_box()
if box:
name = get_new_box_name(box)
if name:
box.Name = name
if __name__ in ('__main__', '__builtin__'):
rename_selected_relation_box()
| 30.83871
| 103
| 0.670502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,048
| 0.548117
|
584861b23601a5bd9f5d5e6bce09eb691a44f1c2
| 4,010
|
py
|
Python
|
osu_scene_switcher.py
|
FunOrange/osu-scene-switcher
|
471fc654fe4a222abaf4fbcf062e8302dd52bb18
|
[
"MIT"
] | 4
|
2021-05-22T20:56:36.000Z
|
2022-03-02T00:19:45.000Z
|
osu_scene_switcher.py
|
FunOrange/obs-osu-noise-suppression-switcher
|
471fc654fe4a222abaf4fbcf062e8302dd52bb18
|
[
"MIT"
] | null | null | null |
osu_scene_switcher.py
|
FunOrange/obs-osu-noise-suppression-switcher
|
471fc654fe4a222abaf4fbcf062e8302dd52bb18
|
[
"MIT"
] | 1
|
2021-01-29T18:28:04.000Z
|
2021-01-29T18:28:04.000Z
|
import os
import time
import obspython as obs
initial_load = False
status_file = ''
idle_scene = ''
playing_scene = ''
def undb(db):
return pow(10, db/20)
def script_description():
return 'Automatically switch scenes upon entering osu! gameplay.\n\n' \
'See github page for setup instructions.\n\n' \
'Stream Companion must be open to take effect.'
def script_properties():
props = obs.obs_properties_create()
obs.obs_properties_add_text(props, 'status_file', 'osu! status file location', obs.OBS_TEXT_DEFAULT)
obs.obs_properties_add_text(props, 'playing_scene', 'Scene to switch to when entering gameplay', obs.OBS_TEXT_DEFAULT)
obs.obs_properties_add_text(props, 'idle_scene', 'Scene to switch to when exiting gameplay', obs.OBS_TEXT_DEFAULT)
return props
def script_load(settings):
global status_file
global idle_scene
global playing_scene
status_file = obs.obs_data_get_string(settings, 'status_file')
idle_scene = obs.obs_data_get_string(settings, 'idle_scene')
playing_scene = obs.obs_data_get_string(settings, 'playing_scene')
    # Delay validating sources until OBS is fully loaded
obs.script_log(obs.LOG_INFO, 'Starting in 10 seconds...')
obs.timer_add(validate_and_start, 10000)
"""
Checks if status file exists and both scenes exist, then starts the main script timer
"""
def validate_and_start():
global initial_load
global idle_scene
global playing_scene
initial_load = True
obs.timer_remove(validate_and_start)
obs.timer_remove(check_status_and_toggle)
# check if file exists
if not os.path.isfile(status_file):
raise FileNotFoundError(f"Could not find file '{status_file}'")
obs.script_log(obs.LOG_INFO, f'{status_file} found!')
# check if gameplay enter scene exists
src = obs.obs_get_source_by_name(playing_scene)
if src is None or obs.obs_source_get_type(src) != obs.OBS_SOURCE_TYPE_SCENE:
obs.obs_source_release(src)
raise FileNotFoundError(f" Could not find scene '{playing_scene}'")
obs.obs_source_release(src)
obs.script_log(obs.LOG_INFO, f"Scene '{playing_scene}' found!")
# check if gameplay exit scene exists
src = obs.obs_get_source_by_name(idle_scene)
if src is None or obs.obs_source_get_type(src) != obs.OBS_SOURCE_TYPE_SCENE:
obs.obs_source_release(src)
raise FileNotFoundError(f" Could not find scene '{idle_scene}'")
obs.obs_source_release(src)
obs.script_log(obs.LOG_INFO, f"Scene '{idle_scene}' found!")
obs.script_log(obs.LOG_INFO, 'Script is now active.')
obs.timer_add(check_status_and_toggle, 500)
def script_update(settings):
global status_file
global idle_scene
global playing_scene
global initial_load
if not initial_load:
return
status_file = obs.obs_data_get_string(settings, 'status_file')
idle_scene = obs.obs_data_get_string(settings, 'idle_scene')
playing_scene = obs.obs_data_get_string(settings, 'playing_scene')
validate_and_start()
"""
Checks the osu! status file for 'Playing',
then toggles Noise Suppression accordingly
"""
previous_status = ''
def check_status_and_toggle():
global status_file
global idle_scene
global playing_scene
global previous_status
# read status file contents
if not os.path.isfile(status_file):
obs.timer_remove(check_status_and_toggle)
        raise FileNotFoundError(f"Could not find file '{status_file}'")
with open(status_file, 'r') as f:
status = f.readlines()
if status == []:
return
status = status[0].strip()
if status == previous_status: # status has not changed
return
# Switch scene according to game status
if status == 'Playing':
src = obs.obs_get_source_by_name(playing_scene)
else:
src = obs.obs_get_source_by_name(idle_scene)
obs.obs_frontend_set_current_scene(src)
obs.obs_source_release(src)
previous_status = status
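# Hedged note (not part of the original script): Stream Companion is expected
# to write the current osu! status as the first line of status_file, e.g.
#
#   Playing
#
# Any status other than 'Playing' switches back to idle_scene.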
| 33.416667
| 122
| 0.721696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,123
| 0.28005
|
5849254d7b154fa7533602568ea01800f7eb9d68
| 3,386
|
py
|
Python
|
donkey_gym/envs/donkey_env.py
|
mint26/donkey_gym
|
4d0302da5818d56f92247b9dbf389994961f487e
|
[
"MIT"
] | 5
|
2018-11-01T18:48:03.000Z
|
2021-03-11T14:36:22.000Z
|
donkey_gym/envs/donkey_env.py
|
mint26/donkey_gym
|
4d0302da5818d56f92247b9dbf389994961f487e
|
[
"MIT"
] | null | null | null |
donkey_gym/envs/donkey_env.py
|
mint26/donkey_gym
|
4d0302da5818d56f92247b9dbf389994961f487e
|
[
"MIT"
] | 7
|
2018-10-13T19:48:14.000Z
|
2021-10-31T15:10:52.000Z
|
'''
file: donkey_env.py
author: Tawn Kramer
date: 2018-08-31
'''
import os
from threading import Thread
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller
from donkey_gym.envs.donkey_proc import DonkeyUnityProcess
class DonkeyEnv(gym.Env):
"""
OpenAI Gym Environment for Donkey
"""
metadata = {
"render.modes": ["human", "rgb_array"],
}
ACTION = ["steer", "throttle"]
def __init__(self, level, time_step=0.05, frame_skip=2):
print("starting DonkeyGym env")
# start Unity simulation subprocess
self.proc = DonkeyUnityProcess()
try:
exe_path = os.environ['DONKEY_SIM_PATH']
        except KeyError:
print("Missing DONKEY_SIM_PATH environment var. Using defaults")
#you must start the executable on your own
exe_path = "self_start"
try:
port = int(os.environ['DONKEY_SIM_PORT'])
        except KeyError:
print("Missing DONKEY_SIM_PORT environment var. Using defaults")
port = 9090
try:
headless = os.environ['DONKEY_SIM_HEADLESS']=='1'
        except KeyError:
print("Missing DONKEY_SIM_HEADLESS environment var. Using defaults")
headless = False
self.proc.start(exe_path, headless=headless, port=port)
# start simulation com
self.viewer = DonkeyUnitySimContoller(level=level, time_step=time_step, port=port)
# steering
# TODO(r7vme): Add throttle
self.action_space = spaces.Box(low=np.array([-1.0]), high=np.array([1.0]))
# camera sensor data
self.observation_space = spaces.Box(0, 255, self.viewer.get_sensor_size(), dtype=np.uint8)
# simulation related variables.
self.seed()
# Frame Skipping
self.frame_skip = frame_skip
# wait until loaded
self.viewer.wait_until_loaded()
def close(self):
self.proc.quit()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
for i in range(self.frame_skip):
self.viewer.take_action(action)
observation, reward, done, info = self.viewer.observe()
return observation, reward, done, info
def reset(self):
self.viewer.reset()
observation, reward, done, info = self.viewer.observe()
return observation
def render(self, mode="human", close=False):
if close:
self.viewer.quit()
return self.viewer.render(mode)
def is_game_over(self):
return self.viewer.is_game_over()
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##
class GeneratedRoadsEnv(DonkeyEnv):
def __init__(self):
super(GeneratedRoadsEnv, self).__init__(level=0)
class WarehouseEnv(DonkeyEnv):
def __init__(self):
super(WarehouseEnv, self).__init__(level=1)
class AvcSparkfunEnv(DonkeyEnv):
def __init__(self):
super(AvcSparkfunEnv, self).__init__(level=2)
class GeneratedTrackEnv(DonkeyEnv):
def __init__(self):
super(GeneratedTrackEnv, self).__init__(level=3)
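# --- Hedged usage sketch (not part of the original module) ---
# Assumes the Donkey simulator binary is installed and DONKEY_SIM_PATH /
# DONKEY_SIM_PORT are set as described above; the action is steering-only.
def _demo_single_step():
    env = GeneratedRoadsEnv()
    observation = env.reset()
    observation, reward, done, info = env.step(np.array([0.0]))
    env.close()
    return reward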
| 27.528455
| 99
| 0.60189
| 2,975
| 0.878618
| 0
| 0
| 0
| 0
| 0
| 0
| 723
| 0.213526
|
5849a619f304aa85187564eba6cb5913a8f7354f
| 2,403
|
py
|
Python
|
tests/unit/backend/corpora/common/entities/datasets/test_revision.py
|
chanzuckerberg/dcp-prototype
|
24d2323ba5ae1482395da35ea11c42708e3a52ce
|
[
"MIT"
] | 2
|
2020-02-07T18:12:12.000Z
|
2020-02-11T14:59:03.000Z
|
tests/unit/backend/corpora/common/entities/datasets/test_revision.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 173
|
2020-01-29T17:48:02.000Z
|
2020-03-20T02:52:58.000Z
|
tests/unit/backend/corpora/common/entities/datasets/test_revision.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 1
|
2020-03-20T17:06:54.000Z
|
2020-03-20T17:06:54.000Z
|
from tests.unit.backend.corpora.common.entities.datasets import TestDataset
class TestDatasetRevision(TestDataset):
def test__create_dataset_revision(self):
dataset = self.generate_dataset_with_s3_resources(self.session, published=True)
rev_dataset = dataset.create_revision("test_collection_id_revision").to_dict()
dataset = dataset.to_dict()
with self.subTest("artifacts are correctly created and point to correct s3 uri"):
rev_artifacts = rev_dataset.pop("artifacts")
original_artifacts = dataset.pop("artifacts")
for i in range(0, len(rev_artifacts)):
for key in rev_artifacts[i].keys():
self.compare_original_and_revision(
original_artifacts[i], rev_artifacts[i], key, ("dataset_id", "id")
)
with self.subTest("deployment is correctly created and points to correct s3 uri "):
rev_deployment = rev_dataset.pop("explorer_url")
original_deployment = dataset.pop("explorer_url")
self.assertIsNotNone(original_deployment)
self.assertEqual(rev_deployment, f"http://bogus.url/d/{rev_dataset['id']}.cxg/")
with self.subTest("Test processing status copied over"):
rev_processing_status = rev_dataset.pop("processing_status")
original_processing_status = dataset.pop("processing_status")
for key in rev_processing_status.keys():
self.compare_original_and_revision(
original_processing_status, rev_processing_status, key, ("dataset_id", "id")
)
with self.subTest("revision points at a different collection"):
revision_collection = rev_dataset.pop("collection")
dataset_1_collection = dataset.pop("collection")
self.assertNotEqual(revision_collection, dataset_1_collection)
with self.subTest("metadata of revised matches original"):
for key in rev_dataset.keys():
self.compare_original_and_revision(dataset, rev_dataset, key, ("original_id", "id", "collection_id"))
def compare_original_and_revision(self, original, revision, key, unique_fields):
if key in unique_fields:
self.assertNotEqual(original[key], revision[key])
else:
self.assertEqual(original[key], revision[key])
| 52.23913
| 117
| 0.665418
| 2,324
| 0.967124
| 0
| 0
| 0
| 0
| 0
| 0
| 492
| 0.204744
|
584a11d14b64edf45f4d6711e52adb48c3e934c3
| 3,966
|
py
|
Python
|
main.py
|
parzibyte/login-flask
|
165e10980f6a34c7587a53578ed361506eb37475
|
[
"MIT"
] | 2
|
2021-09-21T16:37:41.000Z
|
2021-12-09T17:38:18.000Z
|
main.py
|
parzibyte/login-flask
|
165e10980f6a34c7587a53578ed361506eb37475
|
[
"MIT"
] | null | null | null |
main.py
|
parzibyte/login-flask
|
165e10980f6a34c7587a53578ed361506eb37475
|
[
"MIT"
] | 1
|
2021-08-16T01:36:58.000Z
|
2021-08-16T01:36:58.000Z
|
"""
____ _____ _ _ _
| _ \ | __ \ (_) | | |
| |_) |_ _ | |__) |_ _ _ __ _____| |__ _ _| |_ ___
| _ <| | | | | ___/ _` | '__|_ / | '_ \| | | | __/ _ \
| |_) | |_| | | | | (_| | | / /| | |_) | |_| | || __/
|____/ \__, | |_| \__,_|_| /___|_|_.__/ \__, |\__\___|
__/ | __/ |
|___/ |___/
 ____________________________________
/ If you need help, contact me at    \
\ https://parzibyte.me               /
 ------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
||----w |
|| ||
Created by Parzibyte (https://parzibyte.me).
------------------------------------------------------------------------------------------------
|                                          IMPORTANT                                           |
If you are going to remove this header, please consider:
Following me: https://parzibyte.me/blog/sigueme/
And sharing my blog with your friends
I also have a YouTube channel: https://www.youtube.com/channel/UCroP4BTWjfM0CkGB6AFUoBg?sub_confirmation=1
Twitter: https://twitter.com/parzibyte
Facebook: https://facebook.com/parzibyte.fanpage
Instagram: https://instagram.com/parzibyte
Donating via PayPal: https://paypal.me/LuisCabreraBenito
------------------------------------------------------------------------------------------------
"""
from flask import Flask, render_template, request, redirect, session, flash
app = Flask(__name__)
"""
Clave secreta. Esta debe ser aleatoria, puedes generarla tú.
Primero instala Python y agrega python a la PATH: https://parzibyte.me/blog/2019/10/08/instalar-python-pip-64-bits-windows/
Luego abre una terminal y ejecuta:
python
Entrarás a la CLI de Python, ahí ejecuta:
import os; print(os.urandom(16));
Eso te dará algo como:
b'\x11\xad\xec\t\x99\x8f\xfa\x86\xe8A\xd9\x1a\xf6\x12Z\xf4'
Simplemente remplaza la clave que se ve a continuación con los bytes aleatorios que generaste
"""
app.secret_key = b'\xaa\xe4V}y~\x84G\xb5\x95\xa0\xe0\x96\xca\xa7\xe7'
"""
Definición de rutas
"""
# Protegida. Solo pueden entrar los que han iniciado sesión
@app.route("/escritorio")
def escritorio():
return render_template("escritorio.html")
# Login form
@app.route("/login")
def login():
return render_template("login.html")
# Handle login
@app.route("/hacer_login", methods=["POST"])
def hacer_login():
correo = request.form["correo"]
palabra_secreta = request.form["palabra_secreta"]
    # Compare here. Kept this simple for clarity
    # In real life this should use a database and a hashed password
if correo == "parzibyte@gmail.com" and palabra_secreta == "123":
        # If it matches, start the session and also redirect
session["usuario"] = correo
        # You can store more data here. For example
# session["nivel"] = "administrador"
return redirect("/escritorio")
else:
        # If it does NOT match, send the user back
flash("Correo o contraseña incorrectos")
return redirect("/login")
# Log out
@app.route("/logout")
def logout():
session.pop("usuario", None)
return redirect("/login")
# A "middleware" that runs before responding to any route. Here we check whether the user has logged in
@app.before_request
def antes_de_cada_peticion():
ruta = request.path
    # If the user has not logged in and is not going anywhere login-related, redirect to the login
if not 'usuario' in session and ruta != "/login" and ruta != "/hacer_login" and ruta != "/logout" and not ruta.startswith("/static"):
flash("Inicia sesión para continuar")
return redirect("/login")
    # If already logged in, do nothing, i.e. let the request through
# Start the server
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, debug=True)
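# Hedged usage sketch (commented out; not part of the original app): the login
# flow can be exercised with Flask's test client using the hard-coded
# credentials above.
# with app.test_client() as client:
#     client.post("/hacer_login", data={"correo": "parzibyte@gmail.com",
#                                       "palabra_secreta": "123"})
#     assert client.get("/escritorio").status_code == 200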
| 36.054545
| 137
| 0.594049
| 0
| 0
| 0
| 0
| 1,470
| 0.368144
| 0
| 0
| 3,086
| 0.772852
|
584b5746e6a8959beb85942376ecc9e56d8276af
| 707
|
py
|
Python
|
events/kawacon2016/migrations/0003_auto_20160127_1924.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
events/kawacon2016/migrations/0003_auto_20160127_1924.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
events/kawacon2016/migrations/0003_auto_20160127_1924.py
|
jlaunonen/turska
|
fc6ec4e0ae50a823e931152ce8835098b96f5966
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-27 17:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kawacon2016', '0002_auto_20160127_1922'),
]
operations = [
migrations.AlterField(
model_name='signupextra',
name='needs_lodging',
field=models.ManyToManyField(blank=True, help_text='V\xe4nk\xe4rin\xe4 saat tarvittaessa maksuttoman majoituksen lattiamajoituksessa. Merkitse t\xe4h\xe4n, min\xe4 \xf6in\xe4 tarvitset lattiamajoitusta.', to='kawacon2016.Night', verbose_name='Majoitustarve lattiamajoituksessa'),
),
]
| 33.666667
| 291
| 0.700141
| 550
| 0.777935
| 0
| 0
| 0
| 0
| 0
| 0
| 342
| 0.483734
|